/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-mo.h"
#include "trace-tcg.h"
#include "trace/mem.h"
#include "exec/plugin-gen.h"

/* Reduce the number of ifdefs below.  This assumes that all uses of
   TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
   the compiler can eliminate.  */
#if TCG_TARGET_REG_BITS == 64
extern TCGv_i32 TCGV_LOW_link_error(TCGv_i64);
extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64);
#define TCGV_LOW  TCGV_LOW_link_error
#define TCGV_HIGH TCGV_HIGH_link_error
#endif

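/*
 * Raw opcode emitters: each appends one TCGOp with the given opcode to the
 * current instruction stream and fills in its argument slots in order.
 */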
void tcg_gen_op1(TCGOpcode opc, TCGArg a1)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
}

void tcg_gen_op2(TCGOpcode opc, TCGArg a1, TCGArg a2)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
    op->args[1] = a2;
}

void tcg_gen_op3(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
    op->args[1] = a2;
    op->args[2] = a3;
}

void tcg_gen_op4(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, TCGArg a4)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
    op->args[1] = a2;
    op->args[2] = a3;
    op->args[3] = a4;
}

void tcg_gen_op5(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
                 TCGArg a4, TCGArg a5)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
    op->args[1] = a2;
    op->args[2] = a3;
    op->args[3] = a4;
    op->args[4] = a5;
}

void tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
                 TCGArg a4, TCGArg a5, TCGArg a6)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
    op->args[1] = a2;
    op->args[2] = a3;
    op->args[3] = a4;
    op->args[4] = a5;
    op->args[5] = a6;
}

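/*
 * Memory barriers are only emitted when the TB is compiled for parallel
 * execution (CF_PARALLEL); with a single vCPU running at a time the
 * barrier could not be observed, so it is omitted entirely.
 */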
void tcg_gen_mb(TCGBar mb_type)
{
    if (tcg_ctx->tb_cflags & CF_PARALLEL) {
        tcg_gen_op1(INDEX_op_mb, mb_type);
    }
}

/* 32 bit ops */

void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg)
{
    tcg_gen_mov_i32(ret, tcg_constant_i32(arg));
}

void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        tcg_gen_add_i32(ret, arg1, tcg_constant_i32(arg2));
    }
}

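/* "Subtract from immediate": ret = arg1 - arg2 with a constant minuend,
   reduced to a plain negation when arg1 == 0. */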
void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2)
{
    if (arg1 == 0 && TCG_TARGET_HAS_neg_i32) {
        /* Don't recurse with tcg_gen_neg_i32. */
        tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg2);
    } else {
        tcg_gen_sub_i32(ret, tcg_constant_i32(arg1), arg2);
    }
}

void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        tcg_gen_sub_i32(ret, arg1, tcg_constant_i32(arg2));
    }
}

void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* Some cases can be optimized here. */
    switch (arg2) {
    case 0:
        tcg_gen_movi_i32(ret, 0);
        return;
    case -1:
        tcg_gen_mov_i32(ret, arg1);
        return;
    case 0xff:
        /* Don't recurse with tcg_gen_ext8u_i32. */
        if (TCG_TARGET_HAS_ext8u_i32) {
            tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg1);
            return;
        }
        break;
    case 0xffff:
        if (TCG_TARGET_HAS_ext16u_i32) {
            tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg1);
            return;
        }
        break;
    }

    tcg_gen_and_i32(ret, arg1, tcg_constant_i32(arg2));
}

void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* Some cases can be optimized here. */
    if (arg2 == -1) {
        tcg_gen_movi_i32(ret, -1);
    } else if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        tcg_gen_or_i32(ret, arg1, tcg_constant_i32(arg2));
    }
}

void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* Some cases can be optimized here. */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else if (arg2 == -1 && TCG_TARGET_HAS_not_i32) {
        /* Don't recurse with tcg_gen_not_i32. */
        tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg1);
    } else {
        tcg_gen_xor_i32(ret, arg1, tcg_constant_i32(arg2));
    }
}

void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    tcg_debug_assert(arg2 >= 0 && arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        tcg_gen_shl_i32(ret, arg1, tcg_constant_i32(arg2));
    }
}

void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    tcg_debug_assert(arg2 >= 0 && arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        tcg_gen_shr_i32(ret, arg1, tcg_constant_i32(arg2));
    }
}

void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    tcg_debug_assert(arg2 >= 0 && arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        tcg_gen_sar_i32(ret, arg1, tcg_constant_i32(arg2));
    }
}

void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        l->refs++;
        tcg_gen_op4ii_i32(INDEX_op_brcond_i32, arg1, arg2, cond, label_arg(l));
    }
}

void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        tcg_gen_brcond_i32(cond, arg1, tcg_constant_i32(arg2), l);
    }
}

void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
                         TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_movi_i32(ret, 1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_movi_i32(ret, 0);
    } else {
        tcg_gen_op4i_i32(INDEX_op_setcond_i32, ret, arg1, arg2, cond);
    }
}

void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
                          TCGv_i32 arg1, int32_t arg2)
{
    tcg_gen_setcond_i32(cond, ret, arg1, tcg_constant_i32(arg2));
}

void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    if (arg2 == 0) {
        tcg_gen_movi_i32(ret, 0);
    } else if (is_power_of_2(arg2)) {
        tcg_gen_shli_i32(ret, arg1, ctz32(arg2));
    } else {
        tcg_gen_mul_i32(ret, arg1, tcg_constant_i32(arg2));
    }
}

void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_div_i32) {
        tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_sari_i32(t0, arg1, 31);
        tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_div_i32(ret, arg1, arg2);
    }
}

void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rem_i32) {
        tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2);
        tcg_gen_mul_i32(t0, t0, arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_sari_i32(t0, arg1, 31);
        tcg_gen_op5_i32(INDEX_op_div2_i32, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_rem_i32(ret, arg1, arg2);
    }
}

void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_div_i32) {
        tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_movi_i32(t0, 0);
        tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_divu_i32(ret, arg1, arg2);
    }
}

void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rem_i32) {
        tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_divu_i32, t0, arg1, arg2);
        tcg_gen_mul_i32(t0, t0, arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_movi_i32(t0, 0);
        tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_remu_i32(ret, arg1, arg2);
    }
}

void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_andc_i32) {
        tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_not_i32(t0, arg2);
        tcg_gen_and_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_eqv_i32) {
        tcg_gen_op3_i32(INDEX_op_eqv_i32, ret, arg1, arg2);
    } else {
        tcg_gen_xor_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_nand_i32) {
        tcg_gen_op3_i32(INDEX_op_nand_i32, ret, arg1, arg2);
    } else {
        tcg_gen_and_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_nor_i32) {
        tcg_gen_op3_i32(INDEX_op_nor_i32, ret, arg1, arg2);
    } else {
        tcg_gen_or_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_orc_i32) {
        tcg_gen_op3_i32(INDEX_op_orc_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_not_i32(t0, arg2);
        tcg_gen_or_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

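/*
 * Widening fallback for clz: zero-extend to 64 bits, where the count is
 * 32 too large; biasing the zero-input value (arg2) by 32 keeps that case
 * correct after the final subtraction of 32.
 */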
void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_clz_i32) {
        tcg_gen_op3_i32(INDEX_op_clz_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_clz_i64) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t1, arg1);
        tcg_gen_extu_i32_i64(t2, arg2);
        tcg_gen_addi_i64(t2, t2, 32);
        tcg_gen_clz_i64(t1, t1, t2);
        tcg_gen_extrl_i64_i32(ret, t1);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i64(t2);
        tcg_gen_subi_i32(ret, ret, 32);
    } else {
        gen_helper_clz_i32(ret, arg1, arg2);
    }
}

void tcg_gen_clzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
    tcg_gen_clz_i32(ret, arg1, tcg_constant_i32(arg2));
}

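/*
 * Fallbacks for ctz: (arg - 1) & ~arg is a mask of exactly the trailing
 * zero bits, so its population count is the answer; otherwise arg & -arg
 * isolates the lowest set bit for clz to locate.  The final movcond
 * substitutes arg2 when the input is zero.
 */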
void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_ctz_i32) {
        tcg_gen_op3_i32(INDEX_op_ctz_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_ctz_i64) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t1, arg1);
        tcg_gen_extu_i32_i64(t2, arg2);
        tcg_gen_ctz_i64(t1, t1, t2);
        tcg_gen_extrl_i64_i32(ret, t1);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i64(t2);
    } else if (TCG_TARGET_HAS_ctpop_i32
               || TCG_TARGET_HAS_ctpop_i64
               || TCG_TARGET_HAS_clz_i32
               || TCG_TARGET_HAS_clz_i64) {
        TCGv_i32 z, t = tcg_temp_new_i32();

        if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64) {
            tcg_gen_subi_i32(t, arg1, 1);
            tcg_gen_andc_i32(t, t, arg1);
            tcg_gen_ctpop_i32(t, t);
        } else {
            /* Since all non-x86 hosts have clz(0) == 32, don't fight it. */
            tcg_gen_neg_i32(t, arg1);
            tcg_gen_and_i32(t, t, arg1);
            tcg_gen_clzi_i32(t, t, 32);
            tcg_gen_xori_i32(t, t, 31);
        }
        z = tcg_constant_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, ret, arg1, z, arg2, t);
        tcg_temp_free_i32(t);
    } else {
        gen_helper_ctz_i32(ret, arg1, arg2);
    }
}

void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
    if (!TCG_TARGET_HAS_ctz_i32 && TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) {
        /* This equivalence has the advantage of not requiring a fixup. */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_subi_i32(t, arg1, 1);
        tcg_gen_andc_i32(t, t, arg1);
        tcg_gen_ctpop_i32(ret, t);
        tcg_temp_free_i32(t);
    } else {
        tcg_gen_ctz_i32(ret, arg1, tcg_constant_i32(arg2));
    }
}

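/*
 * Count leading redundant sign bits: xor-ing with the broadcast sign bit
 * turns the redundant copies into leading zeros, so clz of that value
 * minus one (for the sign bit itself) is the result.
 */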
void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_clz_i32) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_sari_i32(t, arg, 31);
        tcg_gen_xor_i32(t, t, arg);
        tcg_gen_clzi_i32(t, t, 32);
        tcg_gen_subi_i32(ret, t, 1);
        tcg_temp_free_i32(t);
    } else {
        gen_helper_clrsb_i32(ret, arg);
    }
}

void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1)
{
    if (TCG_TARGET_HAS_ctpop_i32) {
        tcg_gen_op2_i32(INDEX_op_ctpop_i32, ret, arg1);
    } else if (TCG_TARGET_HAS_ctpop_i64) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, arg1);
        tcg_gen_ctpop_i64(t, t);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);
    } else {
        gen_helper_ctpop_i32(ret, arg1);
    }
}

void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rot_i32) {
        tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_shl_i32(t0, arg1, arg2);
        tcg_gen_subfi_i32(t1, 32, arg2);
        tcg_gen_shr_i32(t1, arg1, t1);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    tcg_debug_assert(arg2 >= 0 && arg2 < 32);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else if (TCG_TARGET_HAS_rot_i32) {
        tcg_gen_rotl_i32(ret, arg1, tcg_constant_i32(arg2));
    } else {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_shli_i32(t0, arg1, arg2);
        tcg_gen_shri_i32(t1, arg1, 32 - arg2);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rot_i32) {
        tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_shr_i32(t0, arg1, arg2);
        tcg_gen_subfi_i32(t1, 32, arg2);
        tcg_gen_shl_i32(t1, arg1, t1);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    tcg_debug_assert(arg2 >= 0 && arg2 < 32);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        tcg_gen_rotli_i32(ret, arg1, 32 - arg2);
    }
}

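/*
 * Deposit the low LEN bits of arg2 into arg1 at bit offset OFS.  Without
 * a usable host deposit op this falls back to extract2/rotate sequences
 * or to an explicit mask, shift and or.
 */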
void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
                         unsigned int ofs, unsigned int len)
{
    uint32_t mask;
    TCGv_i32 t1;

    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    if (len == 32) {
        tcg_gen_mov_i32(ret, arg2);
        return;
    }
    if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) {
        tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, arg1, arg2, ofs, len);
        return;
    }

    t1 = tcg_temp_new_i32();

    if (TCG_TARGET_HAS_extract2_i32) {
        if (ofs + len == 32) {
            tcg_gen_shli_i32(t1, arg1, len);
            tcg_gen_extract2_i32(ret, t1, arg2, len);
            goto done;
        }
        if (ofs == 0) {
            tcg_gen_extract2_i32(ret, arg1, arg2, len);
            tcg_gen_rotli_i32(ret, ret, len);
            goto done;
        }
    }

    mask = (1u << len) - 1;
    if (ofs + len < 32) {
        tcg_gen_andi_i32(t1, arg2, mask);
        tcg_gen_shli_i32(t1, t1, ofs);
    } else {
        tcg_gen_shli_i32(t1, arg2, ofs);
    }
    tcg_gen_andi_i32(ret, arg1, ~(mask << ofs));
    tcg_gen_or_i32(ret, ret, t1);
 done:
    tcg_temp_free_i32(t1);
}

void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
                           unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    if (ofs + len == 32) {
        tcg_gen_shli_i32(ret, arg, ofs);
    } else if (ofs == 0) {
        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
    } else if (TCG_TARGET_HAS_deposit_i32
               && TCG_TARGET_deposit_i32_valid(ofs, len)) {
        TCGv_i32 zero = tcg_constant_i32(0);
        tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, zero, arg, ofs, len);
    } else {
        /* To help two-operand hosts we prefer to zero-extend first,
           which allows ARG to stay live.  */
        switch (len) {
        case 16:
            if (TCG_TARGET_HAS_ext16u_i32) {
                tcg_gen_ext16u_i32(ret, arg);
                tcg_gen_shli_i32(ret, ret, ofs);
                return;
            }
            break;
        case 8:
            if (TCG_TARGET_HAS_ext8u_i32) {
                tcg_gen_ext8u_i32(ret, arg);
                tcg_gen_shli_i32(ret, ret, ofs);
                return;
            }
            break;
        }
        /* Otherwise prefer zero-extension over AND for code size.  */
        switch (ofs + len) {
        case 16:
            if (TCG_TARGET_HAS_ext16u_i32) {
                tcg_gen_shli_i32(ret, arg, ofs);
                tcg_gen_ext16u_i32(ret, ret);
                return;
            }
            break;
        case 8:
            if (TCG_TARGET_HAS_ext8u_i32) {
                tcg_gen_shli_i32(ret, arg, ofs);
                tcg_gen_ext8u_i32(ret, ret);
                return;
            }
            break;
        }
        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
        tcg_gen_shli_i32(ret, ret, ofs);
    }
}

void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
                         unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    /* Canonicalize certain special cases, even if extract is supported.  */
    if (ofs + len == 32) {
        tcg_gen_shri_i32(ret, arg, 32 - len);
        return;
    }
    if (ofs == 0) {
        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
        return;
    }

    if (TCG_TARGET_HAS_extract_i32
        && TCG_TARGET_extract_i32_valid(ofs, len)) {
        tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, ofs, len);
        return;
    }

    /* Assume that zero-extension, if available, is cheaper than a shift.  */
    switch (ofs + len) {
    case 16:
        if (TCG_TARGET_HAS_ext16u_i32) {
            tcg_gen_ext16u_i32(ret, arg);
            tcg_gen_shri_i32(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8u_i32) {
            tcg_gen_ext8u_i32(ret, arg);
            tcg_gen_shri_i32(ret, ret, ofs);
            return;
        }
        break;
    }

    /* ??? Ideally we'd know what values are available for immediate AND.
       Assume that 8 bits are available, plus the special case of 16,
       so that we get ext8u, ext16u.  */
    switch (len) {
    case 1 ... 8: case 16:
        tcg_gen_shri_i32(ret, arg, ofs);
        tcg_gen_andi_i32(ret, ret, (1u << len) - 1);
        break;
    default:
        tcg_gen_shli_i32(ret, arg, 32 - len - ofs);
        tcg_gen_shri_i32(ret, ret, 32 - len);
        break;
    }
}

void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
                          unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    /* Canonicalize certain special cases, even if extract is supported.  */
    if (ofs + len == 32) {
        tcg_gen_sari_i32(ret, arg, 32 - len);
        return;
    }
    if (ofs == 0) {
        switch (len) {
        case 16:
            tcg_gen_ext16s_i32(ret, arg);
            return;
        case 8:
            tcg_gen_ext8s_i32(ret, arg);
            return;
        }
    }

    if (TCG_TARGET_HAS_sextract_i32
        && TCG_TARGET_extract_i32_valid(ofs, len)) {
        tcg_gen_op4ii_i32(INDEX_op_sextract_i32, ret, arg, ofs, len);
        return;
    }

    /* Assume that sign-extension, if available, is cheaper than a shift.  */
    switch (ofs + len) {
    case 16:
        if (TCG_TARGET_HAS_ext16s_i32) {
            tcg_gen_ext16s_i32(ret, arg);
            tcg_gen_sari_i32(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i32) {
            tcg_gen_ext8s_i32(ret, arg);
            tcg_gen_sari_i32(ret, ret, ofs);
            return;
        }
        break;
    }
    switch (len) {
    case 16:
        if (TCG_TARGET_HAS_ext16s_i32) {
            tcg_gen_shri_i32(ret, arg, ofs);
            tcg_gen_ext16s_i32(ret, ret);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i32) {
            tcg_gen_shri_i32(ret, arg, ofs);
            tcg_gen_ext8s_i32(ret, ret);
            return;
        }
        break;
    }

    tcg_gen_shli_i32(ret, arg, 32 - len - ofs);
    tcg_gen_sari_i32(ret, ret, 32 - len);
}

/*
 * Extract 32-bits from a 64-bit input, ah:al, starting from ofs.
 * Unlike tcg_gen_extract_i32 above, len is fixed at 32.
 */
void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah,
                          unsigned int ofs)
{
    tcg_debug_assert(ofs <= 32);
    if (ofs == 0) {
        tcg_gen_mov_i32(ret, al);
    } else if (ofs == 32) {
        tcg_gen_mov_i32(ret, ah);
    } else if (al == ah) {
        tcg_gen_rotri_i32(ret, al, ofs);
    } else if (TCG_TARGET_HAS_extract2_i32) {
        tcg_gen_op4i_i32(INDEX_op_extract2_i32, ret, al, ah, ofs);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_shri_i32(t0, al, ofs);
        tcg_gen_deposit_i32(ret, t0, ah, 32 - ofs, ofs);
        tcg_temp_free_i32(t0);
    }
}

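/*
 * Without a host movcond, build a branchless select: negating the 0/1
 * setcond result gives an all-zeros or all-ones mask, which blends v1
 * and v2 via and/andc/or.
 */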
void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
                         TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_mov_i32(ret, v1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_mov_i32(ret, v2);
    } else if (TCG_TARGET_HAS_movcond_i32) {
        tcg_gen_op6i_i32(INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cond, t0, c1, c2);
        tcg_gen_neg_i32(t0, t0);
        tcg_gen_and_i32(t1, v1, t0);
        tcg_gen_andc_i32(ret, v2, t0);
        tcg_gen_or_i32(ret, ret, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
                      TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
{
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_op6_i32(INDEX_op_add2_i32, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_concat_i32_i64(t0, al, ah);
        tcg_gen_concat_i32_i64(t1, bl, bh);
        tcg_gen_add_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
                      TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
{
    if (TCG_TARGET_HAS_sub2_i32) {
        tcg_gen_op6_i32(INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_concat_i32_i64(t0, al, ah);
        tcg_gen_concat_i32_i64(t1, bl, bh);
        tcg_gen_sub_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_mulu2_i32) {
        tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_muluh_i32) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
        tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
        tcg_gen_mov_i32(rl, t);
        tcg_temp_free_i32(t);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t0, arg1);
        tcg_gen_extu_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_muls2_i32) {
        tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_mulsh_i32) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
        tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
        tcg_gen_mov_i32(rl, t);
        tcg_temp_free_i32(t);
    } else if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
        /* Adjust for negative inputs.  */
        tcg_gen_sari_i32(t2, arg1, 31);
        tcg_gen_sari_i32(t3, arg2, 31);
        tcg_gen_and_i32(t2, t2, arg2);
        tcg_gen_and_i32(t3, t3, arg1);
        tcg_gen_sub_i32(rh, t1, t2);
        tcg_gen_sub_i32(rh, rh, t3);
        tcg_gen_mov_i32(rl, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_ext_i32_i64(t0, arg1);
        tcg_gen_ext_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

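/*
 * Widening multiply of signed arg1 by unsigned arg2.  The plain unsigned
 * product is too large by arg2 << 32 exactly when arg1 is negative, so
 * that amount is subtracted from the high half.
 */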
void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();
        tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
        /* Adjust for negative input for the signed arg1.  */
        tcg_gen_sari_i32(t2, arg1, 31);
        tcg_gen_and_i32(t2, t2, arg2);
        tcg_gen_sub_i32(rh, t1, t2);
        tcg_gen_mov_i32(rl, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(t2);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_ext_i32_i64(t0, arg1);
        tcg_gen_extu_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext8s_i32) {
        tcg_gen_op2_i32(INDEX_op_ext8s_i32, ret, arg);
    } else {
        tcg_gen_shli_i32(ret, arg, 24);
        tcg_gen_sari_i32(ret, ret, 24);
    }
}

void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext16s_i32) {
        tcg_gen_op2_i32(INDEX_op_ext16s_i32, ret, arg);
    } else {
        tcg_gen_shli_i32(ret, arg, 16);
        tcg_gen_sari_i32(ret, ret, 16);
    }
}

void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext8u_i32) {
        tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg);
    } else {
        tcg_gen_andi_i32(ret, arg, 0xffu);
    }
}

void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext16u_i32) {
        tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg);
    } else {
        tcg_gen_andi_i32(ret, arg, 0xffffu);
    }
}

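/*
 * Byte-swap the low 16 bits.  TCG_BSWAP_IZ asserts the input is already
 * zero-extended beyond bit 15; TCG_BSWAP_OZ / TCG_BSWAP_OS request a
 * zero- or sign-extended result, otherwise bits 16..31 of the output
 * are unspecified.
 */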
void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags)
{
    /* Only one extension flag may be present. */
    tcg_debug_assert(!(flags & TCG_BSWAP_OS) || !(flags & TCG_BSWAP_OZ));

    if (TCG_TARGET_HAS_bswap16_i32) {
        tcg_gen_op3i_i32(INDEX_op_bswap16_i32, ret, arg, flags);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();

        tcg_gen_shri_i32(t0, arg, 8);
        if (!(flags & TCG_BSWAP_IZ)) {
            tcg_gen_ext8u_i32(t0, t0);
        }

        if (flags & TCG_BSWAP_OS) {
            tcg_gen_shli_i32(t1, arg, 24);
            tcg_gen_sari_i32(t1, t1, 16);
        } else if (flags & TCG_BSWAP_OZ) {
            tcg_gen_ext8u_i32(t1, arg);
            tcg_gen_shli_i32(t1, t1, 8);
        } else {
            tcg_gen_shli_i32(t1, arg, 8);
        }

        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_bswap32_i32) {
        tcg_gen_op3i_i32(INDEX_op_bswap32_i32, ret, arg, 0);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_constant_i32(0x00ff00ff);

                                        /* arg = abcd */
        tcg_gen_shri_i32(t0, arg, 8);   /*  t0 = .abc */
        tcg_gen_and_i32(t1, arg, t2);   /*  t1 = .b.d */
        tcg_gen_and_i32(t0, t0, t2);    /*  t0 = .a.c */
        tcg_gen_shli_i32(t1, t1, 8);    /*  t1 = b.d. */
        tcg_gen_or_i32(ret, t0, t1);    /* ret = badc */

        tcg_gen_shri_i32(t0, ret, 16);  /*  t0 = ..ba */
        tcg_gen_shli_i32(t1, ret, 16);  /*  t1 = dc.. */
        tcg_gen_or_i32(ret, t0, t1);    /* ret = dcba */

        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_smin_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_movcond_i32(TCG_COND_LT, ret, a, b, a, b);
}

void tcg_gen_umin_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_movcond_i32(TCG_COND_LTU, ret, a, b, a, b);
}

void tcg_gen_smax_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_movcond_i32(TCG_COND_LT, ret, a, b, b, a);
}

void tcg_gen_umax_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_movcond_i32(TCG_COND_LTU, ret, a, b, b, a);
}

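/* abs(a) via the sign mask: t = a >> 31 is 0 or -1, so (a ^ t) - t
   conditionally complements and then adds back one for negative a. */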
void tcg_gen_abs_i32(TCGv_i32 ret, TCGv_i32 a)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_sari_i32(t, a, 31);
    tcg_gen_xor_i32(ret, a, t);
    tcg_gen_sub_i32(ret, ret, t);
    tcg_temp_free_i32(t);
}

/* 64-bit ops */

#if TCG_TARGET_REG_BITS == 32
/* These are all inline for TCG_TARGET_REG_BITS == 64. */

void tcg_gen_discard_i64(TCGv_i64 arg)
{
    tcg_gen_discard_i32(TCGV_LOW(arg));
    tcg_gen_discard_i32(TCGV_HIGH(arg));
}

void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    TCGTemp *ts = tcgv_i64_temp(arg);

    /* Canonicalize TCGv_i64 TEMP_CONST into TCGv_i32 TEMP_CONST. */
    if (ts->kind == TEMP_CONST) {
        tcg_gen_movi_i64(ret, ts->val);
    } else {
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
    }
}

void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)
{
    tcg_gen_movi_i32(TCGV_LOW(ret), arg);
    tcg_gen_movi_i32(TCGV_HIGH(ret), arg >> 32);
}

void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld8u_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld8s_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld16u_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld16s_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    /* Since arg2 and ret have different types,
       they cannot be the same temporary */
#ifdef HOST_WORDS_BIGENDIAN
    tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset);
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset + 4);
#else
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset + 4);
#endif
}

void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
{
#ifdef HOST_WORDS_BIGENDIAN
    tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset);
    tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset + 4);
#else
    tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset);
    tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset + 4);
#endif
}

void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_and_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_and_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_xor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_xor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_shl_i64(ret, arg1, arg2);
}

void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_shr_i64(ret, arg1, arg2);
}

void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_sar_i64(ret, arg1, arg2);
}

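/*
 * 64x64 multiply from 32-bit halves: mulu2 of the low halves provides the
 * low word plus a partial high word, then both 32x32 cross products are
 * added into the high word; the high x high term only affects bits above
 * 63 and is dropped.
 */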
1214 | void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) | |
1215 | { | |
1216 | TCGv_i64 t0; | |
1217 | TCGv_i32 t1; | |
1218 | ||
1219 | t0 = tcg_temp_new_i64(); | |
1220 | t1 = tcg_temp_new_i32(); | |
1221 | ||
1222 | tcg_gen_mulu2_i32(TCGV_LOW(t0), TCGV_HIGH(t0), | |
1223 | TCGV_LOW(arg1), TCGV_LOW(arg2)); | |
1224 | ||
1225 | tcg_gen_mul_i32(t1, TCGV_LOW(arg1), TCGV_HIGH(arg2)); | |
1226 | tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1); | |
1227 | tcg_gen_mul_i32(t1, TCGV_HIGH(arg1), TCGV_LOW(arg2)); | |
1228 | tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1); | |
1229 | ||
1230 | tcg_gen_mov_i64(ret, t0); | |
1231 | tcg_temp_free_i64(t0); | |
1232 | tcg_temp_free_i32(t1); | |
1233 | } | |
11d11d61 RH |
1234 | |
1235 | #else | |
1236 | ||
1237 | void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg) | |
1238 | { | |
1239 | tcg_gen_mov_i64(ret, tcg_constant_i64(arg)); | |
1240 | } | |
1241 | ||
951c6300 RH |
1242 | #endif /* TCG_TARGET_REG_SIZE == 32 */ |
1243 | ||
1244 | void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | |
1245 | { | |
1246 | /* some cases can be optimized here */ | |
1247 | if (arg2 == 0) { | |
1248 | tcg_gen_mov_i64(ret, arg1); | |
11d11d61 RH |
1249 | } else if (TCG_TARGET_REG_BITS == 64) { |
1250 | tcg_gen_add_i64(ret, arg1, tcg_constant_i64(arg2)); | |
951c6300 | 1251 | } else { |
11d11d61 RH |
1252 | tcg_gen_add2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), |
1253 | TCGV_LOW(arg1), TCGV_HIGH(arg1), | |
1254 | tcg_constant_i32(arg2), tcg_constant_i32(arg2 >> 32)); | |
951c6300 RH |
1255 | } |
1256 | } | |
1257 | ||
1258 | void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2) | |
1259 | { | |
1260 | if (arg1 == 0 && TCG_TARGET_HAS_neg_i64) { | |
1261 | /* Don't recurse with tcg_gen_neg_i64. */ | |
1262 | tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg2); | |
11d11d61 RH |
1263 | } else if (TCG_TARGET_REG_BITS == 64) { |
1264 | tcg_gen_sub_i64(ret, tcg_constant_i64(arg1), arg2); | |
951c6300 | 1265 | } else { |
11d11d61 RH |
1266 | tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), |
1267 | tcg_constant_i32(arg1), tcg_constant_i32(arg1 >> 32), | |
1268 | TCGV_LOW(arg2), TCGV_HIGH(arg2)); | |
951c6300 RH |
1269 | } |
1270 | } | |
1271 | ||
1272 | void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | |
1273 | { | |
1274 | /* some cases can be optimized here */ | |
1275 | if (arg2 == 0) { | |
1276 | tcg_gen_mov_i64(ret, arg1); | |
11d11d61 RH |
1277 | } else if (TCG_TARGET_REG_BITS == 64) { |
1278 | tcg_gen_sub_i64(ret, arg1, tcg_constant_i64(arg2)); | |
951c6300 | 1279 | } else { |
11d11d61 RH |
1280 | tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), |
1281 | TCGV_LOW(arg1), TCGV_HIGH(arg1), | |
1282 | tcg_constant_i32(arg2), tcg_constant_i32(arg2 >> 32)); | |
951c6300 RH |
1283 | } |
1284 | } | |
1285 | ||
474b2e8f | 1286 | void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) |
951c6300 | 1287 | { |
3a13c3f3 RH |
1288 | if (TCG_TARGET_REG_BITS == 32) { |
1289 | tcg_gen_andi_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2); | |
1290 | tcg_gen_andi_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32); | |
1291 | return; | |
1292 | } | |
1293 | ||
951c6300 RH |
1294 | /* Some cases can be optimized here. */ |
1295 | switch (arg2) { | |
1296 | case 0: | |
1297 | tcg_gen_movi_i64(ret, 0); | |
1298 | return; | |
474b2e8f | 1299 | case -1: |
951c6300 RH |
1300 | tcg_gen_mov_i64(ret, arg1); |
1301 | return; | |
474b2e8f | 1302 | case 0xff: |
951c6300 RH |
1303 | /* Don't recurse with tcg_gen_ext8u_i64. */ |
1304 | if (TCG_TARGET_HAS_ext8u_i64) { | |
1305 | tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg1); | |
1306 | return; | |
1307 | } | |
1308 | break; | |
474b2e8f | 1309 | case 0xffff: |
951c6300 RH |
1310 | if (TCG_TARGET_HAS_ext16u_i64) { |
1311 | tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg1); | |
1312 | return; | |
1313 | } | |
1314 | break; | |
474b2e8f | 1315 | case 0xffffffffu: |
951c6300 RH |
1316 | if (TCG_TARGET_HAS_ext32u_i64) { |
1317 | tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg1); | |
1318 | return; | |
1319 | } | |
1320 | break; | |
1321 | } | |
11d11d61 RH |
1322 | |
1323 | tcg_gen_and_i64(ret, arg1, tcg_constant_i64(arg2)); | |
951c6300 RH |
1324 | } |
1325 | ||
1326 | void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | |
1327 | { | |
3a13c3f3 RH |
1328 | if (TCG_TARGET_REG_BITS == 32) { |
1329 | tcg_gen_ori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2); | |
1330 | tcg_gen_ori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32); | |
1331 | return; | |
1332 | } | |
951c6300 RH |
1333 | /* Some cases can be optimized here. */ |
1334 | if (arg2 == -1) { | |
1335 | tcg_gen_movi_i64(ret, -1); | |
1336 | } else if (arg2 == 0) { | |
1337 | tcg_gen_mov_i64(ret, arg1); | |
1338 | } else { | |
11d11d61 | 1339 | tcg_gen_or_i64(ret, arg1, tcg_constant_i64(arg2)); |
951c6300 | 1340 | } |
951c6300 RH |
1341 | } |
1342 | ||
1343 | void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | |
1344 | { | |
3a13c3f3 RH |
1345 | if (TCG_TARGET_REG_BITS == 32) { |
1346 | tcg_gen_xori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2); | |
1347 | tcg_gen_xori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32); | |
1348 | return; | |
1349 | } | |
951c6300 RH |
1350 | /* Some cases can be optimized here. */ |
1351 | if (arg2 == 0) { | |
1352 | tcg_gen_mov_i64(ret, arg1); | |
1353 | } else if (arg2 == -1 && TCG_TARGET_HAS_not_i64) { | |
1354 | /* Don't recurse with tcg_gen_not_i64. */ | |
1355 | tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg1); | |
1356 | } else { | |
11d11d61 | 1357 | tcg_gen_xor_i64(ret, arg1, tcg_constant_i64(arg2)); |
951c6300 | 1358 | } |
951c6300 RH |
1359 | } |
1360 | ||
951c6300 RH |
1361 | static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1, |
1362 | unsigned c, bool right, bool arith) | |
1363 | { | |
1364 | tcg_debug_assert(c < 64); | |
1365 | if (c == 0) { | |
1366 | tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1)); | |
1367 | tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1)); | |
1368 | } else if (c >= 32) { | |
1369 | c -= 32; | |
1370 | if (right) { | |
1371 | if (arith) { | |
1372 | tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c); | |
1373 | tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31); | |
1374 | } else { | |
1375 | tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c); | |
1376 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | |
1377 | } | |
1378 | } else { | |
1379 | tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c); | |
1380 | tcg_gen_movi_i32(TCGV_LOW(ret), 0); | |
1381 | } | |
02616bad RH |
1382 | } else if (right) { |
1383 | if (TCG_TARGET_HAS_extract2_i32) { | |
1384 | tcg_gen_extract2_i32(TCGV_LOW(ret), | |
1385 | TCGV_LOW(arg1), TCGV_HIGH(arg1), c); | |
1386 | } else { | |
951c6300 | 1387 | tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c); |
02616bad RH |
1388 | tcg_gen_deposit_i32(TCGV_LOW(ret), TCGV_LOW(ret), |
1389 | TCGV_HIGH(arg1), 32 - c, c); | |
1390 | } | |
1391 | if (arith) { | |
1392 | tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c); | |
1393 | } else { | |
1394 | tcg_gen_shri_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c); | |
1395 | } | |
1396 | } else { | |
1397 | if (TCG_TARGET_HAS_extract2_i32) { | |
1398 | tcg_gen_extract2_i32(TCGV_HIGH(ret), | |
1399 | TCGV_LOW(arg1), TCGV_HIGH(arg1), 32 - c); | |
951c6300 | 1400 | } else { |
02616bad | 1401 | TCGv_i32 t0 = tcg_temp_new_i32(); |
951c6300 | 1402 | tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c); |
02616bad RH |
1403 | tcg_gen_deposit_i32(TCGV_HIGH(ret), t0, |
1404 | TCGV_HIGH(arg1), c, 32 - c); | |
1405 | tcg_temp_free_i32(t0); | |
951c6300 | 1406 | } |
02616bad | 1407 | tcg_gen_shli_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c); |
951c6300 RH |
1408 | } |
1409 | } | |
1410 | ||
474b2e8f | 1411 | void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) |
951c6300 | 1412 | { |
474b2e8f | 1413 | tcg_debug_assert(arg2 >= 0 && arg2 < 64); |
3a13c3f3 RH |
1414 | if (TCG_TARGET_REG_BITS == 32) { |
1415 | tcg_gen_shifti_i64(ret, arg1, arg2, 0, 0); | |
1416 | } else if (arg2 == 0) { | |
951c6300 RH |
1417 | tcg_gen_mov_i64(ret, arg1); |
1418 | } else { | |
11d11d61 | 1419 | tcg_gen_shl_i64(ret, arg1, tcg_constant_i64(arg2)); |
951c6300 RH |
1420 | } |
1421 | } | |
1422 | ||
474b2e8f | 1423 | void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) |
951c6300 | 1424 | { |
474b2e8f | 1425 | tcg_debug_assert(arg2 >= 0 && arg2 < 64); |
3a13c3f3 RH |
1426 | if (TCG_TARGET_REG_BITS == 32) { |
1427 | tcg_gen_shifti_i64(ret, arg1, arg2, 1, 0); | |
1428 | } else if (arg2 == 0) { | |
951c6300 RH |
1429 | tcg_gen_mov_i64(ret, arg1); |
1430 | } else { | |
11d11d61 | 1431 | tcg_gen_shr_i64(ret, arg1, tcg_constant_i64(arg2)); |
951c6300 RH |
1432 | } |
1433 | } | |
1434 | ||
474b2e8f | 1435 | void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) |
951c6300 | 1436 | { |
474b2e8f | 1437 | tcg_debug_assert(arg2 >= 0 && arg2 < 64); |
3a13c3f3 RH |
1438 | if (TCG_TARGET_REG_BITS == 32) { |
1439 | tcg_gen_shifti_i64(ret, arg1, arg2, 1, 1); | |
1440 | } else if (arg2 == 0) { | |
951c6300 RH |
1441 | tcg_gen_mov_i64(ret, arg1); |
1442 | } else { | |
11d11d61 | 1443 | tcg_gen_sar_i64(ret, arg1, tcg_constant_i64(arg2)); |
951c6300 RH |
1444 | } |
1445 | } | |
951c6300 | 1446 | |
42a268c2 | 1447 | void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l) |
951c6300 RH |
1448 | { |
1449 | if (cond == TCG_COND_ALWAYS) { | |
42a268c2 | 1450 | tcg_gen_br(l); |
951c6300 | 1451 | } else if (cond != TCG_COND_NEVER) { |
d88a117e | 1452 | l->refs++; |
3a13c3f3 RH |
1453 | if (TCG_TARGET_REG_BITS == 32) { |
1454 | tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1), | |
1455 | TCGV_HIGH(arg1), TCGV_LOW(arg2), | |
42a268c2 | 1456 | TCGV_HIGH(arg2), cond, label_arg(l)); |
3a13c3f3 | 1457 | } else { |
42a268c2 RH |
1458 | tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond, |
1459 | label_arg(l)); | |
3a13c3f3 | 1460 | } |
951c6300 RH |
1461 | } |
1462 | } | |
1463 | ||
42a268c2 | 1464 | void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *l) |
951c6300 | 1465 | { |
11d11d61 RH |
1466 | if (TCG_TARGET_REG_BITS == 64) { |
1467 | tcg_gen_brcond_i64(cond, arg1, tcg_constant_i64(arg2), l); | |
1468 | } else if (cond == TCG_COND_ALWAYS) { | |
42a268c2 | 1469 | tcg_gen_br(l); |
951c6300 | 1470 | } else if (cond != TCG_COND_NEVER) { |
11d11d61 RH |
1471 | l->refs++; |
1472 | tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, | |
1473 | TCGV_LOW(arg1), TCGV_HIGH(arg1), | |
1474 | tcg_constant_i32(arg2), | |
1475 | tcg_constant_i32(arg2 >> 32), | |
1476 | cond, label_arg(l)); | |
951c6300 RH |
1477 | } |
1478 | } | |
1479 | ||
1480 | void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret, | |
1481 | TCGv_i64 arg1, TCGv_i64 arg2) | |
1482 | { | |
1483 | if (cond == TCG_COND_ALWAYS) { | |
1484 | tcg_gen_movi_i64(ret, 1); | |
1485 | } else if (cond == TCG_COND_NEVER) { | |
1486 | tcg_gen_movi_i64(ret, 0); | |
1487 | } else { | |
3a13c3f3 RH |
1488 | if (TCG_TARGET_REG_BITS == 32) { |
1489 | tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret), | |
1490 | TCGV_LOW(arg1), TCGV_HIGH(arg1), | |
1491 | TCGV_LOW(arg2), TCGV_HIGH(arg2), cond); | |
1492 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | |
1493 | } else { | |
1494 | tcg_gen_op4i_i64(INDEX_op_setcond_i64, ret, arg1, arg2, cond); | |
1495 | } | |
951c6300 RH |
1496 | } |
1497 | } | |
1498 | ||
1499 | void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret, | |
1500 | TCGv_i64 arg1, int64_t arg2) | |
1501 | { | |
11d11d61 RH |
1502 | if (TCG_TARGET_REG_BITS == 64) { |
1503 | tcg_gen_setcond_i64(cond, ret, arg1, tcg_constant_i64(arg2)); | |
1504 | } else if (cond == TCG_COND_ALWAYS) { | |
1505 | tcg_gen_movi_i64(ret, 1); | |
1506 | } else if (cond == TCG_COND_NEVER) { | |
1507 | tcg_gen_movi_i64(ret, 0); | |
1508 | } else { | |
1509 | tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret), | |
1510 | TCGV_LOW(arg1), TCGV_HIGH(arg1), | |
1511 | tcg_constant_i32(arg2), | |
1512 | tcg_constant_i32(arg2 >> 32), cond); | |
1513 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | |
1514 | } | |
951c6300 RH |
1515 | } |
1516 | ||
1517 | void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | |
1518 | { | |
b2e3ae94 RH |
1519 | if (arg2 == 0) { |
1520 | tcg_gen_movi_i64(ret, 0); | |
1521 | } else if (is_power_of_2(arg2)) { | |
1522 | tcg_gen_shli_i64(ret, arg1, ctz64(arg2)); | |
1523 | } else { | |
1524 | TCGv_i64 t0 = tcg_const_i64(arg2); | |
1525 | tcg_gen_mul_i64(ret, arg1, t0); | |
1526 | tcg_temp_free_i64(t0); | |
1527 | } | |
951c6300 RH |
1528 | } |
1529 | ||
1530 | void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) | |
1531 | { | |
1532 | if (TCG_TARGET_HAS_div_i64) { | |
1533 | tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2); | |
1534 | } else if (TCG_TARGET_HAS_div2_i64) { | |
1535 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
1536 | tcg_gen_sari_i64(t0, arg1, 63); | |
1537 | tcg_gen_op5_i64(INDEX_op_div2_i64, ret, t0, arg1, t0, arg2); | |
1538 | tcg_temp_free_i64(t0); | |
1539 | } else { | |
1540 | gen_helper_div_i64(ret, arg1, arg2); | |
1541 | } | |
1542 | } | |
1543 | ||
1544 | void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) | |
1545 | { | |
1546 | if (TCG_TARGET_HAS_rem_i64) { | |
1547 | tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2); | |
1548 | } else if (TCG_TARGET_HAS_div_i64) { | |
1549 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
1550 | tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2); | |
1551 | tcg_gen_mul_i64(t0, t0, arg2); | |
1552 | tcg_gen_sub_i64(ret, arg1, t0); | |
1553 | tcg_temp_free_i64(t0); | |
1554 | } else if (TCG_TARGET_HAS_div2_i64) { | |
1555 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
1556 | tcg_gen_sari_i64(t0, arg1, 63); | |
1557 | tcg_gen_op5_i64(INDEX_op_div2_i64, t0, ret, arg1, t0, arg2); | |
1558 | tcg_temp_free_i64(t0); | |
1559 | } else { | |
1560 | gen_helper_rem_i64(ret, arg1, arg2); | |
1561 | } | |
1562 | } | |
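/*
 * Illustrative note: when only division is available, the remainder
 * is recovered from the quotient as rem = arg1 - (arg1 / arg2) * arg2.
 * For example, 7 rem 3 = 7 - 2 * 3 = 1; since the signed division
 * truncates toward zero, the remainder keeps the sign of arg1
 * (e.g. -7 rem 3 = -7 - (-2) * 3 = -1).
 */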
1563 | ||
1564 | void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) | |
1565 | { | |
1566 | if (TCG_TARGET_HAS_div_i64) { | |
1567 | tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2); | |
1568 | } else if (TCG_TARGET_HAS_div2_i64) { | |
1569 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
1570 | tcg_gen_movi_i64(t0, 0); | |
1571 | tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2); | |
1572 | tcg_temp_free_i64(t0); | |
1573 | } else { | |
1574 | gen_helper_divu_i64(ret, arg1, arg2); | |
1575 | } | |
1576 | } | |
1577 | ||
1578 | void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) | |
1579 | { | |
1580 | if (TCG_TARGET_HAS_rem_i64) { | |
1581 | tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2); | |
1582 | } else if (TCG_TARGET_HAS_div_i64) { | |
1583 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
1584 | tcg_gen_op3_i64(INDEX_op_divu_i64, t0, arg1, arg2); | |
1585 | tcg_gen_mul_i64(t0, t0, arg2); | |
1586 | tcg_gen_sub_i64(ret, arg1, t0); | |
1587 | tcg_temp_free_i64(t0); | |
1588 | } else if (TCG_TARGET_HAS_div2_i64) { | |
1589 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
1590 | tcg_gen_movi_i64(t0, 0); | |
1591 | tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2); | |
1592 | tcg_temp_free_i64(t0); | |
1593 | } else { | |
1594 | gen_helper_remu_i64(ret, arg1, arg2); | |
1595 | } | |
1596 | } | |
1597 | ||
1598 | void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg) | |
1599 | { | |
3a13c3f3 RH |
1600 | if (TCG_TARGET_REG_BITS == 32) { |
1601 | tcg_gen_ext8s_i32(TCGV_LOW(ret), TCGV_LOW(arg)); | |
1602 | tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); | |
1603 | } else if (TCG_TARGET_HAS_ext8s_i64) { | |
951c6300 RH |
1604 | tcg_gen_op2_i64(INDEX_op_ext8s_i64, ret, arg); |
1605 | } else { | |
1606 | tcg_gen_shli_i64(ret, arg, 56); | |
1607 | tcg_gen_sari_i64(ret, ret, 56); | |
1608 | } | |
951c6300 RH |
1609 | } |
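/*
 * Illustrative note: the shift-pair fallback sign-extends by moving
 * the byte to the top of the register and shifting it back
 * arithmetically, e.g. arg = 0x00000000000000f0 gives
 * (arg << 56) = 0xf000000000000000, and the arithmetic >> 56
 * yields 0xfffffffffffffff0.
 */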
1610 | ||
1611 | void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg) | |
1612 | { | |
3a13c3f3 RH |
1613 | if (TCG_TARGET_REG_BITS == 32) { |
1614 | tcg_gen_ext16s_i32(TCGV_LOW(ret), TCGV_LOW(arg)); | |
1615 | tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); | |
1616 | } else if (TCG_TARGET_HAS_ext16s_i64) { | |
951c6300 RH |
1617 | tcg_gen_op2_i64(INDEX_op_ext16s_i64, ret, arg); |
1618 | } else { | |
1619 | tcg_gen_shli_i64(ret, arg, 48); | |
1620 | tcg_gen_sari_i64(ret, ret, 48); | |
1621 | } | |
951c6300 RH |
1622 | } |
1623 | ||
1624 | void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg) | |
1625 | { | |
3a13c3f3 RH |
1626 | if (TCG_TARGET_REG_BITS == 32) { |
1627 | tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg)); | |
1628 | tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); | |
1629 | } else if (TCG_TARGET_HAS_ext32s_i64) { | |
951c6300 RH |
1630 | tcg_gen_op2_i64(INDEX_op_ext32s_i64, ret, arg); |
1631 | } else { | |
1632 | tcg_gen_shli_i64(ret, arg, 32); | |
1633 | tcg_gen_sari_i64(ret, ret, 32); | |
1634 | } | |
951c6300 RH |
1635 | } |
1636 | ||
1637 | void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg) | |
1638 | { | |
3a13c3f3 RH |
1639 | if (TCG_TARGET_REG_BITS == 32) { |
1640 | tcg_gen_ext8u_i32(TCGV_LOW(ret), TCGV_LOW(arg)); | |
1641 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | |
1642 | } else if (TCG_TARGET_HAS_ext8u_i64) { | |
951c6300 RH |
1643 | tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg); |
1644 | } else { | |
1645 | tcg_gen_andi_i64(ret, arg, 0xffu); | |
1646 | } | |
951c6300 RH |
1647 | } |
1648 | ||
1649 | void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg) | |
1650 | { | |
3a13c3f3 RH |
1651 | if (TCG_TARGET_REG_BITS == 32) { |
1652 | tcg_gen_ext16u_i32(TCGV_LOW(ret), TCGV_LOW(arg)); | |
1653 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | |
1654 | } else if (TCG_TARGET_HAS_ext16u_i64) { | |
951c6300 RH |
1655 | tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg); |
1656 | } else { | |
1657 | tcg_gen_andi_i64(ret, arg, 0xffffu); | |
1658 | } | |
951c6300 RH |
1659 | } |
1660 | ||
1661 | void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg) | |
1662 | { | |
3a13c3f3 RH |
1663 | if (TCG_TARGET_REG_BITS == 32) { |
1664 | tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg)); | |
1665 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | |
1666 | } else if (TCG_TARGET_HAS_ext32u_i64) { | |
951c6300 RH |
1667 | tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg); |
1668 | } else { | |
1669 | tcg_gen_andi_i64(ret, arg, 0xffffffffu); | |
1670 | } | |
951c6300 RH |
1671 | } |
1672 | ||
2b836c2a | 1673 | void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags) |
951c6300 | 1674 | { |
2b836c2a RH |
1675 | /* Only one extension flag may be present. */ |
1676 | tcg_debug_assert(!(flags & TCG_BSWAP_OS) || !(flags & TCG_BSWAP_OZ)); | |
1677 | ||
3a13c3f3 | 1678 | if (TCG_TARGET_REG_BITS == 32) { |
2b836c2a RH |
1679 | tcg_gen_bswap16_i32(TCGV_LOW(ret), TCGV_LOW(arg), flags); |
1680 | if (flags & TCG_BSWAP_OS) { | |
1681 | tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); | |
1682 | } else { | |
1683 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | |
1684 | } | |
3a13c3f3 | 1685 | } else if (TCG_TARGET_HAS_bswap16_i64) { |
2b836c2a | 1686 | tcg_gen_op3i_i64(INDEX_op_bswap16_i64, ret, arg, flags); |
951c6300 RH |
1687 | } else { |
1688 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
2b836c2a | 1689 | TCGv_i64 t1 = tcg_temp_new_i64(); |
951c6300 | 1690 | |
2b836c2a RH |
1691 | tcg_gen_shri_i64(t0, arg, 8); |
1692 | if (!(flags & TCG_BSWAP_IZ)) { | |
1693 | tcg_gen_ext8u_i64(t0, t0); | |
1694 | } | |
1695 | ||
1696 | if (flags & TCG_BSWAP_OS) { | |
1697 | tcg_gen_shli_i64(t1, arg, 56); | |
1698 | tcg_gen_sari_i64(t1, t1, 48); | |
1699 | } else if (flags & TCG_BSWAP_OZ) { | |
1700 | tcg_gen_ext8u_i64(t1, arg); | |
1701 | tcg_gen_shli_i64(t1, t1, 8); | |
1702 | } else { | |
1703 | tcg_gen_shli_i64(t1, arg, 8); | |
1704 | } | |
1705 | ||
1706 | tcg_gen_or_i64(ret, t0, t1); | |
951c6300 | 1707 | tcg_temp_free_i64(t0); |
2b836c2a | 1708 | tcg_temp_free_i64(t1); |
951c6300 | 1709 | } |
951c6300 RH |
1710 | } |
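/*
 * Illustrative note on the generic expansion above: t0 holds byte 1
 * moved down to bits 0..7 and t1 holds byte 0 moved up to bits 8..15,
 * so the low two bytes are exchanged.  With TCG_BSWAP_OS the upper
 * bits become the sign extension of the new bit 15, with TCG_BSWAP_OZ
 * they become zero, and with neither flag they are left unspecified.
 * E.g. with TCG_BSWAP_IZ | TCG_BSWAP_OZ and arg = 0x1234, the result
 * is 0x3412.
 */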
1711 | ||
2b836c2a | 1712 | void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags) |
951c6300 | 1713 | { |
2b836c2a RH |
1714 | /* Only one extension flag may be present. */ |
1715 | tcg_debug_assert(!(flags & TCG_BSWAP_OS) || !(flags & TCG_BSWAP_OZ)); | |
1716 | ||
3a13c3f3 RH |
1717 | if (TCG_TARGET_REG_BITS == 32) { |
1718 | tcg_gen_bswap32_i32(TCGV_LOW(ret), TCGV_LOW(arg)); | |
2b836c2a RH |
1719 | if (flags & TCG_BSWAP_OS) { |
1720 | tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); | |
1721 | } else { | |
1722 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | |
1723 | } | |
3a13c3f3 | 1724 | } else if (TCG_TARGET_HAS_bswap32_i64) { |
2b836c2a | 1725 | tcg_gen_op3i_i64(INDEX_op_bswap32_i64, ret, arg, flags); |
951c6300 | 1726 | } else { |
a686dc71 RH |
1727 | TCGv_i64 t0 = tcg_temp_new_i64(); |
1728 | TCGv_i64 t1 = tcg_temp_new_i64(); | |
11d11d61 | 1729 | TCGv_i64 t2 = tcg_constant_i64(0x00ff00ff); |
951c6300 | 1730 | |
2b836c2a RH |
1731 | /* arg = xxxxabcd */ |
1732 | tcg_gen_shri_i64(t0, arg, 8); /* t0 = .xxxxabc */ | |
1733 | tcg_gen_and_i64(t1, arg, t2); /* t1 = .....b.d */ | |
1734 | tcg_gen_and_i64(t0, t0, t2); /* t0 = .....a.c */ | |
1735 | tcg_gen_shli_i64(t1, t1, 8); /* t1 = ....b.d. */ | |
1736 | tcg_gen_or_i64(ret, t0, t1); /* ret = ....badc */ | |
1737 | ||
1738 | tcg_gen_shli_i64(t1, ret, 48); /* t1 = dc...... */ | |
1739 | tcg_gen_shri_i64(t0, ret, 16); /* t0 = ......ba */ | |
1740 | if (flags & TCG_BSWAP_OS) { | |
1741 | tcg_gen_sari_i64(t1, t1, 32); /* t1 = ssssdc.. */ | |
1742 | } else { | |
1743 | tcg_gen_shri_i64(t1, t1, 32); /* t1 = ....dc.. */ | |
1744 | } | |
1745 | tcg_gen_or_i64(ret, t0, t1); /* ret = ssssdcba */ | |
951c6300 | 1746 | |
951c6300 RH |
1747 | tcg_temp_free_i64(t0); |
1748 | tcg_temp_free_i64(t1); | |
1749 | } | |
951c6300 RH |
1750 | } |
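/*
 * Illustrative note: the 0x00ff00ff mask swaps adjacent bytes first
 * (abcd -> badc), and the <<48 / >>16 pair then swaps the two
 * 16-bit halves of that value, leaving dcba in the low 32 bits.
 * With TCG_BSWAP_OS the high half is the sign extension of the new
 * bit 31, otherwise it is zero: e.g. arg = 0x112233f4 gives
 * 0x00000000f4332211 (OZ) or 0xfffffffff4332211 (OS).
 */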
1751 | ||
1752 | void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg) | |
1753 | { | |
3a13c3f3 RH |
1754 | if (TCG_TARGET_REG_BITS == 32) { |
1755 | TCGv_i32 t0, t1; | |
1756 | t0 = tcg_temp_new_i32(); | |
1757 | t1 = tcg_temp_new_i32(); | |
951c6300 | 1758 | |
3a13c3f3 RH |
1759 | tcg_gen_bswap32_i32(t0, TCGV_LOW(arg)); |
1760 | tcg_gen_bswap32_i32(t1, TCGV_HIGH(arg)); | |
1761 | tcg_gen_mov_i32(TCGV_LOW(ret), t1); | |
1762 | tcg_gen_mov_i32(TCGV_HIGH(ret), t0); | |
1763 | tcg_temp_free_i32(t0); | |
1764 | tcg_temp_free_i32(t1); | |
1765 | } else if (TCG_TARGET_HAS_bswap64_i64) { | |
587195bd | 1766 | tcg_gen_op3i_i64(INDEX_op_bswap64_i64, ret, arg, 0); |
951c6300 RH |
1767 | } else { |
1768 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
1769 | TCGv_i64 t1 = tcg_temp_new_i64(); | |
9e821eab | 1770 | TCGv_i64 t2 = tcg_temp_new_i64(); |
951c6300 | 1771 | |
9e821eab RH |
1772 | /* arg = abcdefgh */ |
1773 | tcg_gen_movi_i64(t2, 0x00ff00ff00ff00ffull); | |
1774 | tcg_gen_shri_i64(t0, arg, 8); /* t0 = .abcdefg */ | |
1775 | tcg_gen_and_i64(t1, arg, t2); /* t1 = .b.d.f.h */ | |
1776 | tcg_gen_and_i64(t0, t0, t2); /* t0 = .a.c.e.g */ | |
1777 | tcg_gen_shli_i64(t1, t1, 8); /* t1 = b.d.f.h. */ | |
1778 | tcg_gen_or_i64(ret, t0, t1); /* ret = badcfehg */ | |
1779 | ||
1780 | tcg_gen_movi_i64(t2, 0x0000ffff0000ffffull); | |
1781 | tcg_gen_shri_i64(t0, ret, 16); /* t0 = ..badcfe */ | |
1782 | tcg_gen_and_i64(t1, ret, t2); /* t1 = ..dc..hg */ | |
1783 | tcg_gen_and_i64(t0, t0, t2); /* t0 = ..ba..fe */ | |
1784 | tcg_gen_shli_i64(t1, t1, 16); /* t1 = dc..hg.. */ | |
1785 | tcg_gen_or_i64(ret, t0, t1); /* ret = dcbahgfe */ | |
1786 | ||
1787 | tcg_gen_shri_i64(t0, ret, 32); /* t0 = ....dcba */ | |
1788 | tcg_gen_shli_i64(t1, ret, 32); /* t1 = hgfe.... */ | |
1789 | tcg_gen_or_i64(ret, t0, t1); /* ret = hgfedcba */ | |
951c6300 | 1790 | |
951c6300 RH |
1791 | tcg_temp_free_i64(t0); |
1792 | tcg_temp_free_i64(t1); | |
9e821eab | 1793 | tcg_temp_free_i64(t2); |
951c6300 | 1794 | } |
951c6300 RH |
1795 | } |
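/*
 * Illustrative note: the open-coded bswap64 reverses the bytes in
 * three passes, swapping adjacent bytes, then adjacent 16-bit pairs,
 * then the two 32-bit halves, i.e. log2(8) steps rather than moving
 * eight bytes individually.  For example, 0x0102030405060708
 * becomes 0x0807060504030201.
 */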
1796 | ||
1797 | void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg) | |
1798 | { | |
3a13c3f3 RH |
1799 | if (TCG_TARGET_REG_BITS == 32) { |
1800 | tcg_gen_not_i32(TCGV_LOW(ret), TCGV_LOW(arg)); | |
1801 | tcg_gen_not_i32(TCGV_HIGH(ret), TCGV_HIGH(arg)); | |
1802 | } else if (TCG_TARGET_HAS_not_i64) { | |
951c6300 RH |
1803 | tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg); |
1804 | } else { | |
1805 | tcg_gen_xori_i64(ret, arg, -1); | |
1806 | } | |
951c6300 RH |
1807 | } |
1808 | ||
1809 | void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) | |
1810 | { | |
3a13c3f3 RH |
1811 | if (TCG_TARGET_REG_BITS == 32) { |
1812 | tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); | |
1813 | tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); | |
1814 | } else if (TCG_TARGET_HAS_andc_i64) { | |
951c6300 RH |
1815 | tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2); |
1816 | } else { | |
1817 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
1818 | tcg_gen_not_i64(t0, arg2); | |
1819 | tcg_gen_and_i64(ret, arg1, t0); | |
1820 | tcg_temp_free_i64(t0); | |
1821 | } | |
951c6300 RH |
1822 | } |
1823 | ||
1824 | void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) | |
1825 | { | |
3a13c3f3 RH |
1826 | if (TCG_TARGET_REG_BITS == 32) { |
1827 | tcg_gen_eqv_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); | |
1828 | tcg_gen_eqv_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); | |
1829 | } else if (TCG_TARGET_HAS_eqv_i64) { | |
951c6300 RH |
1830 | tcg_gen_op3_i64(INDEX_op_eqv_i64, ret, arg1, arg2); |
1831 | } else { | |
1832 | tcg_gen_xor_i64(ret, arg1, arg2); | |
1833 | tcg_gen_not_i64(ret, ret); | |
1834 | } | |
951c6300 RH |
1835 | } |
1836 | ||
1837 | void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) | |
1838 | { | |
3a13c3f3 RH |
1839 | if (TCG_TARGET_REG_BITS == 32) { |
1840 | tcg_gen_nand_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); | |
1841 | tcg_gen_nand_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); | |
1842 | } else if (TCG_TARGET_HAS_nand_i64) { | |
951c6300 RH |
1843 | tcg_gen_op3_i64(INDEX_op_nand_i64, ret, arg1, arg2); |
1844 | } else { | |
1845 | tcg_gen_and_i64(ret, arg1, arg2); | |
1846 | tcg_gen_not_i64(ret, ret); | |
1847 | } | |
951c6300 RH |
1848 | } |
1849 | ||
1850 | void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) | |
1851 | { | |
3a13c3f3 RH |
1852 | if (TCG_TARGET_REG_BITS == 32) { |
1853 | tcg_gen_nor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); | |
1854 | tcg_gen_nor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); | |
1855 | } else if (TCG_TARGET_HAS_nor_i64) { | |
951c6300 RH |
1856 | tcg_gen_op3_i64(INDEX_op_nor_i64, ret, arg1, arg2); |
1857 | } else { | |
1858 | tcg_gen_or_i64(ret, arg1, arg2); | |
1859 | tcg_gen_not_i64(ret, ret); | |
1860 | } | |
951c6300 RH |
1861 | } |
1862 | ||
1863 | void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) | |
1864 | { | |
3a13c3f3 RH |
1865 | if (TCG_TARGET_REG_BITS == 32) { |
1866 | tcg_gen_orc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); | |
1867 | tcg_gen_orc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); | |
1868 | } else if (TCG_TARGET_HAS_orc_i64) { | |
951c6300 RH |
1869 | tcg_gen_op3_i64(INDEX_op_orc_i64, ret, arg1, arg2); |
1870 | } else { | |
1871 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
1872 | tcg_gen_not_i64(t0, arg2); | |
1873 | tcg_gen_or_i64(ret, arg1, t0); | |
1874 | tcg_temp_free_i64(t0); | |
1875 | } | |
951c6300 RH |
1876 | } |
1877 | ||
0e28d006 RH |
1878 | void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) |
1879 | { | |
1880 | if (TCG_TARGET_HAS_clz_i64) { | |
1881 | tcg_gen_op3_i64(INDEX_op_clz_i64, ret, arg1, arg2); | |
1882 | } else { | |
1883 | gen_helper_clz_i64(ret, arg1, arg2); | |
1884 | } | |
1885 | } | |
1886 | ||
1887 | void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) | |
1888 | { | |
1889 | if (TCG_TARGET_REG_BITS == 32 | |
1890 | && TCG_TARGET_HAS_clz_i32 | |
1891 | && arg2 <= 0xffffffffu) { | |
11d11d61 RH |
1892 | TCGv_i32 t = tcg_temp_new_i32(); |
1893 | tcg_gen_clzi_i32(t, TCGV_LOW(arg1), arg2 - 32); | |
0e28d006 RH |
1894 | tcg_gen_addi_i32(t, t, 32); |
1895 | tcg_gen_clz_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), t); | |
1896 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | |
1897 | tcg_temp_free_i32(t); | |
1898 | } else { | |
11d11d61 RH |
1899 | TCGv_i64 t0 = tcg_const_i64(arg2); |
1900 | tcg_gen_clz_i64(ret, arg1, t0); | |
1901 | tcg_temp_free_i64(t0); | |
0e28d006 RH |
1902 | } |
1903 | } | |
1904 | ||
1905 | void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) | |
1906 | { | |
1907 | if (TCG_TARGET_HAS_ctz_i64) { | |
1908 | tcg_gen_op3_i64(INDEX_op_ctz_i64, ret, arg1, arg2); | |
14e99210 RH |
1909 | } else if (TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i64) { |
1910 | TCGv_i64 z, t = tcg_temp_new_i64(); | |
1911 | ||
1912 | if (TCG_TARGET_HAS_ctpop_i64) { | |
1913 | tcg_gen_subi_i64(t, arg1, 1); | |
1914 | tcg_gen_andc_i64(t, t, arg1); | |
1915 | tcg_gen_ctpop_i64(t, t); | |
1916 | } else { | |
1917 | /* Since all non-x86 hosts have clz(0) == 64, don't fight it. */ | |
1918 | tcg_gen_neg_i64(t, arg1); | |
1919 | tcg_gen_and_i64(t, t, arg1); | |
1920 | tcg_gen_clzi_i64(t, t, 64); | |
1921 | tcg_gen_xori_i64(t, t, 63); | |
1922 | } | |
11d11d61 | 1923 | z = tcg_constant_i64(0); |
14e99210 RH |
1924 | tcg_gen_movcond_i64(TCG_COND_EQ, ret, arg1, z, arg2, t); |
1925 | tcg_temp_free_i64(t); | |
1926 | tcg_temp_free_i64(z); | |
0e28d006 RH |
1927 | } else { |
1928 | gen_helper_ctz_i64(ret, arg1, arg2); | |
1929 | } | |
1930 | } | |
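/*
 * Illustrative note: in the ctpop-based path, (arg1 - 1) & ~arg1
 * sets exactly the bits below the least significant set bit, so its
 * population count equals ctz(arg1); e.g. arg1 = 0b101000 gives
 * 0b000111 and a count of 3.  The clz-based path isolates the
 * lowest set bit with -arg1 & arg1 and converts clz to ctz by
 * xoring with 63.  The final movcond supplies arg2 when arg1 == 0.
 */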
1931 | ||
1932 | void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) | |
1933 | { | |
1934 | if (TCG_TARGET_REG_BITS == 32 | |
1935 | && TCG_TARGET_HAS_ctz_i32 | |
1936 | && arg2 <= 0xffffffffu) { | |
11d11d61 RH |
1937 | TCGv_i32 t32 = tcg_temp_new_i32(); |
1938 | tcg_gen_ctzi_i32(t32, TCGV_HIGH(arg1), arg2 - 32); | |
0e28d006 RH |
1939 | tcg_gen_addi_i32(t32, t32, 32); |
1940 | tcg_gen_ctz_i32(TCGV_LOW(ret), TCGV_LOW(arg1), t32); | |
1941 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | |
1942 | tcg_temp_free_i32(t32); | |
14e99210 RH |
1943 | } else if (!TCG_TARGET_HAS_ctz_i64 |
1944 | && TCG_TARGET_HAS_ctpop_i64 | |
1945 | && arg2 == 64) { | |
1946 | /* This equivalence has the advantage of not requiring a fixup. */ | |
1947 | TCGv_i64 t = tcg_temp_new_i64(); | |
1948 | tcg_gen_subi_i64(t, arg1, 1); | |
1949 | tcg_gen_andc_i64(t, t, arg1); | |
1950 | tcg_gen_ctpop_i64(ret, t); | |
1951 | tcg_temp_free_i64(t); | |
0e28d006 | 1952 | } else { |
11d11d61 RH |
1953 | TCGv_i64 t0 = tcg_const_i64(arg2); |
1954 | tcg_gen_ctz_i64(ret, arg1, t0); | |
1955 | tcg_temp_free_i64(t0); | |
0e28d006 RH |
1956 | } |
1957 | } | |
1958 | ||
086920c2 RH |
1959 | void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg) |
1960 | { | |
1961 | if (TCG_TARGET_HAS_clz_i64 || TCG_TARGET_HAS_clz_i32) { | |
1962 | TCGv_i64 t = tcg_temp_new_i64(); | |
1963 | tcg_gen_sari_i64(t, arg, 63); | |
1964 | tcg_gen_xor_i64(t, t, arg); | |
1965 | tcg_gen_clzi_i64(t, t, 64); | |
1966 | tcg_gen_subi_i64(ret, t, 1); | |
1967 | tcg_temp_free_i64(t); | |
1968 | } else { | |
1969 | gen_helper_clrsb_i64(ret, arg); | |
1970 | } | |
1971 | } | |
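/*
 * Illustrative note: xoring arg with its sign mask (arg >> 63)
 * turns leading copies of the sign bit into zeros, so
 * clz(arg ^ (arg >> 63)) - 1 counts the redundant sign bits.
 * For example, arg = -16 (0xfffffffffffffff0) xors to 0x0f,
 * clz = 60, and the result is 59.
 */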
1972 | ||
a768e4e9 RH |
1973 | void tcg_gen_ctpop_i64(TCGv_i64 ret, TCGv_i64 arg1) |
1974 | { | |
1975 | if (TCG_TARGET_HAS_ctpop_i64) { | |
1976 | tcg_gen_op2_i64(INDEX_op_ctpop_i64, ret, arg1); | |
1977 | } else if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_ctpop_i32) { | |
1978 | tcg_gen_ctpop_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1)); | |
1979 | tcg_gen_ctpop_i32(TCGV_LOW(ret), TCGV_LOW(arg1)); | |
1980 | tcg_gen_add_i32(TCGV_LOW(ret), TCGV_LOW(ret), TCGV_HIGH(ret)); | |
1981 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | |
1982 | } else { | |
1983 | gen_helper_ctpop_i64(ret, arg1); | |
1984 | } | |
1985 | } | |
1986 | ||
951c6300 RH |
1987 | void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) |
1988 | { | |
1989 | if (TCG_TARGET_HAS_rot_i64) { | |
1990 | tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2); | |
1991 | } else { | |
1992 | TCGv_i64 t0, t1; | |
1993 | t0 = tcg_temp_new_i64(); | |
1994 | t1 = tcg_temp_new_i64(); | |
1995 | tcg_gen_shl_i64(t0, arg1, arg2); | |
1996 | tcg_gen_subfi_i64(t1, 64, arg2); | |
1997 | tcg_gen_shr_i64(t1, arg1, t1); | |
1998 | tcg_gen_or_i64(ret, t0, t1); | |
1999 | tcg_temp_free_i64(t0); | |
2000 | tcg_temp_free_i64(t1); | |
2001 | } | |
2002 | } | |
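/*
 * Illustrative note: without a native rotate, rotl is composed from
 * two shifts, ret = (arg1 << arg2) | (arg1 >> (64 - arg2)), which
 * is the usual identity for a rotation count in the range 1..63.
 */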
2003 | ||
07dada03 | 2004 | void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) |
951c6300 | 2005 | { |
07dada03 | 2006 | tcg_debug_assert(arg2 >= 0 && arg2 < 64); |
951c6300 RH |
2007 | /* some cases can be optimized here */ |
2008 | if (arg2 == 0) { | |
2009 | tcg_gen_mov_i64(ret, arg1); | |
2010 | } else if (TCG_TARGET_HAS_rot_i64) { | |
11d11d61 | 2011 | tcg_gen_rotl_i64(ret, arg1, tcg_constant_i64(arg2)); |
951c6300 RH |
2012 | } else { |
2013 | TCGv_i64 t0, t1; | |
2014 | t0 = tcg_temp_new_i64(); | |
2015 | t1 = tcg_temp_new_i64(); | |
2016 | tcg_gen_shli_i64(t0, arg1, arg2); | |
2017 | tcg_gen_shri_i64(t1, arg1, 64 - arg2); | |
2018 | tcg_gen_or_i64(ret, t0, t1); | |
2019 | tcg_temp_free_i64(t0); | |
2020 | tcg_temp_free_i64(t1); | |
2021 | } | |
2022 | } | |
2023 | ||
2024 | void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) | |
2025 | { | |
2026 | if (TCG_TARGET_HAS_rot_i64) { | |
2027 | tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2); | |
2028 | } else { | |
2029 | TCGv_i64 t0, t1; | |
2030 | t0 = tcg_temp_new_i64(); | |
2031 | t1 = tcg_temp_new_i64(); | |
2032 | tcg_gen_shr_i64(t0, arg1, arg2); | |
2033 | tcg_gen_subfi_i64(t1, 64, arg2); | |
2034 | tcg_gen_shl_i64(t1, arg1, t1); | |
2035 | tcg_gen_or_i64(ret, t0, t1); | |
2036 | tcg_temp_free_i64(t0); | |
2037 | tcg_temp_free_i64(t1); | |
2038 | } | |
2039 | } | |
2040 | ||
07dada03 | 2041 | void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) |
951c6300 | 2042 | { |
07dada03 | 2043 | tcg_debug_assert(arg2 >= 0 && arg2 < 64); |
951c6300 RH |
2044 | /* some cases can be optimized here */ |
2045 | if (arg2 == 0) { | |
2046 | tcg_gen_mov_i64(ret, arg1); | |
2047 | } else { | |
2048 | tcg_gen_rotli_i64(ret, arg1, 64 - arg2); | |
2049 | } | |
2050 | } | |
2051 | ||
2052 | void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2, | |
2053 | unsigned int ofs, unsigned int len) | |
2054 | { | |
2055 | uint64_t mask; | |
2056 | TCGv_i64 t1; | |
2057 | ||
2058 | tcg_debug_assert(ofs < 64); | |
0d0d309d | 2059 | tcg_debug_assert(len > 0); |
951c6300 RH |
2060 | tcg_debug_assert(len <= 64); |
2061 | tcg_debug_assert(ofs + len <= 64); | |
2062 | ||
0d0d309d | 2063 | if (len == 64) { |
951c6300 RH |
2064 | tcg_gen_mov_i64(ret, arg2); |
2065 | return; | |
2066 | } | |
2067 | if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(ofs, len)) { | |
2068 | tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len); | |
2069 | return; | |
2070 | } | |
2071 | ||
3a13c3f3 RH |
2072 | if (TCG_TARGET_REG_BITS == 32) { |
2073 | if (ofs >= 32) { | |
2074 | tcg_gen_deposit_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), | |
2075 | TCGV_LOW(arg2), ofs - 32, len); | |
2076 | tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1)); | |
2077 | return; | |
2078 | } | |
2079 | if (ofs + len <= 32) { | |
2080 | tcg_gen_deposit_i32(TCGV_LOW(ret), TCGV_LOW(arg1), | |
2081 | TCGV_LOW(arg2), ofs, len); | |
2082 | tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1)); | |
2083 | return; | |
2084 | } | |
951c6300 | 2085 | } |
951c6300 | 2086 | |
951c6300 RH |
2087 | t1 = tcg_temp_new_i64(); |
2088 | ||
b0a60567 RH |
2089 | if (TCG_TARGET_HAS_extract2_i64) { |
2090 | if (ofs + len == 64) { | |
2091 | tcg_gen_shli_i64(t1, arg1, len); | |
2092 | tcg_gen_extract2_i64(ret, t1, arg2, len); | |
2093 | goto done; | |
2094 | } | |
2095 | if (ofs == 0) { | |
2096 | tcg_gen_extract2_i64(ret, arg1, arg2, len); | |
2097 | tcg_gen_rotli_i64(ret, ret, len); | |
2098 | goto done; | |
2099 | } | |
2100 | } | |
2101 | ||
2102 | mask = (1ull << len) - 1; | |
951c6300 RH |
2103 | if (ofs + len < 64) { |
2104 | tcg_gen_andi_i64(t1, arg2, mask); | |
2105 | tcg_gen_shli_i64(t1, t1, ofs); | |
2106 | } else { | |
2107 | tcg_gen_shli_i64(t1, arg2, ofs); | |
2108 | } | |
2109 | tcg_gen_andi_i64(ret, arg1, ~(mask << ofs)); | |
2110 | tcg_gen_or_i64(ret, ret, t1); | |
b0a60567 | 2111 | done: |
951c6300 RH |
2112 | tcg_temp_free_i64(t1); |
2113 | } | |
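/*
 * Illustrative note on the generic deposit path: with
 * mask = (1 << len) - 1, the result is
 * ret = (arg1 & ~(mask << ofs)) | ((arg2 & mask) << ofs),
 * i.e. bits ofs..ofs+len-1 of arg1 are replaced by the low len bits
 * of arg2.  For example, ofs = 16, len = 8 clears bits 16..23 of
 * arg1 and inserts the low byte of arg2 there.
 */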
2114 | ||
07cc68d5 RH |
2115 | void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg, |
2116 | unsigned int ofs, unsigned int len) | |
2117 | { | |
2118 | tcg_debug_assert(ofs < 64); | |
2119 | tcg_debug_assert(len > 0); | |
2120 | tcg_debug_assert(len <= 64); | |
2121 | tcg_debug_assert(ofs + len <= 64); | |
2122 | ||
2123 | if (ofs + len == 64) { | |
2124 | tcg_gen_shli_i64(ret, arg, ofs); | |
2125 | } else if (ofs == 0) { | |
2126 | tcg_gen_andi_i64(ret, arg, (1ull << len) - 1); | |
2127 | } else if (TCG_TARGET_HAS_deposit_i64 | |
2128 | && TCG_TARGET_deposit_i64_valid(ofs, len)) { | |
11d11d61 | 2129 | TCGv_i64 zero = tcg_constant_i64(0); |
07cc68d5 | 2130 | tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, zero, arg, ofs, len); |
07cc68d5 RH |
2131 | } else { |
2132 | if (TCG_TARGET_REG_BITS == 32) { | |
2133 | if (ofs >= 32) { | |
2134 | tcg_gen_deposit_z_i32(TCGV_HIGH(ret), TCGV_LOW(arg), | |
2135 | ofs - 32, len); | |
2136 | tcg_gen_movi_i32(TCGV_LOW(ret), 0); | |
2137 | return; | |
2138 | } | |
2139 | if (ofs + len <= 32) { | |
2140 | tcg_gen_deposit_z_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len); | |
2141 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | |
2142 | return; | |
2143 | } | |
2144 | } | |
2145 | /* To help two-operand hosts we prefer to zero-extend first, | |
2146 | which allows ARG to stay live. */ | |
2147 | switch (len) { | |
2148 | case 32: | |
2149 | if (TCG_TARGET_HAS_ext32u_i64) { | |
2150 | tcg_gen_ext32u_i64(ret, arg); | |
2151 | tcg_gen_shli_i64(ret, ret, ofs); | |
2152 | return; | |
2153 | } | |
2154 | break; | |
2155 | case 16: | |
2156 | if (TCG_TARGET_HAS_ext16u_i64) { | |
2157 | tcg_gen_ext16u_i64(ret, arg); | |
2158 | tcg_gen_shli_i64(ret, ret, ofs); | |
2159 | return; | |
2160 | } | |
2161 | break; | |
2162 | case 8: | |
2163 | if (TCG_TARGET_HAS_ext8u_i64) { | |
2164 | tcg_gen_ext8u_i64(ret, arg); | |
2165 | tcg_gen_shli_i64(ret, ret, ofs); | |
2166 | return; | |
2167 | } | |
2168 | break; | |
2169 | } | |
2170 | /* Otherwise prefer zero-extension over AND for code size. */ | |
2171 | switch (ofs + len) { | |
2172 | case 32: | |
2173 | if (TCG_TARGET_HAS_ext32u_i64) { | |
2174 | tcg_gen_shli_i64(ret, arg, ofs); | |
2175 | tcg_gen_ext32u_i64(ret, ret); | |
2176 | return; | |
2177 | } | |
2178 | break; | |
2179 | case 16: | |
2180 | if (TCG_TARGET_HAS_ext16u_i64) { | |
2181 | tcg_gen_shli_i64(ret, arg, ofs); | |
2182 | tcg_gen_ext16u_i64(ret, ret); | |
2183 | return; | |
2184 | } | |
2185 | break; | |
2186 | case 8: | |
2187 | if (TCG_TARGET_HAS_ext8u_i64) { | |
2188 | tcg_gen_shli_i64(ret, arg, ofs); | |
2189 | tcg_gen_ext8u_i64(ret, ret); | |
2190 | return; | |
2191 | } | |
2192 | break; | |
2193 | } | |
2194 | tcg_gen_andi_i64(ret, arg, (1ull << len) - 1); | |
2195 | tcg_gen_shli_i64(ret, ret, ofs); | |
2196 | } | |
2197 | } | |
2198 | ||
7ec8bab3 RH |
2199 | void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg, |
2200 | unsigned int ofs, unsigned int len) | |
2201 | { | |
2202 | tcg_debug_assert(ofs < 64); | |
2203 | tcg_debug_assert(len > 0); | |
2204 | tcg_debug_assert(len <= 64); | |
2205 | tcg_debug_assert(ofs + len <= 64); | |
2206 | ||
2207 | /* Canonicalize certain special cases, even if extract is supported. */ | |
2208 | if (ofs + len == 64) { | |
2209 | tcg_gen_shri_i64(ret, arg, 64 - len); | |
2210 | return; | |
2211 | } | |
2212 | if (ofs == 0) { | |
2213 | tcg_gen_andi_i64(ret, arg, (1ull << len) - 1); | |
2214 | return; | |
2215 | } | |
2216 | ||
2217 | if (TCG_TARGET_REG_BITS == 32) { | |
2218 | /* Look for a 32-bit extract within one of the two words. */ | |
2219 | if (ofs >= 32) { | |
2220 | tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_HIGH(arg), ofs - 32, len); | |
2221 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | |
2222 | return; | |
2223 | } | |
2224 | if (ofs + len <= 32) { | |
2225 | tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len); | |
2226 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | |
2227 | return; | |
2228 | } | |
2229 | /* The field is split across two words. One double-word | |
2230 | shift is better than two double-word shifts. */ | |
2231 | goto do_shift_and; | |
2232 | } | |
2233 | ||
2234 | if (TCG_TARGET_HAS_extract_i64 | |
2235 | && TCG_TARGET_extract_i64_valid(ofs, len)) { | |
2236 | tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, ofs, len); | |
2237 | return; | |
2238 | } | |
2239 | ||
2240 | /* Assume that zero-extension, if available, is cheaper than a shift. */ | |
2241 | switch (ofs + len) { | |
2242 | case 32: | |
2243 | if (TCG_TARGET_HAS_ext32u_i64) { | |
2244 | tcg_gen_ext32u_i64(ret, arg); | |
2245 | tcg_gen_shri_i64(ret, ret, ofs); | |
2246 | return; | |
2247 | } | |
2248 | break; | |
2249 | case 16: | |
2250 | if (TCG_TARGET_HAS_ext16u_i64) { | |
2251 | tcg_gen_ext16u_i64(ret, arg); | |
2252 | tcg_gen_shri_i64(ret, ret, ofs); | |
2253 | return; | |
2254 | } | |
2255 | break; | |
2256 | case 8: | |
2257 | if (TCG_TARGET_HAS_ext8u_i64) { | |
2258 | tcg_gen_ext8u_i64(ret, arg); | |
2259 | tcg_gen_shri_i64(ret, ret, ofs); | |
2260 | return; | |
2261 | } | |
2262 | break; | |
2263 | } | |
2264 | ||
2265 | /* ??? Ideally we'd know what values are available for immediate AND. | |
2266 | Assume that 8 bits are available, plus the special cases of 16 and 32, | |
2267 | so that we get ext8u, ext16u, and ext32u. */ | |
2268 | switch (len) { | |
2269 | case 1 ... 8: case 16: case 32: | |
2270 | do_shift_and: | |
2271 | tcg_gen_shri_i64(ret, arg, ofs); | |
2272 | tcg_gen_andi_i64(ret, ret, (1ull << len) - 1); | |
2273 | break; | |
2274 | default: | |
2275 | tcg_gen_shli_i64(ret, arg, 64 - len - ofs); | |
2276 | tcg_gen_shri_i64(ret, ret, 64 - len); | |
2277 | break; | |
2278 | } | |
2279 | } | |
2280 | ||
2281 | void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg, | |
2282 | unsigned int ofs, unsigned int len) | |
2283 | { | |
2284 | tcg_debug_assert(ofs < 64); | |
2285 | tcg_debug_assert(len > 0); | |
2286 | tcg_debug_assert(len <= 64); | |
2287 | tcg_debug_assert(ofs + len <= 64); | |
2288 | ||
2289 | /* Canonicalize certain special cases, even if sextract is supported. */ | |
2290 | if (ofs + len == 64) { | |
2291 | tcg_gen_sari_i64(ret, arg, 64 - len); | |
2292 | return; | |
2293 | } | |
2294 | if (ofs == 0) { | |
2295 | switch (len) { | |
2296 | case 32: | |
2297 | tcg_gen_ext32s_i64(ret, arg); | |
2298 | return; | |
2299 | case 16: | |
2300 | tcg_gen_ext16s_i64(ret, arg); | |
2301 | return; | |
2302 | case 8: | |
2303 | tcg_gen_ext8s_i64(ret, arg); | |
2304 | return; | |
2305 | } | |
2306 | } | |
2307 | ||
2308 | if (TCG_TARGET_REG_BITS == 32) { | |
2309 | /* Look for a 32-bit extract within one of the two words. */ | |
2310 | if (ofs >= 32) { | |
2311 | tcg_gen_sextract_i32(TCGV_LOW(ret), TCGV_HIGH(arg), ofs - 32, len); | |
2312 | } else if (ofs + len <= 32) { | |
2313 | tcg_gen_sextract_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len); | |
2314 | } else if (ofs == 0) { | |
2315 | tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg)); | |
2316 | tcg_gen_sextract_i32(TCGV_HIGH(ret), TCGV_HIGH(arg), 0, len - 32); | |
2317 | return; | |
2318 | } else if (len > 32) { | |
2319 | TCGv_i32 t = tcg_temp_new_i32(); | |
2320 | /* Extract the bits for the high word normally. */ | |
2321 | tcg_gen_sextract_i32(t, TCGV_HIGH(arg), ofs + 32, len - 32); | |
2322 | /* Shift the field down for the low part. */ | |
2323 | tcg_gen_shri_i64(ret, arg, ofs); | |
2324 | /* Overwrite the shift into the high part. */ | |
2325 | tcg_gen_mov_i32(TCGV_HIGH(ret), t); | |
2326 | tcg_temp_free_i32(t); | |
2327 | return; | |
2328 | } else { | |
2329 | /* Shift the field down for the low part, such that the | |
2330 | field sits at the MSB. */ | |
2331 | tcg_gen_shri_i64(ret, arg, ofs + len - 32); | |
2332 | /* Shift the field down from the MSB, sign extending. */ | |
2333 | tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_LOW(ret), 32 - len); | |
2334 | } | |
2335 | /* Sign-extend the field from 32 bits. */ | |
2336 | tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); | |
2337 | return; | |
2338 | } | |
2339 | ||
2340 | if (TCG_TARGET_HAS_sextract_i64 | |
2341 | && TCG_TARGET_extract_i64_valid(ofs, len)) { | |
2342 | tcg_gen_op4ii_i64(INDEX_op_sextract_i64, ret, arg, ofs, len); | |
2343 | return; | |
2344 | } | |
2345 | ||
2346 | /* Assume that sign-extension, if available, is cheaper than a shift. */ | |
2347 | switch (ofs + len) { | |
2348 | case 32: | |
2349 | if (TCG_TARGET_HAS_ext32s_i64) { | |
2350 | tcg_gen_ext32s_i64(ret, arg); | |
2351 | tcg_gen_sari_i64(ret, ret, ofs); | |
2352 | return; | |
2353 | } | |
2354 | break; | |
2355 | case 16: | |
2356 | if (TCG_TARGET_HAS_ext16s_i64) { | |
2357 | tcg_gen_ext16s_i64(ret, arg); | |
2358 | tcg_gen_sari_i64(ret, ret, ofs); | |
2359 | return; | |
2360 | } | |
2361 | break; | |
2362 | case 8: | |
2363 | if (TCG_TARGET_HAS_ext8s_i64) { | |
2364 | tcg_gen_ext8s_i64(ret, arg); | |
2365 | tcg_gen_sari_i64(ret, ret, ofs); | |
2366 | return; | |
2367 | } | |
2368 | break; | |
2369 | } | |
2370 | switch (len) { | |
2371 | case 32: | |
2372 | if (TCG_TARGET_HAS_ext32s_i64) { | |
2373 | tcg_gen_shri_i64(ret, arg, ofs); | |
2374 | tcg_gen_ext32s_i64(ret, ret); | |
2375 | return; | |
2376 | } | |
2377 | break; | |
2378 | case 16: | |
2379 | if (TCG_TARGET_HAS_ext16s_i64) { | |
2380 | tcg_gen_shri_i64(ret, arg, ofs); | |
2381 | tcg_gen_ext16s_i64(ret, ret); | |
2382 | return; | |
2383 | } | |
2384 | break; | |
2385 | case 8: | |
2386 | if (TCG_TARGET_HAS_ext8s_i64) { | |
2387 | tcg_gen_shri_i64(ret, arg, ofs); | |
2388 | tcg_gen_ext8s_i64(ret, ret); | |
2389 | return; | |
2390 | } | |
2391 | break; | |
2392 | } | |
2393 | tcg_gen_shli_i64(ret, arg, 64 - len - ofs); | |
2394 | tcg_gen_sari_i64(ret, ret, 64 - len); | |
2395 | } | |
2396 | ||
2089fcc9 DH |
2397 | /* |
2398 | * Extract 64 bits from a 128-bit input, ah:al, starting from ofs. | |
2399 | * Unlike tcg_gen_extract_i64 above, len is fixed at 64. | |
2400 | */ | |
2401 | void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah, | |
2402 | unsigned int ofs) | |
2403 | { | |
2404 | tcg_debug_assert(ofs <= 64); | |
2405 | if (ofs == 0) { | |
2406 | tcg_gen_mov_i64(ret, al); | |
2407 | } else if (ofs == 64) { | |
2408 | tcg_gen_mov_i64(ret, ah); | |
2409 | } else if (al == ah) { | |
2410 | tcg_gen_rotri_i64(ret, al, ofs); | |
fce1296f RH |
2411 | } else if (TCG_TARGET_HAS_extract2_i64) { |
2412 | tcg_gen_op4i_i64(INDEX_op_extract2_i64, ret, al, ah, ofs); | |
2089fcc9 DH |
2413 | } else { |
2414 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
2415 | tcg_gen_shri_i64(t0, al, ofs); | |
2416 | tcg_gen_deposit_i64(ret, t0, ah, 64 - ofs, ofs); | |
2417 | tcg_temp_free_i64(t0); | |
2418 | } | |
2419 | } | |
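/*
 * Illustrative note: extract2 returns bits ofs..ofs+63 of the
 * 128-bit pair ah:al, conceptually (al >> ofs) | (ah << (64 - ofs)).
 * The fallback builds exactly that: the logical shift clears the
 * top ofs bits of t0, and the deposit fills them with the low
 * ofs bits of ah.
 */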
2420 | ||
951c6300 RH |
2421 | void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1, |
2422 | TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2) | |
2423 | { | |
37ed3bf1 RH |
2424 | if (cond == TCG_COND_ALWAYS) { |
2425 | tcg_gen_mov_i64(ret, v1); | |
2426 | } else if (cond == TCG_COND_NEVER) { | |
2427 | tcg_gen_mov_i64(ret, v2); | |
2428 | } else if (TCG_TARGET_REG_BITS == 32) { | |
3a13c3f3 RH |
2429 | TCGv_i32 t0 = tcg_temp_new_i32(); |
2430 | TCGv_i32 t1 = tcg_temp_new_i32(); | |
2431 | tcg_gen_op6i_i32(INDEX_op_setcond2_i32, t0, | |
2432 | TCGV_LOW(c1), TCGV_HIGH(c1), | |
2433 | TCGV_LOW(c2), TCGV_HIGH(c2), cond); | |
2434 | ||
2435 | if (TCG_TARGET_HAS_movcond_i32) { | |
2436 | tcg_gen_movi_i32(t1, 0); | |
2437 | tcg_gen_movcond_i32(TCG_COND_NE, TCGV_LOW(ret), t0, t1, | |
2438 | TCGV_LOW(v1), TCGV_LOW(v2)); | |
2439 | tcg_gen_movcond_i32(TCG_COND_NE, TCGV_HIGH(ret), t0, t1, | |
2440 | TCGV_HIGH(v1), TCGV_HIGH(v2)); | |
2441 | } else { | |
2442 | tcg_gen_neg_i32(t0, t0); | |
951c6300 | 2443 | |
3a13c3f3 RH |
2444 | tcg_gen_and_i32(t1, TCGV_LOW(v1), t0); |
2445 | tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(v2), t0); | |
2446 | tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t1); | |
951c6300 | 2447 | |
3a13c3f3 RH |
2448 | tcg_gen_and_i32(t1, TCGV_HIGH(v1), t0); |
2449 | tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(v2), t0); | |
2450 | tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t1); | |
2451 | } | |
2452 | tcg_temp_free_i32(t0); | |
2453 | tcg_temp_free_i32(t1); | |
2454 | } else if (TCG_TARGET_HAS_movcond_i64) { | |
951c6300 RH |
2455 | tcg_gen_op6i_i64(INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond); |
2456 | } else { | |
2457 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
2458 | TCGv_i64 t1 = tcg_temp_new_i64(); | |
2459 | tcg_gen_setcond_i64(cond, t0, c1, c2); | |
2460 | tcg_gen_neg_i64(t0, t0); | |
2461 | tcg_gen_and_i64(t1, v1, t0); | |
2462 | tcg_gen_andc_i64(ret, v2, t0); | |
2463 | tcg_gen_or_i64(ret, ret, t1); | |
2464 | tcg_temp_free_i64(t0); | |
2465 | tcg_temp_free_i64(t1); | |
2466 | } | |
951c6300 RH |
2467 | } |
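/*
 * Illustrative note: when the host lacks movcond, the select is done
 * branch-free with a bit mask: setcond yields 0 or 1, negation turns
 * that into 0 or all-ones, and
 * ret = (v1 & mask) | (v2 & ~mask)
 * picks v1 when the condition holds and v2 otherwise.
 */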
2468 | ||
2469 | void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, | |
2470 | TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) | |
2471 | { | |
2472 | if (TCG_TARGET_HAS_add2_i64) { | |
2473 | tcg_gen_op6_i64(INDEX_op_add2_i64, rl, rh, al, ah, bl, bh); | |
951c6300 RH |
2474 | } else { |
2475 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
2476 | TCGv_i64 t1 = tcg_temp_new_i64(); | |
2477 | tcg_gen_add_i64(t0, al, bl); | |
2478 | tcg_gen_setcond_i64(TCG_COND_LTU, t1, t0, al); | |
2479 | tcg_gen_add_i64(rh, ah, bh); | |
2480 | tcg_gen_add_i64(rh, rh, t1); | |
2481 | tcg_gen_mov_i64(rl, t0); | |
2482 | tcg_temp_free_i64(t0); | |
2483 | tcg_temp_free_i64(t1); | |
2484 | } | |
2485 | } | |
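/*
 * Illustrative note: the double-word add derives the carry from an
 * unsigned compare: after t0 = al + bl, the carry out is
 * (t0 < al) ? 1 : 0, which is added into the high half.  E.g.
 * al = bl = 0x8000000000000000 gives t0 = 0 and a carry of 1.
 * Writing rl last from the temporary keeps the expansion correct
 * even if rl aliases one of the inputs.
 */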
2486 | ||
2487 | void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, | |
2488 | TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) | |
2489 | { | |
2490 | if (TCG_TARGET_HAS_sub2_i64) { | |
2491 | tcg_gen_op6_i64(INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh); | |
951c6300 RH |
2492 | } else { |
2493 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
2494 | TCGv_i64 t1 = tcg_temp_new_i64(); | |
2495 | tcg_gen_sub_i64(t0, al, bl); | |
2496 | tcg_gen_setcond_i64(TCG_COND_LTU, t1, al, bl); | |
2497 | tcg_gen_sub_i64(rh, ah, bh); | |
2498 | tcg_gen_sub_i64(rh, rh, t1); | |
2499 | tcg_gen_mov_i64(rl, t0); | |
2500 | tcg_temp_free_i64(t0); | |
2501 | tcg_temp_free_i64(t1); | |
2502 | } | |
2503 | } | |
2504 | ||
2505 | void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) | |
2506 | { | |
2507 | if (TCG_TARGET_HAS_mulu2_i64) { | |
2508 | tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2); | |
951c6300 RH |
2509 | } else if (TCG_TARGET_HAS_muluh_i64) { |
2510 | TCGv_i64 t = tcg_temp_new_i64(); | |
2511 | tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2); | |
2512 | tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2); | |
2513 | tcg_gen_mov_i64(rl, t); | |
2514 | tcg_temp_free_i64(t); | |
2515 | } else { | |
2516 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
2517 | tcg_gen_mul_i64(t0, arg1, arg2); | |
2518 | gen_helper_muluh_i64(rh, arg1, arg2); | |
2519 | tcg_gen_mov_i64(rl, t0); | |
2520 | tcg_temp_free_i64(t0); | |
2521 | } | |
2522 | } | |
2523 | ||
2524 | void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) | |
2525 | { | |
2526 | if (TCG_TARGET_HAS_muls2_i64) { | |
2527 | tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2); | |
951c6300 RH |
2528 | } else if (TCG_TARGET_HAS_mulsh_i64) { |
2529 | TCGv_i64 t = tcg_temp_new_i64(); | |
2530 | tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2); | |
2531 | tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2); | |
2532 | tcg_gen_mov_i64(rl, t); | |
2533 | tcg_temp_free_i64(t); | |
2534 | } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) { | |
2535 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
2536 | TCGv_i64 t1 = tcg_temp_new_i64(); | |
2537 | TCGv_i64 t2 = tcg_temp_new_i64(); | |
2538 | TCGv_i64 t3 = tcg_temp_new_i64(); | |
2539 | tcg_gen_mulu2_i64(t0, t1, arg1, arg2); | |
2540 | /* Adjust for negative inputs. */ | |
2541 | tcg_gen_sari_i64(t2, arg1, 63); | |
2542 | tcg_gen_sari_i64(t3, arg2, 63); | |
2543 | tcg_gen_and_i64(t2, t2, arg2); | |
2544 | tcg_gen_and_i64(t3, t3, arg1); | |
2545 | tcg_gen_sub_i64(rh, t1, t2); | |
2546 | tcg_gen_sub_i64(rh, rh, t3); | |
2547 | tcg_gen_mov_i64(rl, t0); | |
2548 | tcg_temp_free_i64(t0); | |
2549 | tcg_temp_free_i64(t1); | |
2550 | tcg_temp_free_i64(t2); | |
2551 | tcg_temp_free_i64(t3); | |
2552 | } else { | |
2553 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
2554 | tcg_gen_mul_i64(t0, arg1, arg2); | |
2555 | gen_helper_mulsh_i64(rh, arg1, arg2); | |
2556 | tcg_gen_mov_i64(rl, t0); | |
2557 | tcg_temp_free_i64(t0); | |
2558 | } | |
2559 | } | |
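/*
 * Illustrative note: with the operands viewed as signed values a and
 * b, the signed high word equals the unsigned high word minus
 * (a < 0 ? b : 0) minus (b < 0 ? a : 0), modulo 2^64; the sari/and
 * pairs above compute exactly those conditional subtrahends.
 */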
2560 | ||
5087abfb RH |
2561 | void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) |
2562 | { | |
2563 | TCGv_i64 t0 = tcg_temp_new_i64(); | |
2564 | TCGv_i64 t1 = tcg_temp_new_i64(); | |
2565 | TCGv_i64 t2 = tcg_temp_new_i64(); | |
2566 | tcg_gen_mulu2_i64(t0, t1, arg1, arg2); | |
2567 | /* Adjust for negative input for the signed arg1. */ | |
2568 | tcg_gen_sari_i64(t2, arg1, 63); | |
2569 | tcg_gen_and_i64(t2, t2, arg2); | |
2570 | tcg_gen_sub_i64(rh, t1, t2); | |
2571 | tcg_gen_mov_i64(rl, t0); | |
2572 | tcg_temp_free_i64(t0); | |
2573 | tcg_temp_free_i64(t1); | |
2574 | tcg_temp_free_i64(t2); | |
2575 | } | |
2576 | ||
b87fb8cd RH |
2577 | void tcg_gen_smin_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) |
2578 | { | |
2579 | tcg_gen_movcond_i64(TCG_COND_LT, ret, a, b, a, b); | |
2580 | } | |
2581 | ||
2582 | void tcg_gen_umin_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) | |
2583 | { | |
2584 | tcg_gen_movcond_i64(TCG_COND_LTU, ret, a, b, a, b); | |
2585 | } | |
2586 | ||
2587 | void tcg_gen_smax_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) | |
2588 | { | |
2589 | tcg_gen_movcond_i64(TCG_COND_LT, ret, a, b, b, a); | |
2590 | } | |
2591 | ||
2592 | void tcg_gen_umax_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) | |
2593 | { | |
2594 | tcg_gen_movcond_i64(TCG_COND_LTU, ret, a, b, b, a); | |
2595 | } | |
2596 | ||
ff1f11f7 RH |
2597 | void tcg_gen_abs_i64(TCGv_i64 ret, TCGv_i64 a) |
2598 | { | |
2599 | TCGv_i64 t = tcg_temp_new_i64(); | |
2600 | ||
2601 | tcg_gen_sari_i64(t, a, 63); | |
2602 | tcg_gen_xor_i64(ret, a, t); | |
2603 | tcg_gen_sub_i64(ret, ret, t); | |
2604 | tcg_temp_free_i64(t); | |
2605 | } | |
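/*
 * Illustrative note: this is the branch-free absolute value.
 * t = a >> 63 is 0 for non-negative a and -1 otherwise, and
 * (a ^ t) - t leaves a unchanged in the first case and computes
 * ~a + 1 = -a in the second; e.g. a = -5: t = -1, a ^ t = 4,
 * 4 - (-1) = 5.  As with negation, INT64_MIN maps to itself.
 */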
2606 | ||
951c6300 RH |
2607 | /* Size changing operations. */ |
2608 | ||
609ad705 | 2609 | void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg) |
951c6300 | 2610 | { |
3a13c3f3 | 2611 | if (TCG_TARGET_REG_BITS == 32) { |
609ad705 RH |
2612 | tcg_gen_mov_i32(ret, TCGV_LOW(arg)); |
2613 | } else if (TCG_TARGET_HAS_extrl_i64_i32) { | |
b7e8b17a | 2614 | tcg_gen_op2(INDEX_op_extrl_i64_i32, |
ae8b75dc | 2615 | tcgv_i32_arg(ret), tcgv_i64_arg(arg)); |
609ad705 | 2616 | } else { |
dc41aa7d | 2617 | tcg_gen_mov_i32(ret, (TCGv_i32)arg); |
609ad705 RH |
2618 | } |
2619 | } | |
2620 | ||
2621 | void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg) | |
2622 | { | |
2623 | if (TCG_TARGET_REG_BITS == 32) { | |
2624 | tcg_gen_mov_i32(ret, TCGV_HIGH(arg)); | |
2625 | } else if (TCG_TARGET_HAS_extrh_i64_i32) { | |
b7e8b17a | 2626 | tcg_gen_op2(INDEX_op_extrh_i64_i32, |
ae8b75dc | 2627 | tcgv_i32_arg(ret), tcgv_i64_arg(arg)); |
951c6300 RH |
2628 | } else { |
2629 | TCGv_i64 t = tcg_temp_new_i64(); | |
609ad705 | 2630 | tcg_gen_shri_i64(t, arg, 32); |
dc41aa7d | 2631 | tcg_gen_mov_i32(ret, (TCGv_i32)t); |
951c6300 RH |
2632 | tcg_temp_free_i64(t); |
2633 | } | |
951c6300 RH |
2634 | } |
2635 | ||
2636 | void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg) | |
2637 | { | |
3a13c3f3 RH |
2638 | if (TCG_TARGET_REG_BITS == 32) { |
2639 | tcg_gen_mov_i32(TCGV_LOW(ret), arg); | |
2640 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | |
2641 | } else { | |
b7e8b17a | 2642 | tcg_gen_op2(INDEX_op_extu_i32_i64, |
ae8b75dc | 2643 | tcgv_i64_arg(ret), tcgv_i32_arg(arg)); |
3a13c3f3 | 2644 | } |
951c6300 RH |
2645 | } |
2646 | ||
2647 | void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg) | |
2648 | { | |
3a13c3f3 RH |
2649 | if (TCG_TARGET_REG_BITS == 32) { |
2650 | tcg_gen_mov_i32(TCGV_LOW(ret), arg); | |
2651 | tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); | |
2652 | } else { | |
b7e8b17a | 2653 | tcg_gen_op2(INDEX_op_ext_i32_i64, |
ae8b75dc | 2654 | tcgv_i64_arg(ret), tcgv_i32_arg(arg)); |
3a13c3f3 | 2655 | } |
951c6300 RH |
2656 | } |
2657 | ||
2658 | void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high) | |
2659 | { | |
3a13c3f3 RH |
2660 | TCGv_i64 tmp; |
2661 | ||
2662 | if (TCG_TARGET_REG_BITS == 32) { | |
2663 | tcg_gen_mov_i32(TCGV_LOW(dest), low); | |
2664 | tcg_gen_mov_i32(TCGV_HIGH(dest), high); | |
2665 | return; | |
2666 | } | |
2667 | ||
2668 | tmp = tcg_temp_new_i64(); | |
951c6300 RH |
2669 | /* These extensions are only needed for type correctness. |
2670 | We may be able to do better given target specific information. */ | |
2671 | tcg_gen_extu_i32_i64(tmp, high); | |
2672 | tcg_gen_extu_i32_i64(dest, low); | |
2673 | /* If deposit is available, use it. Otherwise use the extra | |
2674 | knowledge that we have of the zero-extensions above. */ | |
2675 | if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(32, 32)) { | |
2676 | tcg_gen_deposit_i64(dest, dest, tmp, 32, 32); | |
2677 | } else { | |
2678 | tcg_gen_shli_i64(tmp, tmp, 32); | |
2679 | tcg_gen_or_i64(dest, dest, tmp); | |
2680 | } | |
2681 | tcg_temp_free_i64(tmp); | |
951c6300 RH |
2682 | } |
2683 | ||
2684 | void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg) | |
2685 | { | |
3a13c3f3 RH |
2686 | if (TCG_TARGET_REG_BITS == 32) { |
2687 | tcg_gen_mov_i32(lo, TCGV_LOW(arg)); | |
2688 | tcg_gen_mov_i32(hi, TCGV_HIGH(arg)); | |
2689 | } else { | |
609ad705 RH |
2690 | tcg_gen_extrl_i64_i32(lo, arg); |
2691 | tcg_gen_extrh_i64_i32(hi, arg); | |
3a13c3f3 | 2692 | } |
951c6300 RH |
2693 | } |
2694 | ||
2695 | void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg) | |
2696 | { | |
2697 | tcg_gen_ext32u_i64(lo, arg); | |
2698 | tcg_gen_shri_i64(hi, arg, 32); | |
2699 | } | |
2700 | ||
2701 | /* QEMU specific operations. */ | |
2702 | ||
d9971435 | 2703 | void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx) |
07ea28b4 | 2704 | { |
eba40358 RH |
2705 | /* |
2706 | * Let the jit code return the read-only version of the | |
2707 | * TranslationBlock, so that we minimize the pc-relative | |
2708 | * distance of the address of the exit_tb code to TB. | |
2709 | * This will improve utilization of pc-relative address loads. | |
2710 | * | |
2711 | * TODO: Move this to translator_loop, so that all const | |
2712 | * TranslationBlock pointers refer to read-only memory. | |
2713 | * This requires coordination with targets that do not use | |
2714 | * the translator_loop. | |
2715 | */ | |
2716 | uintptr_t val = (uintptr_t)tcg_splitwx_to_rx((void *)tb) + idx; | |
07ea28b4 RH |
2717 | |
2718 | if (tb == NULL) { | |
2719 | tcg_debug_assert(idx == 0); | |
2720 | } else if (idx <= TB_EXIT_IDXMAX) { | |
2721 | #ifdef CONFIG_DEBUG_TCG | |
2722 | /* This is an exit following a goto_tb. Verify that we have | |
2723 | seen this numbered exit before, via tcg_gen_goto_tb. */ | |
2724 | tcg_debug_assert(tcg_ctx->goto_tb_issue_mask & (1 << idx)); | |
2725 | #endif | |
d7f425fd RH |
2726 | /* When not chaining, exit without indicating a link. */ |
2727 | if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { | |
2728 | val = 0; | |
2729 | } | |
07ea28b4 RH |
2730 | } else { |
2731 | /* This is an exit via the exitreq label. */ | |
2732 | tcg_debug_assert(idx == TB_EXIT_REQUESTED); | |
2733 | } | |
2734 | ||
e6d86bed | 2735 | plugin_gen_disable_mem_helpers(); |
07ea28b4 RH |
2736 | tcg_gen_op1i(INDEX_op_exit_tb, val); |
2737 | } | |
2738 | ||
951c6300 RH |
2739 | void tcg_gen_goto_tb(unsigned idx) |
2740 | { | |
2741 | /* We only support two chained exits. */ | |
07ea28b4 | 2742 | tcg_debug_assert(idx <= TB_EXIT_IDXMAX); |
951c6300 RH |
2743 | #ifdef CONFIG_DEBUG_TCG |
2744 | /* Verify that we haven't seen this numbered exit before. */
b1311c4a EC |
2745 | tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0); |
2746 | tcg_ctx->goto_tb_issue_mask |= 1 << idx; | |
951c6300 | 2747 | #endif |
e6d86bed | 2748 | plugin_gen_disable_mem_helpers(); |
d7f425fd RH |
2749 | /* When not chaining, we simply fall through to the "fallback" exit. */ |
2750 | if (!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { | |
2751 | tcg_gen_op1i(INDEX_op_goto_tb, idx); | |
2752 | } | |
951c6300 RH |
2753 | } |
2754 | ||
7f11636d | 2755 | void tcg_gen_lookup_and_goto_ptr(void) |
cedbcb01 EC |
2756 | { |
2757 | if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { | |
e6d86bed EC |
2758 | TCGv_ptr ptr; |
2759 | ||
2760 | plugin_gen_disable_mem_helpers(); | |
2761 | ptr = tcg_temp_new_ptr(); | |
1c2adb95 | 2762 | gen_helper_lookup_tb_ptr(ptr, cpu_env); |
ae8b75dc | 2763 | tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr)); |
cedbcb01 EC |
2764 | tcg_temp_free_ptr(ptr); |
2765 | } else { | |
07ea28b4 | 2766 | tcg_gen_exit_tb(NULL, 0); |
cedbcb01 EC |
2767 | } |
2768 | } | |
2769 | ||
14776ab5 | 2770 | static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) |
951c6300 | 2771 | { |
1f00b27f SS |
2772 | /* Trigger the asserts within as early as possible. */ |
2773 | (void)get_alignment_bits(op); | |
2774 | ||
951c6300 RH |
2775 | switch (op & MO_SIZE) { |
2776 | case MO_8: | |
2777 | op &= ~MO_BSWAP; | |
2778 | break; | |
2779 | case MO_16: | |
2780 | break; | |
2781 | case MO_32: | |
2782 | if (!is64) { | |
2783 | op &= ~MO_SIGN; | |
2784 | } | |
2785 | break; | |
2786 | case MO_64: | |
2787 | if (!is64) { | |
2788 | tcg_abort(); | |
2789 | } | |
2790 | break; | |
2791 | } | |
2792 | if (st) { | |
2793 | op &= ~MO_SIGN; | |
2794 | } | |
2795 | return op; | |
2796 | } | |
2797 | ||
c45cb8bb | 2798 | static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, |
14776ab5 | 2799 | MemOp memop, TCGArg idx) |
951c6300 | 2800 | { |
59227d5d | 2801 | TCGMemOpIdx oi = make_memop_idx(memop, idx); |
c45cb8bb | 2802 | #if TARGET_LONG_BITS == 32 |
59227d5d | 2803 | tcg_gen_op3i_i32(opc, val, addr, oi); |
c45cb8bb | 2804 | #else |
3a13c3f3 | 2805 | if (TCG_TARGET_REG_BITS == 32) { |
59227d5d | 2806 | tcg_gen_op4i_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr), oi); |
3a13c3f3 | 2807 | } else { |
ae8b75dc | 2808 | tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_i64_arg(addr), oi); |
3a13c3f3 | 2809 | } |
c45cb8bb | 2810 | #endif |
951c6300 RH |
2811 | } |
2812 | ||
c45cb8bb | 2813 | static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr, |
14776ab5 | 2814 | MemOp memop, TCGArg idx) |
c45cb8bb | 2815 | { |
59227d5d | 2816 | TCGMemOpIdx oi = make_memop_idx(memop, idx); |
951c6300 | 2817 | #if TARGET_LONG_BITS == 32 |
c45cb8bb | 2818 | if (TCG_TARGET_REG_BITS == 32) { |
59227d5d | 2819 | tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi); |
c45cb8bb | 2820 | } else { |
ae8b75dc | 2821 | tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_i32_arg(addr), oi); |
c45cb8bb | 2822 | } |
951c6300 | 2823 | #else |
c45cb8bb | 2824 | if (TCG_TARGET_REG_BITS == 32) { |
59227d5d RH |
2825 | tcg_gen_op5i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), |
2826 | TCGV_LOW(addr), TCGV_HIGH(addr), oi); | |
c45cb8bb | 2827 | } else { |
59227d5d | 2828 | tcg_gen_op3i_i64(opc, val, addr, oi); |
c45cb8bb | 2829 | } |
951c6300 | 2830 | #endif |
c45cb8bb | 2831 | } |
951c6300 | 2832 | |
b32dc337 PK |
2833 | static void tcg_gen_req_mo(TCGBar type) |
2834 | { | |
2835 | #ifdef TCG_GUEST_DEFAULT_MO | |
2836 | type &= TCG_GUEST_DEFAULT_MO; | |
2837 | #endif | |
2838 | type &= ~TCG_TARGET_DEFAULT_MO; | |
2839 | if (type) { | |
2840 | tcg_gen_mb(type | TCG_BAR_SC); | |
2841 | } | |
2842 | } | |
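/*
 * Illustrative note: the requested ordering is first restricted to
 * what the guest memory model actually demands (TCG_GUEST_DEFAULT_MO,
 * when defined) and then stripped of what the host already provides
 * (TCG_TARGET_DEFAULT_MO); only a non-empty remainder is emitted as
 * an explicit memory barrier op.
 */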
2843 | ||
fcc54ab5 AB |
2844 | static inline TCGv plugin_prep_mem_callbacks(TCGv vaddr) |
2845 | { | |
2846 | #ifdef CONFIG_PLUGIN | |
2847 | if (tcg_ctx->plugin_insn != NULL) { | |
2848 | /* Save a copy of the vaddr for use after a load. */ | |
2849 | TCGv temp = tcg_temp_new(); | |
2850 | tcg_gen_mov_tl(temp, vaddr); | |
2851 | return temp; | |
2852 | } | |
2853 | #endif | |
2854 | return vaddr; | |
2855 | } | |
2856 | ||
e6d86bed EC |
2857 | static inline void plugin_gen_mem_callbacks(TCGv vaddr, uint16_t info) |
2858 | { | |
2859 | #ifdef CONFIG_PLUGIN | |
fcc54ab5 AB |
2860 | if (tcg_ctx->plugin_insn != NULL) { |
2861 | plugin_gen_empty_mem_callback(vaddr, info); | |
2862 | tcg_temp_free(vaddr); | |
e6d86bed | 2863 | } |
e6d86bed EC |
2864 | #endif |
2865 | } | |
2866 | ||
14776ab5 | 2867 | void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) |
951c6300 | 2868 | { |
14776ab5 | 2869 | MemOp orig_memop; |
e6d86bed | 2870 | uint16_t info = trace_mem_get_info(memop, idx, 0); |
e1dcf352 | 2871 | |
b32dc337 | 2872 | tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); |
951c6300 | 2873 | memop = tcg_canonicalize_memop(memop, 0, 0); |
e6d86bed | 2874 | trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); |
e1dcf352 RH |
2875 | |
2876 | orig_memop = memop; | |
2877 | if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { | |
2878 | memop &= ~MO_BSWAP; | |
359feba5 | 2879 | /* The bswap primitive benefits from zero-extended input. */ |
e1dcf352 RH |
2880 | if ((memop & MO_SSIZE) == MO_SW) { |
2881 | memop &= ~MO_SIGN; | |
2882 | } | |
2883 | } | |
2884 | ||
fcc54ab5 | 2885 | addr = plugin_prep_mem_callbacks(addr); |
c45cb8bb | 2886 | gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx); |
e6d86bed | 2887 | plugin_gen_mem_callbacks(addr, info); |
e1dcf352 RH |
2888 | |
2889 | if ((orig_memop ^ memop) & MO_BSWAP) { | |
2890 | switch (orig_memop & MO_SIZE) { | |
2891 | case MO_16: | |
359feba5 RH |
2892 | tcg_gen_bswap16_i32(val, val, (orig_memop & MO_SIGN |
2893 | ? TCG_BSWAP_IZ | TCG_BSWAP_OS | |
2894 | : TCG_BSWAP_IZ | TCG_BSWAP_OZ)); | |
e1dcf352 RH |
2895 | break; |
2896 | case MO_32: | |
2897 | tcg_gen_bswap32_i32(val, val); | |
2898 | break; | |
2899 | default: | |
2900 | g_assert_not_reached(); | |
2901 | } | |
2902 | } | |
951c6300 RH |
2903 | } |
2904 | ||
14776ab5 | 2905 | void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) |
951c6300 | 2906 | { |
e1dcf352 | 2907 | TCGv_i32 swap = NULL; |
e6d86bed | 2908 | uint16_t info = trace_mem_get_info(memop, idx, 1); |
e1dcf352 | 2909 | |
b32dc337 | 2910 | tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); |
951c6300 | 2911 | memop = tcg_canonicalize_memop(memop, 0, 1); |
e6d86bed | 2912 | trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); |
e1dcf352 RH |
2913 | |
2914 | if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { | |
2915 | swap = tcg_temp_new_i32(); | |
2916 | switch (memop & MO_SIZE) { | |
2917 | case MO_16: | |
b53357ac | 2918 | tcg_gen_bswap16_i32(swap, val, 0); |
e1dcf352 RH |
2919 | break; |
2920 | case MO_32: | |
2921 | tcg_gen_bswap32_i32(swap, val); | |
2922 | break; | |
2923 | default: | |
2924 | g_assert_not_reached(); | |
2925 | } | |
2926 | val = swap; | |
2927 | memop &= ~MO_BSWAP; | |
2928 | } | |
2929 | ||
fcc54ab5 | 2930 | addr = plugin_prep_mem_callbacks(addr); |
07ce0b05 RH |
2931 | if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) { |
2932 | gen_ldst_i32(INDEX_op_qemu_st8_i32, val, addr, memop, idx); | |
2933 | } else { | |
2934 | gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx); | |
2935 | } | |
e6d86bed | 2936 | plugin_gen_mem_callbacks(addr, info); |
e1dcf352 RH |
2937 | |
2938 | if (swap) { | |
2939 | tcg_temp_free_i32(swap); | |
2940 | } | |
951c6300 RH |
2941 | } |
2942 | ||
14776ab5 | 2943 | void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) |
951c6300 | 2944 | { |
14776ab5 | 2945 | MemOp orig_memop; |
e6d86bed | 2946 | uint16_t info; |
e1dcf352 | 2947 | |
3a13c3f3 | 2948 | if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { |
951c6300 RH |
2949 | tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop); |
2950 | if (memop & MO_SIGN) { | |
2951 | tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31); | |
2952 | } else { | |
2953 | tcg_gen_movi_i32(TCGV_HIGH(val), 0); | |
2954 | } | |
2955 | return; | |
2956 | } | |
951c6300 | 2957 | |
e1dcf352 | 2958 | tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); |
c45cb8bb | 2959 | memop = tcg_canonicalize_memop(memop, 1, 0); |
e6d86bed EC |
2960 | info = trace_mem_get_info(memop, idx, 0); |
2961 | trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); | |
e1dcf352 RH |
2962 | |
2963 | orig_memop = memop; | |
2964 | if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { | |
2965 | memop &= ~MO_BSWAP; | |
359feba5 | 2966 | /* The bswap primitive benefits from zero-extended input. */ |
e1dcf352 RH |
2967 | if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) { |
2968 | memop &= ~MO_SIGN; | |
2969 | } | |
2970 | } | |
2971 | ||
fcc54ab5 | 2972 | addr = plugin_prep_mem_callbacks(addr); |
c45cb8bb | 2973 | gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx); |
e6d86bed | 2974 | plugin_gen_mem_callbacks(addr, info); |
e1dcf352 RH |
2975 | |
2976 | if ((orig_memop ^ memop) & MO_BSWAP) { | |
359feba5 RH |
2977 | int flags = (orig_memop & MO_SIGN |
2978 | ? TCG_BSWAP_IZ | TCG_BSWAP_OS | |
2979 | : TCG_BSWAP_IZ | TCG_BSWAP_OZ); | |
e1dcf352 RH |
2980 | switch (orig_memop & MO_SIZE) { |
2981 | case MO_16: | |
359feba5 | 2982 | tcg_gen_bswap16_i64(val, val, flags); |
e1dcf352 RH |
2983 | break; |
2984 | case MO_32: | |
359feba5 | 2985 | tcg_gen_bswap32_i64(val, val, flags); |
e1dcf352 RH |
2986 | break; |
2987 | case MO_64: | |
2988 | tcg_gen_bswap64_i64(val, val); | |
2989 | break; | |
2990 | default: | |
2991 | g_assert_not_reached(); | |
2992 | } | |
2993 | } | |
951c6300 RH |
2994 | } |
2995 | ||
void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
{
    TCGv_i64 swap = NULL;
    uint16_t info;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 1, 1);
    info = trace_mem_get_info(memop, idx, 1);
    trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);

    if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
        swap = tcg_temp_new_i64();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(swap, val, 0);
            break;
        case MO_32:
            tcg_gen_bswap32_i64(swap, val, 0);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
    }

    addr = plugin_prep_mem_callbacks(addr);
    gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
    plugin_gen_mem_callbacks(addr, info);

    if (swap) {
        tcg_temp_free_i64(swap);
    }
}

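/*
 * Sign- or zero-extend a value according to the MO_SSIZE field of a
 * MemOp.  Used by the non-atomic and helper-based paths below to
 * normalize operands and results to the width of the memory operation.
 */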
static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i32(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i32(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i32(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i32(ret, val);
        break;
    default:
        tcg_gen_mov_i32(ret, val);
        break;
    }
}

static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i64(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i64(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i64(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i64(ret, val);
        break;
    case MO_SL:
        tcg_gen_ext32s_i64(ret, val);
        break;
    case MO_UL:
        tcg_gen_ext32u_i64(ret, val);
        break;
    default:
        tcg_gen_mov_i64(ret, val);
        break;
    }
}

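/*
 * Function pointer types matching the out-of-line atomic helpers.
 * Under softmmu the helpers take a trailing constant i32 holding the
 * TCGMemOpIdx; for user-only emulation they do not.
 */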
#ifdef CONFIG_SOFTMMU
typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv,
                                  TCGv_i32, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv,
                                  TCGv_i64, TCGv_i64, TCGv_i32);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv,
                                  TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv,
                                  TCGv_i64, TCGv_i32);
#else
typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64, TCGv_i64);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64);
#endif

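/*
 * Include 64-bit entries in the helper tables only when the host has
 * 64-bit atomics; otherwise those slots are omitted and the MO_64
 * paths below fall back to gen_helper_exit_atomic().
 */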
#ifdef CONFIG_ATOMIC64
# define WITH_ATOMIC64(X) X,
#else
# define WITH_ATOMIC64(X)
#endif

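/* Helper table for cmpxchg, indexed by MO_SIZE | MO_BSWAP. */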
static void * const table_cmpxchg[16] = {
    [MO_8] = gen_helper_atomic_cmpxchgb,
    [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
    [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
    [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
    [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le)
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be)
};

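/*
 * Generate a 32-bit compare-and-swap.  Without CF_PARALLEL the
 * operation is expanded inline as load + movcond + store, which is
 * sufficient when only one vCPU can be executing; otherwise an
 * out-of-line helper performs the access atomically.
 */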
void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
                                TCGv_i32 newv, TCGArg idx, MemOp memop)
{
    memop = tcg_canonicalize_memop(memop, 0, 0);

    if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();

        tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);

        tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
        tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
        tcg_gen_qemu_st_i32(t2, addr, idx, memop);
        tcg_temp_free_i32(t2);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i32(retv, t1, memop);
        } else {
            tcg_gen_mov_i32(retv, t1);
        }
        tcg_temp_free_i32(t1);
    } else {
        gen_atomic_cx_i32 gen;

        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
        tcg_debug_assert(gen != NULL);

#ifdef CONFIG_SOFTMMU
        {
            TCGMemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
            gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
        }
#else
        gen(retv, cpu_env, addr, cmpv, newv);
#endif

        if (memop & MO_SIGN) {
            tcg_gen_ext_i32(retv, retv, memop);
        }
    }
}

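/*
 * Generate a 64-bit compare-and-swap.  Without CF_PARALLEL the
 * operation is expanded inline; otherwise MO_64 accesses use a 64-bit
 * atomic helper when the host provides one (and exit the TB via
 * gen_helper_exit_atomic() when it does not), while smaller sizes are
 * narrowed to the 32-bit variant and the result widened back.
 */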
void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
                                TCGv_i64 newv, TCGArg idx, MemOp memop)
{
    memop = tcg_canonicalize_memop(memop, 1, 0);

    if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);

        tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
        tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
        tcg_gen_qemu_st_i64(t2, addr, idx, memop);
        tcg_temp_free_i64(t2);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, t1, memop);
        } else {
            tcg_gen_mov_i64(retv, t1);
        }
        tcg_temp_free_i64(t1);
    } else if ((memop & MO_SIZE) == MO_64) {
#ifdef CONFIG_ATOMIC64
        gen_atomic_cx_i64 gen;

        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
        tcg_debug_assert(gen != NULL);

#ifdef CONFIG_SOFTMMU
        {
            TCGMemOpIdx oi = make_memop_idx(memop, idx);
            gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
        }
#else
        gen(retv, cpu_env, addr, cmpv, newv);
#endif
#else
        gen_helper_exit_atomic(cpu_env);
        /* Produce a result, so that we have a well-formed opcode stream
           with respect to uses of the result in the (dead) code following.  */
        tcg_gen_movi_i64(retv, 0);
#endif /* CONFIG_ATOMIC64 */
    } else {
        TCGv_i32 c32 = tcg_temp_new_i32();
        TCGv_i32 n32 = tcg_temp_new_i32();
        TCGv_i32 r32 = tcg_temp_new_i32();

        tcg_gen_extrl_i64_i32(c32, cmpv);
        tcg_gen_extrl_i64_i32(n32, newv);
        tcg_gen_atomic_cmpxchg_i32(r32, addr, c32, n32, idx, memop & ~MO_SIGN);
        tcg_temp_free_i32(c32);
        tcg_temp_free_i32(n32);

        tcg_gen_extu_i32_i64(retv, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, retv, memop);
        }
    }
}

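/*
 * Expand a read-modify-write operation without atomicity: load, apply
 * 'gen' to the extended operands, store, and return either the old
 * value (new_val == false) or the new value (new_val == true).  The
 * atomic variant instead dispatches to the out-of-line helper selected
 * from 'table' by MO_SIZE | MO_BSWAP.
 */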
static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    memop = tcg_canonicalize_memop(memop, 0, 0);

    tcg_gen_qemu_ld_i32(t1, addr, idx, memop);
    tcg_gen_ext_i32(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i32(t2, addr, idx, memop);

    tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    gen_atomic_op_i32 gen;

    memop = tcg_canonicalize_memop(memop, 0, 0);

    gen = table[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

#ifdef CONFIG_SOFTMMU
    {
        TCGMemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
        gen(ret, cpu_env, addr, val, tcg_constant_i32(oi));
    }
#else
    gen(ret, cpu_env, addr, val);
#endif

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(ret, ret, memop);
    }
}

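/*
 * 64-bit counterparts of the helpers above.  In the atomic case, MO_64
 * operations require host 64-bit atomics (otherwise the TB exits via
 * gen_helper_exit_atomic()); smaller sizes are narrowed to the 32-bit
 * path and the result widened back, re-extending if MO_SIGN is set.
 */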
static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    memop = tcg_canonicalize_memop(memop, 1, 0);

    tcg_gen_qemu_ld_i64(t1, addr, idx, memop);
    tcg_gen_ext_i64(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i64(t2, addr, idx, memop);

    tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    memop = tcg_canonicalize_memop(memop, 1, 0);

    if ((memop & MO_SIZE) == MO_64) {
#ifdef CONFIG_ATOMIC64
        gen_atomic_op_i64 gen;

        gen = table[memop & (MO_SIZE | MO_BSWAP)];
        tcg_debug_assert(gen != NULL);

#ifdef CONFIG_SOFTMMU
        {
            TCGMemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
            gen(ret, cpu_env, addr, val, tcg_constant_i32(oi));
        }
#else
        gen(ret, cpu_env, addr, val);
#endif
#else
        gen_helper_exit_atomic(cpu_env);
        /* Produce a result, so that we have a well-formed opcode stream
           with respect to uses of the result in the (dead) code following.  */
        tcg_gen_movi_i64(ret, 0);
#endif /* CONFIG_ATOMIC64 */
    } else {
        TCGv_i32 v32 = tcg_temp_new_i32();
        TCGv_i32 r32 = tcg_temp_new_i32();

        tcg_gen_extrl_i64_i32(v32, val);
        do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table);
        tcg_temp_free_i32(v32);

        tcg_gen_extu_i32_i64(ret, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(ret, ret, memop);
        }
    }
}

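/*
 * Instantiate tcg_gen_atomic_<NAME>_i32/_i64 for a read-modify-write
 * operation OP, together with the helper table used on the CF_PARALLEL
 * path.  NEW selects whether the returned value is the memory contents
 * before (0) or after (1) the operation.
 */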
#define GEN_ATOMIC_HELPER(NAME, OP, NEW)                                \
static void * const table_##NAME[16] = {                                \
    [MO_8] = gen_helper_atomic_##NAME##b,                               \
    [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le,                   \
    [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be,                   \
    [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le,                   \
    [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be,                   \
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le)     \
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be)     \
};                                                                      \
void tcg_gen_atomic_##NAME##_i32                                        \
    (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop)    \
{                                                                       \
    if (tcg_ctx->tb_cflags & CF_PARALLEL) {                             \
        do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i32);                        \
    }                                                                   \
}                                                                       \
void tcg_gen_atomic_##NAME##_i64                                        \
    (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop)    \
{                                                                       \
    if (tcg_ctx->tb_cflags & CF_PARALLEL) {                             \
        do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i64);                        \
    }                                                                   \
}

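/*
 * The expansions below provide the fetch_<op> (return the old value)
 * and <op>_fetch (return the new value) families.  As an illustrative,
 * hypothetical use, a front end could emit a target-endian 32-bit
 * atomic add that yields the previous memory value with something like:
 *
 *     tcg_gen_atomic_fetch_add_i32(ret, addr, val, mmu_idx, MO_TEUL);
 *
 * where mmu_idx is whatever MMU index the front end uses for the access.
 */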
GEN_ATOMIC_HELPER(fetch_add, add, 0)
GEN_ATOMIC_HELPER(fetch_and, and, 0)
GEN_ATOMIC_HELPER(fetch_or, or, 0)
GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
GEN_ATOMIC_HELPER(fetch_smin, smin, 0)
GEN_ATOMIC_HELPER(fetch_umin, umin, 0)
GEN_ATOMIC_HELPER(fetch_smax, smax, 0)
GEN_ATOMIC_HELPER(fetch_umax, umax, 0)

GEN_ATOMIC_HELPER(add_fetch, add, 1)
GEN_ATOMIC_HELPER(and_fetch, and, 1)
GEN_ATOMIC_HELPER(or_fetch, or, 1)
GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
GEN_ATOMIC_HELPER(smin_fetch, smin, 1)
GEN_ATOMIC_HELPER(umin_fetch, umin, 1)
GEN_ATOMIC_HELPER(smax_fetch, smax, 1)
GEN_ATOMIC_HELPER(umax_fetch, umax, 1)

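/*
 * Exchange is expressed through the same machinery: the "operation"
 * ignores the loaded value and simply selects the new one, and NEW == 0
 * returns the old memory contents.
 */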
static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mov_i32(r, b);
}

static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mov_i64(r, b);
}

GEN_ATOMIC_HELPER(xchg, mov2, 0)

#undef GEN_ATOMIC_HELPER