/*** VSX extension ***/

static inline TCGv_i64 cpu_vsrh(int n)
{
    if (n < 32) {
        return cpu_fpr[n];
    } else {
        return cpu_avrh[n-32];
    }
}

static inline TCGv_i64 cpu_vsrl(int n)
{
    if (n < 32) {
        return cpu_vsr[n];
    } else {
        return cpu_avrl[n-32];
    }
}

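/*
 * VSRs 0-31 alias the FPRs in their high doubleword (the low doubleword
 * lives in cpu_vsr[]), while VSRs 32-63 alias the Altivec registers
 * (cpu_avrh/cpu_avrl); hence the n < 32 dispatch in the helpers above.
 *
 * VSX_LOAD_SCALAR expands to an indexed scalar load: raise a VSX
 * unavailable exception if the facility is disabled, compute the
 * effective address, and load into the target VSR's high doubleword.
 */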
#define VSX_LOAD_SCALAR(name, operation)                      \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    TCGv EA;                                                  \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                     \
    EA = tcg_temp_new();                                      \
    gen_addr_reg_index(ctx, EA);                              \
    gen_qemu_##operation(ctx, cpu_vsrh(xT(ctx->opcode)), EA); \
    /* NOTE: cpu_vsrl is undefined */                         \
    tcg_temp_free(EA);                                        \
}

VSX_LOAD_SCALAR(lxsdx, ld64_i64)
VSX_LOAD_SCALAR(lxsiwax, ld32s_i64)
VSX_LOAD_SCALAR(lxsibzx, ld8u_i64)
VSX_LOAD_SCALAR(lxsihzx, ld16u_i64)
VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64)
VSX_LOAD_SCALAR(lxsspx, ld32fs)

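/* lxvd2x: load the two doublewords of VSR[xT] from EA and EA+8. */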
static void gen_lxvd2x(DisasContext *ctx)
{
    TCGv EA;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
    tcg_gen_addi_tl(EA, EA, 8);
    gen_qemu_ld64_i64(ctx, cpu_vsrl(xT(ctx->opcode)), EA);
    tcg_temp_free(EA);
}

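/* lxvdsx: load one doubleword and splat it into both halves of VSR[xT]. */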
static void gen_lxvdsx(DisasContext *ctx)
{
    TCGv EA;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
    tcg_temp_free(EA);
}

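/*
 * lxvw4x: load four words.  In little-endian mode the two words within
 * each loaded doubleword must be exchanged, which is done with a
 * shift-right by 32 plus a deposit of the low word into the high half.
 */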
static void gen_lxvw4x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
    TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();

    gen_addr_reg_index(ctx, EA);
    if (ctx->le_mode) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
        tcg_gen_shri_i64(t1, t0, 32);
        tcg_gen_deposit_i64(xth, t1, t0, 32, 32);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
        tcg_gen_shri_i64(t1, t0, 32);
        tcg_gen_deposit_i64(xtl, t1, t0, 32, 32);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    } else {
        tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
    }
    tcg_temp_free(EA);
}

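/*
 * Byte-swap each of the eight halfwords held in the inh:inl pair using
 * the usual mask-and-shift trick, e.g. 0x1122334455667788 becomes
 * 0x2211443366558877.
 */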
static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl,
                          TCGv_i64 inh, TCGv_i64 inl)
{
    TCGv_i64 mask = tcg_const_i64(0x00FF00FF00FF00FF);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* outh = ((inh & mask) << 8) | ((inh >> 8) & mask) */
    tcg_gen_and_i64(t0, inh, mask);
    tcg_gen_shli_i64(t0, t0, 8);
    tcg_gen_shri_i64(t1, inh, 8);
    tcg_gen_and_i64(t1, t1, mask);
    tcg_gen_or_i64(outh, t0, t1);

    /* outl = ((inl & mask) << 8) | ((inl >> 8) & mask) */
    tcg_gen_and_i64(t0, inl, mask);
    tcg_gen_shli_i64(t0, t0, 8);
    tcg_gen_shri_i64(t1, inl, 8);
    tcg_gen_and_i64(t1, t1, mask);
    tcg_gen_or_i64(outl, t0, t1);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(mask);
}

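/*
 * lxvh8x: load eight halfwords.  The 16 bytes are fetched as two
 * big-endian doublewords; little-endian mode then needs only the
 * halfword byte-swap above.
 */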
static void gen_lxvh8x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
    TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);

    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
    tcg_gen_addi_tl(EA, EA, 8);
    tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
    if (ctx->le_mode) {
        gen_bswap16x8(xth, xtl, xth, xtl);
    }
    tcg_temp_free(EA);
}

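/*
 * lxvb16x: load sixteen bytes.  A byte array has no internal byte
 * order, so the same two big-endian doubleword loads serve both
 * endian modes.
 */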
static void gen_lxvb16x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
    TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
    tcg_gen_addi_tl(EA, EA, 8);
    tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
    tcg_temp_free(EA);
}

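/*
 * VSX_STORE_SCALAR mirrors VSX_LOAD_SCALAR: the high doubleword of
 * VSR[xS] is stored to the computed effective address.
 */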
#define VSX_STORE_SCALAR(name, operation)                     \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    TCGv EA;                                                  \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                     \
    EA = tcg_temp_new();                                      \
    gen_addr_reg_index(ctx, EA);                              \
    gen_qemu_##operation(ctx, cpu_vsrh(xS(ctx->opcode)), EA); \
    tcg_temp_free(EA);                                        \
}

VSX_STORE_SCALAR(stxsdx, st64_i64)

VSX_STORE_SCALAR(stxsibx, st8_i64)
VSX_STORE_SCALAR(stxsihx, st16_i64)
VSX_STORE_SCALAR(stxsiwx, st32_i64)
VSX_STORE_SCALAR(stxsspx, st32fs)

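/* stxvd2x: store the two doublewords of VSR[xS] to EA and EA+8. */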
static void gen_stxvd2x(DisasContext *ctx)
{
    TCGv EA;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_st64_i64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
    tcg_gen_addi_tl(EA, EA, 8);
    gen_qemu_st64_i64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
    tcg_temp_free(EA);
}

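/*
 * stxvw4x: store four words, swapping the words within each doubleword
 * first when running little-endian (the inverse of the lxvw4x fixup).
 */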
static void gen_stxvw4x(DisasContext *ctx)
{
    TCGv_i64 xsh = cpu_vsrh(xS(ctx->opcode));
    TCGv_i64 xsl = cpu_vsrl(xS(ctx->opcode));
    TCGv EA;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    if (ctx->le_mode) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        tcg_gen_shri_i64(t0, xsh, 32);
        tcg_gen_deposit_i64(t1, t0, xsh, 32, 32);
        tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_shri_i64(t0, xsl, 32);
        tcg_gen_deposit_i64(t1, t0, xsl, 32, 32);
        tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    } else {
        tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
    }
    tcg_temp_free(EA);
}

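/* stxvh8x: store eight halfwords, byte-swapping them first in LE mode. */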
static void gen_stxvh8x(DisasContext *ctx)
{
    TCGv_i64 xsh = cpu_vsrh(xS(ctx->opcode));
    TCGv_i64 xsl = cpu_vsrl(xS(ctx->opcode));
    TCGv EA;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    if (ctx->le_mode) {
        TCGv_i64 outh = tcg_temp_new_i64();
        TCGv_i64 outl = tcg_temp_new_i64();

        gen_bswap16x8(outh, outl, xsh, xsl);
        tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEQ);
        tcg_temp_free_i64(outh);
        tcg_temp_free_i64(outl);
    } else {
        tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
    }
    tcg_temp_free(EA);
}

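/* stxvb16x: store sixteen bytes; no swap is needed for a byte array. */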
static void gen_stxvb16x(DisasContext *ctx)
{
    TCGv_i64 xsh = cpu_vsrh(xS(ctx->opcode));
    TCGv_i64 xsl = cpu_vsrl(xS(ctx->opcode));
    TCGv EA;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
    tcg_gen_addi_tl(EA, EA, 8);
    tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
    tcg_temp_free(EA);
}

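/*
 * Word-sized moves between GPRs and VSRs.  The facility check depends
 * on which half of the VSR space is addressed: VSRs 0-31 live in the
 * FPRs (FP unavailable exception), VSRs 32-63 in the Altivec registers
 * (vector unavailable exception).
 */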
#define MV_VSRW(name, tcgop1, tcgop2, target, source)         \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    if (xS(ctx->opcode) < 32) {                               \
        if (unlikely(!ctx->fpu_enabled)) {                    \
            gen_exception(ctx, POWERPC_EXCP_FPU);             \
            return;                                           \
        }                                                     \
    } else {                                                  \
        if (unlikely(!ctx->altivec_enabled)) {                \
            gen_exception(ctx, POWERPC_EXCP_VPU);             \
            return;                                           \
        }                                                     \
    }                                                         \
    TCGv_i64 tmp = tcg_temp_new_i64();                        \
    tcg_gen_##tcgop1(tmp, source);                            \
    tcg_gen_##tcgop2(target, tmp);                            \
    tcg_temp_free_i64(tmp);                                   \
}


MV_VSRW(mfvsrwz, ext32u_i64, trunc_i64_tl, cpu_gpr[rA(ctx->opcode)], \
        cpu_vsrh(xS(ctx->opcode)))
MV_VSRW(mtvsrwa, extu_tl_i64, ext32s_i64, cpu_vsrh(xT(ctx->opcode)), \
        cpu_gpr[rA(ctx->opcode)])
MV_VSRW(mtvsrwz, extu_tl_i64, ext32u_i64, cpu_vsrh(xT(ctx->opcode)), \
        cpu_gpr[rA(ctx->opcode)])

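/* Doubleword-width moves only exist on 64-bit implementations. */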
#if defined(TARGET_PPC64)
#define MV_VSRD(name, target, source)                         \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    if (xS(ctx->opcode) < 32) {                               \
        if (unlikely(!ctx->fpu_enabled)) {                    \
            gen_exception(ctx, POWERPC_EXCP_FPU);             \
            return;                                           \
        }                                                     \
    } else {                                                  \
        if (unlikely(!ctx->altivec_enabled)) {                \
            gen_exception(ctx, POWERPC_EXCP_VPU);             \
            return;                                           \
        }                                                     \
    }                                                         \
    tcg_gen_mov_i64(target, source);                          \
}

MV_VSRD(mfvsrd, cpu_gpr[rA(ctx->opcode)], cpu_vsrh(xS(ctx->opcode)))
MV_VSRD(mtvsrd, cpu_vsrh(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)])

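/* mfvsrld (ISA 3.0): copy the low doubleword of VSR[xS] to GPR[rA]. */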
static void gen_mfvsrld(DisasContext *ctx)
{
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }

    tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], cpu_vsrl(xS(ctx->opcode)));
}

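/*
 * mtvsrdd (ISA 3.0): assemble VSR[xT] from two GPRs.  rA == 0 selects
 * the constant zero for the high doubleword rather than GPR[0].
 */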
static void gen_mtvsrdd(DisasContext *ctx)
{
    if (xT(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }

    if (!rA(ctx->opcode)) {
        tcg_gen_movi_i64(cpu_vsrh(xT(ctx->opcode)), 0);
    } else {
        tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)]);
    }

    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_gpr[rB(ctx->opcode)]);
}

#endif

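/*
 * xxpermdi: the two-bit DM field picks one doubleword from xA for the
 * target's high half and one from xB for the low half.  When xT aliases
 * xA or xB, the inputs are copied to temporaries first so the first
 * write cannot clobber a doubleword that is still to be read.
 */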
static void gen_xxpermdi(DisasContext *ctx)
{
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }

    if (unlikely((xT(ctx->opcode) == xA(ctx->opcode)) ||
                 (xT(ctx->opcode) == xB(ctx->opcode)))) {
        TCGv_i64 xh, xl;

        xh = tcg_temp_new_i64();
        xl = tcg_temp_new_i64();

        if ((DM(ctx->opcode) & 2) == 0) {
            tcg_gen_mov_i64(xh, cpu_vsrh(xA(ctx->opcode)));
        } else {
            tcg_gen_mov_i64(xh, cpu_vsrl(xA(ctx->opcode)));
        }
        if ((DM(ctx->opcode) & 1) == 0) {
            tcg_gen_mov_i64(xl, cpu_vsrh(xB(ctx->opcode)));
        } else {
            tcg_gen_mov_i64(xl, cpu_vsrl(xB(ctx->opcode)));
        }

        tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xh);
        tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xl);

        tcg_temp_free_i64(xh);
        tcg_temp_free_i64(xl);
    } else {
        if ((DM(ctx->opcode) & 2) == 0) {
            tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)));
        } else {
            tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)));
        }
        if ((DM(ctx->opcode) & 1) == 0) {
            tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xB(ctx->opcode)));
        } else {
            tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xB(ctx->opcode)));
        }
    }
}

#define OP_ABS 1
#define OP_NABS 2
#define OP_NEG 3
#define OP_CPSGN 4
#define SGN_MASK_DP 0x8000000000000000ull
#define SGN_MASK_SP 0x8000000080000000ull

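/*
 * Sign-bit manipulation implements the scalar move operations:
 * abs clears the sign bit (andc), nabs sets it (or), neg flips it
 * (xor), and copy-sign splices xA's sign bit onto xB's magnitude.
 */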
#define VSX_SCALAR_MOVE(name, op, sgn_mask)                   \
static void glue(gen_, name)(DisasContext * ctx)              \
{                                                             \
    TCGv_i64 xb, sgm;                                         \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    xb = tcg_temp_new_i64();                                  \
    sgm = tcg_temp_new_i64();                                 \
    tcg_gen_mov_i64(xb, cpu_vsrh(xB(ctx->opcode)));           \
    tcg_gen_movi_i64(sgm, sgn_mask);                          \
    switch (op) {                                             \
    case OP_ABS: {                                            \
        tcg_gen_andc_i64(xb, xb, sgm);                        \
        break;                                                \
    }                                                         \
    case OP_NABS: {                                           \
        tcg_gen_or_i64(xb, xb, sgm);                          \
        break;                                                \
    }                                                         \
    case OP_NEG: {                                            \
        tcg_gen_xor_i64(xb, xb, sgm);                         \
        break;                                                \
    }                                                         \
    case OP_CPSGN: {                                          \
        TCGv_i64 xa = tcg_temp_new_i64();                     \
        tcg_gen_mov_i64(xa, cpu_vsrh(xA(ctx->opcode)));       \
        tcg_gen_and_i64(xa, xa, sgm);                         \
        tcg_gen_andc_i64(xb, xb, sgm);                        \
        tcg_gen_or_i64(xb, xb, xa);                           \
        tcg_temp_free_i64(xa);                                \
        break;                                                \
    }                                                         \
    }                                                         \
    tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xb);           \
    tcg_temp_free_i64(xb);                                    \
    tcg_temp_free_i64(sgm);                                   \
}

VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP)
VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP)
VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP)
VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP)

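/*
 * The vector forms apply the same sign-bit operations to both
 * doublewords; SGN_MASK_SP carries a sign bit in each 32-bit word, so
 * the single-precision variants touch all four elements at once.
 */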
#define VSX_VECTOR_MOVE(name, op, sgn_mask)                   \
static void glue(gen_, name)(DisasContext * ctx)              \
{                                                             \
    TCGv_i64 xbh, xbl, sgm;                                   \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    xbh = tcg_temp_new_i64();                                 \
    xbl = tcg_temp_new_i64();                                 \
    sgm = tcg_temp_new_i64();                                 \
    tcg_gen_mov_i64(xbh, cpu_vsrh(xB(ctx->opcode)));          \
    tcg_gen_mov_i64(xbl, cpu_vsrl(xB(ctx->opcode)));          \
    tcg_gen_movi_i64(sgm, sgn_mask);                          \
    switch (op) {                                             \
    case OP_ABS: {                                            \
        tcg_gen_andc_i64(xbh, xbh, sgm);                      \
        tcg_gen_andc_i64(xbl, xbl, sgm);                      \
        break;                                                \
    }                                                         \
    case OP_NABS: {                                           \
        tcg_gen_or_i64(xbh, xbh, sgm);                        \
        tcg_gen_or_i64(xbl, xbl, sgm);                        \
        break;                                                \
    }                                                         \
    case OP_NEG: {                                            \
        tcg_gen_xor_i64(xbh, xbh, sgm);                       \
        tcg_gen_xor_i64(xbl, xbl, sgm);                       \
        break;                                                \
    }                                                         \
    case OP_CPSGN: {                                          \
        TCGv_i64 xah = tcg_temp_new_i64();                    \
        TCGv_i64 xal = tcg_temp_new_i64();                    \
        tcg_gen_mov_i64(xah, cpu_vsrh(xA(ctx->opcode)));      \
        tcg_gen_mov_i64(xal, cpu_vsrl(xA(ctx->opcode)));      \
        tcg_gen_and_i64(xah, xah, sgm);                       \
        tcg_gen_and_i64(xal, xal, sgm);                       \
        tcg_gen_andc_i64(xbh, xbh, sgm);                      \
        tcg_gen_andc_i64(xbl, xbl, sgm);                      \
        tcg_gen_or_i64(xbh, xbh, xah);                        \
        tcg_gen_or_i64(xbl, xbl, xal);                        \
        tcg_temp_free_i64(xah);                               \
        tcg_temp_free_i64(xal);                               \
        break;                                                \
    }                                                         \
    }                                                         \
    tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xbh);          \
    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xbl);          \
    tcg_temp_free_i64(xbh);                                   \
    tcg_temp_free_i64(xbl);                                   \
    tcg_temp_free_i64(sgm);                                   \
}

VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP)
VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP)
VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP)
VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP)
VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)

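/*
 * The arithmetic, compare, convert and round operations need full IEEE
 * semantics (status flags, exceptions), so they are generated as calls
 * to out-of-line helpers that receive the raw opcode and decode the
 * operands themselves.
 */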
#define GEN_VSX_HELPER_2(name, op1, op2, inval, type)         \
static void gen_##name(DisasContext * ctx)                    \
{                                                             \
    TCGv_i32 opc;                                             \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    opc = tcg_const_i32(ctx->opcode);                         \
    gen_helper_##name(cpu_env, opc);                          \
    tcg_temp_free_i32(opc);                                   \
}

#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
static void gen_##name(DisasContext * ctx)                    \
{                                                             \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    gen_helper_##name(cpu_vsrh(xT(ctx->opcode)), cpu_env,     \
                      cpu_vsrh(xB(ctx->opcode)));             \
}

GEN_VSX_HELPER_2(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xssubdp, 0x00, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmuldp, 0x00, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsdivdp, 0x00, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmaddadp, 0x04, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmaddmdp, 0x04, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmsubadp, 0x04, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmsubmdp, 0x04, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsnmaddadp, 0x04, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsnmaddmdp, 0x04, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsnmsubadp, 0x04, 0x16, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsnmsubmdp, 0x04, 0x17, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xscvspdpn, 0x16, 0x14, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207)

GEN_VSX_HELPER_2(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmaddasp, 0x04, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmaddmsp, 0x04, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmsubasp, 0x04, 0x02, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmsubmsp, 0x04, 0x03, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsnmaddasp, 0x04, 0x10, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsnmaddmsp, 0x04, 0x11, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsnmsubasp, 0x04, 0x12, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsnmsubmsp, 0x04, 0x13, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)

GEN_VSX_HELPER_2(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaddadp, 0x04, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaddmdp, 0x04, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmsubadp, 0x04, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmsubmdp, 0x04, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmaddadp, 0x04, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmaddmdp, 0x04, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmsubadp, 0x04, 0x1E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmsubmdp, 0x04, 0x1F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmindp, 0x00, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)

GEN_VSX_HELPER_2(xvaddsp, 0x00, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvsubsp, 0x00, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaddasp, 0x04, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaddmsp, 0x04, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmsubasp, 0x04, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmsubmsp, 0x04, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmaddasp, 0x04, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmaddmsp, 0x04, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmsubasp, 0x04, 0x1A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmsubmsp, 0x04, 0x1B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvminsp, 0x00, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspi, 0x12, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)

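/*
 * The 128-bit logical operations decompose into the corresponding
 * 64-bit TCG operation applied to each doubleword independently.
 */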
#define VSX_LOGICAL(name, tcg_op)                                \
static void glue(gen_, name)(DisasContext * ctx)                 \
{                                                                \
    if (unlikely(!ctx->vsx_enabled)) {                           \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                   \
        return;                                                  \
    }                                                            \
    tcg_op(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)), \
           cpu_vsrh(xB(ctx->opcode)));                           \
    tcg_op(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)), \
           cpu_vsrl(xB(ctx->opcode)));                           \
}

VSX_LOGICAL(xxland, tcg_gen_and_i64)
VSX_LOGICAL(xxlandc, tcg_gen_andc_i64)
VSX_LOGICAL(xxlor, tcg_gen_or_i64)
VSX_LOGICAL(xxlxor, tcg_gen_xor_i64)
VSX_LOGICAL(xxlnor, tcg_gen_nor_i64)
VSX_LOGICAL(xxleqv, tcg_gen_eqv_i64)
VSX_LOGICAL(xxlnand, tcg_gen_nand_i64)
VSX_LOGICAL(xxlorc, tcg_gen_orc_i64)

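/*
 * xxmrghw/xxmrglw interleave the two words of the high (resp. low)
 * doubleword of xA and xB: the result is { a0, b0, a1, b1 }, built
 * with two shift/deposit pairs.
 */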
#define VSX_XXMRG(name, high)                                 \
static void glue(gen_, name)(DisasContext * ctx)              \
{                                                             \
    TCGv_i64 a0, a1, b0, b1;                                  \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    a0 = tcg_temp_new_i64();                                  \
    a1 = tcg_temp_new_i64();                                  \
    b0 = tcg_temp_new_i64();                                  \
    b1 = tcg_temp_new_i64();                                  \
    if (high) {                                               \
        tcg_gen_mov_i64(a0, cpu_vsrh(xA(ctx->opcode)));       \
        tcg_gen_mov_i64(a1, cpu_vsrh(xA(ctx->opcode)));       \
        tcg_gen_mov_i64(b0, cpu_vsrh(xB(ctx->opcode)));       \
        tcg_gen_mov_i64(b1, cpu_vsrh(xB(ctx->opcode)));       \
    } else {                                                  \
        tcg_gen_mov_i64(a0, cpu_vsrl(xA(ctx->opcode)));       \
        tcg_gen_mov_i64(a1, cpu_vsrl(xA(ctx->opcode)));       \
        tcg_gen_mov_i64(b0, cpu_vsrl(xB(ctx->opcode)));       \
        tcg_gen_mov_i64(b1, cpu_vsrl(xB(ctx->opcode)));       \
    }                                                         \
    tcg_gen_shri_i64(a0, a0, 32);                             \
    tcg_gen_shri_i64(b0, b0, 32);                             \
    tcg_gen_deposit_i64(cpu_vsrh(xT(ctx->opcode)),            \
                        b0, a0, 32, 32);                      \
    tcg_gen_deposit_i64(cpu_vsrl(xT(ctx->opcode)),            \
                        b1, a1, 32, 32);                      \
    tcg_temp_free_i64(a0);                                    \
    tcg_temp_free_i64(a1);                                    \
    tcg_temp_free_i64(b0);                                    \
    tcg_temp_free_i64(b1);                                    \
}

VSX_XXMRG(xxmrghw, 1)
VSX_XXMRG(xxmrglw, 0)

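/*
 * xxsel: bitwise select, xT = (xA & ~xC) | (xB & xC), computed per
 * doubleword.
 */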
static void gen_xxsel(DisasContext * ctx)
{
    TCGv_i64 a, b, c;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    a = tcg_temp_new_i64();
    b = tcg_temp_new_i64();
    c = tcg_temp_new_i64();

    tcg_gen_mov_i64(a, cpu_vsrh(xA(ctx->opcode)));
    tcg_gen_mov_i64(b, cpu_vsrh(xB(ctx->opcode)));
    tcg_gen_mov_i64(c, cpu_vsrh(xC(ctx->opcode)));

    tcg_gen_and_i64(b, b, c);
    tcg_gen_andc_i64(a, a, c);
    tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), a, b);

    tcg_gen_mov_i64(a, cpu_vsrl(xA(ctx->opcode)));
    tcg_gen_mov_i64(b, cpu_vsrl(xB(ctx->opcode)));
    tcg_gen_mov_i64(c, cpu_vsrl(xC(ctx->opcode)));

    tcg_gen_and_i64(b, b, c);
    tcg_gen_andc_i64(a, a, c);
    tcg_gen_or_i64(cpu_vsrl(xT(ctx->opcode)), a, b);

    tcg_temp_free_i64(a);
    tcg_temp_free_i64(b);
    tcg_temp_free_i64(c);
}

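/*
 * xxspltw: replicate one of xB's four words into all four word slots
 * of xT.  UIM bit 1 picks the doubleword, bit 0 the word within it.
 */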
static void gen_xxspltw(DisasContext *ctx)
{
    TCGv_i64 b, b2;
    TCGv_i64 vsr = (UIM(ctx->opcode) & 2) ?
                   cpu_vsrl(xB(ctx->opcode)) :
                   cpu_vsrh(xB(ctx->opcode));

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }

    b = tcg_temp_new_i64();
    b2 = tcg_temp_new_i64();

    if (UIM(ctx->opcode) & 1) {
        tcg_gen_ext32u_i64(b, vsr);
    } else {
        tcg_gen_shri_i64(b, vsr, 32);
    }

    tcg_gen_shli_i64(b2, b, 32);
    tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), b, b2);
    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));

    tcg_temp_free_i64(b);
    tcg_temp_free_i64(b2);
}

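/*
 * pattern() replicates a byte across all eight lanes of a 64-bit
 * value: ~(uint64_t)0 / 0xff is 0x0101010101010101, so e.g.
 * pattern(0xAB) == 0xABABABABABABABABull.  xxspltib uses it to splat
 * the 8-bit immediate across the whole VSR; as elsewhere, VSRs 0-31
 * require the VSX facility and VSRs 32-63 the Altivec facility.
 */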
#define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))

static void gen_xxspltib(DisasContext *ctx)
{
    unsigned char uim8 = IMM8(ctx->opcode);
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    tcg_gen_movi_i64(cpu_vsrh(xT(ctx->opcode)), pattern(uim8));
    tcg_gen_movi_i64(cpu_vsrl(xT(ctx->opcode)), pattern(uim8));
}

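/*
 * xxsldwi: shift the 32-byte concatenation xA:xB left by SHW words and
 * keep the upper quadword.  Even shifts move whole doublewords; odd
 * shifts stitch each result doubleword together from the halves of two
 * adjacent source doublewords.
 */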
static void gen_xxsldwi(DisasContext *ctx)
{
    TCGv_i64 xth, xtl;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();

    switch (SHW(ctx->opcode)) {
        case 0: {
            tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
            tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
            break;
        }
        case 1: {
            TCGv_i64 t0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
            tcg_gen_shli_i64(xth, xth, 32);
            tcg_gen_mov_i64(t0, cpu_vsrl(xA(ctx->opcode)));
            tcg_gen_shri_i64(t0, t0, 32);
            tcg_gen_or_i64(xth, xth, t0);
            tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
            tcg_gen_shli_i64(xtl, xtl, 32);
            tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
            tcg_gen_shri_i64(t0, t0, 32);
            tcg_gen_or_i64(xtl, xtl, t0);
            tcg_temp_free_i64(t0);
            break;
        }
        case 2: {
            tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
            tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
            break;
        }
        case 3: {
            TCGv_i64 t0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
            tcg_gen_shli_i64(xth, xth, 32);
            tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
            tcg_gen_shri_i64(t0, t0, 32);
            tcg_gen_or_i64(xth, xth, t0);
            tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
            tcg_gen_shli_i64(xtl, xtl, 32);
            tcg_gen_mov_i64(t0, cpu_vsrl(xB(ctx->opcode)));
            tcg_gen_shri_i64(t0, t0, 32);
            tcg_gen_or_i64(xtl, xtl, t0);
            tcg_temp_free_i64(t0);
            break;
        }
    }

    tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xth);
    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xtl);

    tcg_temp_free_i64(xth);
    tcg_temp_free_i64(xtl);
}

#undef GEN_XX2FORM
#undef GEN_XX3FORM
#undef GEN_XX2IFORM
#undef GEN_XX3_RC_FORM
#undef GEN_XX3FORM_DM
#undef VSX_LOGICAL