/*
 * Source: qemu.git, target-ppc/fpu_helper.c
 * (tree as of commit "target-ppc: Add VSX xmax/xmin Instructions")
 */
1 /*
2  *  PowerPC floating point and SPE emulation helpers for QEMU.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "cpu.h"
20 #include "helper.h"
21
22 /*****************************************************************************/
23 /* Floating point operations helpers */
24 uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
25 {
26     CPU_FloatU f;
27     CPU_DoubleU d;
28
29     f.l = arg;
30     d.d = float32_to_float64(f.f, &env->fp_status);
31     return d.ll;
32 }
33
34 uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
35 {
36     CPU_FloatU f;
37     CPU_DoubleU d;
38
39     d.ll = arg;
40     f.f = float64_to_float32(d.d, &env->fp_status);
41     return f.l;
42 }
43
44 static inline int isden(float64 d)
45 {
46     CPU_DoubleU u;
47
48     u.d = d;
49
50     return ((u.ll >> 52) & 0x7FF) == 0;
51 }
52
/* Classify the double image in 'arg' and compute the 5-bit PowerPC FPRF
 * (result flags) value.  When 'set_fprf' is non-zero the FPSCR[FPRF]
 * field is updated with all five bits; in any case the low four bits
 * (FPCC) are returned so the caller can update CR1.
 */
uint32_t helper_compute_fprf(CPUPPCState *env, uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;

    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_any_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg) {
            ret = 0x09;
        } else {
            ret = 0x05;
        }
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg) {
                ret = 0x12;
            } else {
                ret = 0x02;
            }
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            /* Fold the sign into the class code */
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF (5-bit field) */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
107
108 /* Floating-point invalid operations exception */
109 static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op,
110                                              int set_fpcc)
111 {
112     uint64_t ret = 0;
113     int ve;
114
115     ve = fpscr_ve;
116     switch (op) {
117     case POWERPC_EXCP_FP_VXSNAN:
118         env->fpscr |= 1 << FPSCR_VXSNAN;
119         break;
120     case POWERPC_EXCP_FP_VXSOFT:
121         env->fpscr |= 1 << FPSCR_VXSOFT;
122         break;
123     case POWERPC_EXCP_FP_VXISI:
124         /* Magnitude subtraction of infinities */
125         env->fpscr |= 1 << FPSCR_VXISI;
126         goto update_arith;
127     case POWERPC_EXCP_FP_VXIDI:
128         /* Division of infinity by infinity */
129         env->fpscr |= 1 << FPSCR_VXIDI;
130         goto update_arith;
131     case POWERPC_EXCP_FP_VXZDZ:
132         /* Division of zero by zero */
133         env->fpscr |= 1 << FPSCR_VXZDZ;
134         goto update_arith;
135     case POWERPC_EXCP_FP_VXIMZ:
136         /* Multiplication of zero by infinity */
137         env->fpscr |= 1 << FPSCR_VXIMZ;
138         goto update_arith;
139     case POWERPC_EXCP_FP_VXVC:
140         /* Ordered comparison of NaN */
141         env->fpscr |= 1 << FPSCR_VXVC;
142         if (set_fpcc) {
143             env->fpscr &= ~(0xF << FPSCR_FPCC);
144             env->fpscr |= 0x11 << FPSCR_FPCC;
145         }
146         /* We must update the target FPR before raising the exception */
147         if (ve != 0) {
148             env->exception_index = POWERPC_EXCP_PROGRAM;
149             env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
150             /* Update the floating-point enabled exception summary */
151             env->fpscr |= 1 << FPSCR_FEX;
152             /* Exception is differed */
153             ve = 0;
154         }
155         break;
156     case POWERPC_EXCP_FP_VXSQRT:
157         /* Square root of a negative number */
158         env->fpscr |= 1 << FPSCR_VXSQRT;
159     update_arith:
160         env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
161         if (ve == 0) {
162             /* Set the result to quiet NaN */
163             ret = 0x7FF8000000000000ULL;
164             if (set_fpcc) {
165                 env->fpscr &= ~(0xF << FPSCR_FPCC);
166                 env->fpscr |= 0x11 << FPSCR_FPCC;
167             }
168         }
169         break;
170     case POWERPC_EXCP_FP_VXCVI:
171         /* Invalid conversion */
172         env->fpscr |= 1 << FPSCR_VXCVI;
173         env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
174         if (ve == 0) {
175             /* Set the result to quiet NaN */
176             ret = 0x7FF8000000000000ULL;
177             if (set_fpcc) {
178                 env->fpscr &= ~(0xF << FPSCR_FPCC);
179                 env->fpscr |= 0x11 << FPSCR_FPCC;
180             }
181         }
182         break;
183     }
184     /* Update the floating-point invalid operation summary */
185     env->fpscr |= 1 << FPSCR_VX;
186     /* Update the floating-point exception summary */
187     env->fpscr |= 1 << FPSCR_FX;
188     if (ve != 0) {
189         /* Update the floating-point enabled exception summary */
190         env->fpscr |= 1 << FPSCR_FEX;
191         if (msr_fe0 != 0 || msr_fe1 != 0) {
192             helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
193                                        POWERPC_EXCP_FP | op);
194         }
195     }
196     return ret;
197 }
198
/* Zero-divide exception: set FPSCR[ZX], clear FR/FI, and raise a
 * program exception when zero-divide exceptions are enabled and the
 * MSR FE bits allow synchronous FP exceptions.
 */
static inline void float_zero_divide_excp(CPUPPCState *env)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}
214
/* Overflow exception: set FPSCR[OX]; if enabled, record a deferred
 * program exception (the caller updates the target FPR first and later
 * raises it), otherwise mark the result inexact (XX/FI).
 */
static inline void float_overflow_excp(CPUPPCState *env)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}
232
/* Underflow exception: set FPSCR[UX]; if enabled, record a deferred
 * program exception for the caller to raise after the FPR update.
 */
static inline void float_underflow_excp(CPUPPCState *env)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}
247
/* Inexact exception: set FPSCR[XX]; if enabled, record a deferred
 * program exception for the caller to raise after the FPR update.
 */
static inline void float_inexact_excp(CPUPPCState *env)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
261
262 static inline void fpscr_set_rounding_mode(CPUPPCState *env)
263 {
264     int rnd_type;
265
266     /* Set rounding mode */
267     switch (fpscr_rn) {
268     case 0:
269         /* Best approximation (round to nearest) */
270         rnd_type = float_round_nearest_even;
271         break;
272     case 1:
273         /* Smaller magnitude (round toward zero) */
274         rnd_type = float_round_to_zero;
275         break;
276     case 2:
277         /* Round toward +infinite */
278         rnd_type = float_round_up;
279         break;
280     default:
281     case 3:
282         /* Round toward -infinite */
283         rnd_type = float_round_down;
284         break;
285     }
286     set_float_rounding_mode(rnd_type, &env->fp_status);
287 }
288
289 void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
290 {
291     int prev;
292
293     prev = (env->fpscr >> bit) & 1;
294     env->fpscr &= ~(1 << bit);
295     if (prev == 1) {
296         switch (bit) {
297         case FPSCR_RN1:
298         case FPSCR_RN:
299             fpscr_set_rounding_mode(env);
300             break;
301         default:
302             break;
303         }
304     }
305 }
306
/* Set bit 'bit' of the FPSCR and emulate the architectural side effects
 * of doing so: setting an exception status bit also sets FX; setting a
 * status bit whose enable is on, or an enable bit whose status is
 * already pending, sets FEX and records a deferred program exception in
 * env->exception_index / env->error_code (the caller raises it after
 * updating Rc1).  Setting RN/RN1 resynchronizes the softfloat rounding
 * mode.  Only 0->1 transitions have side effects.
 */
void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        /* Any individual invalid-operation status bit also sets VX */
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                /* Collect every pending invalid-op cause into the code */
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        /* Common deferred-raise tail, reached only via the gotos above */
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
437
/* Write 'arg' into the FPSCR under a nibble mask: bit i of 'mask'
 * selects 4-bit nibble i of the register; unselected nibbles are left
 * untouched.  Bits 0x60000000 (the FEX/VX summary bits) are never taken
 * from the source value — they are recomputed below from the individual
 * status and enable bits.  Finally the softfloat rounding mode is
 * resynchronized with FPSCR[RN].
 */
void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}
469
/* Plain-function wrapper around helper_store_fpscr. */
void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}
474
/* Translate the softfloat status flags accumulated by the preceding
 * operation into FPSCR updates and (possibly) a program exception.
 * Note only the highest-priority condition — divide-by-zero, overflow,
 * underflow, then inexact — is processed per call.
 */
void helper_float_check_status(CPUPPCState *env)
{
    int status = get_float_exception_flags(&env->fp_status);

    if (status & float_flag_divbyzero) {
        float_zero_divide_excp(env);
    } else if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
    }

    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(env, env->exception_index,
                                       env->error_code);
        }
    }
}
498
/* Clear the softfloat accumulated exception flags before an FP op. */
void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}
503
504 /* fadd - fadd. */
505 uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
506 {
507     CPU_DoubleU farg1, farg2;
508
509     farg1.ll = arg1;
510     farg2.ll = arg2;
511
512     if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
513                  float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
514         /* Magnitude subtraction of infinities */
515         farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
516     } else {
517         if (unlikely(float64_is_signaling_nan(farg1.d) ||
518                      float64_is_signaling_nan(farg2.d))) {
519             /* sNaN addition */
520             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
521         }
522         farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
523     }
524
525     return farg1.ll;
526 }
527
528 /* fsub - fsub. */
529 uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
530 {
531     CPU_DoubleU farg1, farg2;
532
533     farg1.ll = arg1;
534     farg2.ll = arg2;
535
536     if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
537                  float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
538         /* Magnitude subtraction of infinities */
539         farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
540     } else {
541         if (unlikely(float64_is_signaling_nan(farg1.d) ||
542                      float64_is_signaling_nan(farg2.d))) {
543             /* sNaN subtraction */
544             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
545         }
546         farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
547     }
548
549     return farg1.ll;
550 }
551
552 /* fmul - fmul. */
553 uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
554 {
555     CPU_DoubleU farg1, farg2;
556
557     farg1.ll = arg1;
558     farg2.ll = arg2;
559
560     if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
561                  (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
562         /* Multiplication of zero by infinity */
563         farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
564     } else {
565         if (unlikely(float64_is_signaling_nan(farg1.d) ||
566                      float64_is_signaling_nan(farg2.d))) {
567             /* sNaN multiplication */
568             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
569         }
570         farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
571     }
572
573     return farg1.ll;
574 }
575
576 /* fdiv - fdiv. */
577 uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
578 {
579     CPU_DoubleU farg1, farg2;
580
581     farg1.ll = arg1;
582     farg2.ll = arg2;
583
584     if (unlikely(float64_is_infinity(farg1.d) &&
585                  float64_is_infinity(farg2.d))) {
586         /* Division of infinity by infinity */
587         farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
588     } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
589         /* Division of zero by zero */
590         farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
591     } else {
592         if (unlikely(float64_is_signaling_nan(farg1.d) ||
593                      float64_is_signaling_nan(farg2.d))) {
594             /* sNaN division */
595             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
596         }
597         farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
598     }
599
600     return farg1.ll;
601 }
602
603 /* fctiw - fctiw. */
604 uint64_t helper_fctiw(CPUPPCState *env, uint64_t arg)
605 {
606     CPU_DoubleU farg;
607
608     farg.ll = arg;
609
610     if (unlikely(float64_is_signaling_nan(farg.d))) {
611         /* sNaN conversion */
612         farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
613                                         POWERPC_EXCP_FP_VXCVI, 1);
614     } else if (unlikely(float64_is_quiet_nan(farg.d) ||
615                         float64_is_infinity(farg.d))) {
616         /* qNan / infinity conversion */
617         farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);
618     } else {
619         farg.ll = float64_to_int32(farg.d, &env->fp_status);
620         /* XXX: higher bits are not supposed to be significant.
621          *     to make tests easier, return the same as a real PowerPC 750
622          */
623         farg.ll |= 0xFFF80000ULL << 32;
624     }
625     return farg.ll;
626 }
627
628 /* fctiwz - fctiwz. */
629 uint64_t helper_fctiwz(CPUPPCState *env, uint64_t arg)
630 {
631     CPU_DoubleU farg;
632
633     farg.ll = arg;
634
635     if (unlikely(float64_is_signaling_nan(farg.d))) {
636         /* sNaN conversion */
637         farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
638                                         POWERPC_EXCP_FP_VXCVI, 1);
639     } else if (unlikely(float64_is_quiet_nan(farg.d) ||
640                         float64_is_infinity(farg.d))) {
641         /* qNan / infinity conversion */
642         farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);
643     } else {
644         farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
645         /* XXX: higher bits are not supposed to be significant.
646          *     to make tests easier, return the same as a real PowerPC 750
647          */
648         farg.ll |= 0xFFF80000ULL << 32;
649     }
650     return farg.ll;
651 }
652
653 #if defined(TARGET_PPC64)
654 /* fcfid - fcfid. */
655 uint64_t helper_fcfid(CPUPPCState *env, uint64_t arg)
656 {
657     CPU_DoubleU farg;
658
659     farg.d = int64_to_float64(arg, &env->fp_status);
660     return farg.ll;
661 }
662
663 /* fctid - fctid. */
664 uint64_t helper_fctid(CPUPPCState *env, uint64_t arg)
665 {
666     CPU_DoubleU farg;
667
668     farg.ll = arg;
669
670     if (unlikely(float64_is_signaling_nan(farg.d))) {
671         /* sNaN conversion */
672         farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
673                                         POWERPC_EXCP_FP_VXCVI, 1);
674     } else if (unlikely(float64_is_quiet_nan(farg.d) ||
675                         float64_is_infinity(farg.d))) {
676         /* qNan / infinity conversion */
677         farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);
678     } else {
679         farg.ll = float64_to_int64(farg.d, &env->fp_status);
680     }
681     return farg.ll;
682 }
683
684 /* fctidz - fctidz. */
685 uint64_t helper_fctidz(CPUPPCState *env, uint64_t arg)
686 {
687     CPU_DoubleU farg;
688
689     farg.ll = arg;
690
691     if (unlikely(float64_is_signaling_nan(farg.d))) {
692         /* sNaN conversion */
693         farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
694                                         POWERPC_EXCP_FP_VXCVI, 1);
695     } else if (unlikely(float64_is_quiet_nan(farg.d) ||
696                         float64_is_infinity(farg.d))) {
697         /* qNan / infinity conversion */
698         farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);
699     } else {
700         farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
701     }
702     return farg.ll;
703 }
704
705 #endif
706
/* Common implementation of the frin/friz/frip/frim helpers: round the
 * double image 'arg' to an integral value using 'rounding_mode', then
 * restore the rounding mode selected by FPSCR[RN].
 * NOTE(review): NaN and infinity inputs are routed through the
 * invalid-conversion (VXCVI) path, mirroring the integer-conversion
 * helpers — confirm against the ISA definition of frin & co., which
 * may instead propagate the operand quietly.
 */
static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                        POWERPC_EXCP_FP_VXCVI, 1);
    } else if (unlikely(float64_is_quiet_nan(farg.d) ||
                        float64_is_infinity(farg.d))) {
        /* qNan / infinity round */
        farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);
    }
    return farg.ll;
}
730
/* frin - round to integral, nearest (ties to even here) */
uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_nearest_even);
}

/* friz - round to integral, toward zero */
uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

/* frip - round to integral, toward +infinity */
uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

/* frim - round to integral, toward -infinity */
uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
750
/* fmadd - fmadd. */
/* Fused multiply-add: (arg1 * arg2) + arg3 with a single final
 * rounding.  The product is formed in 128-bit precision so no double
 * rounding occurs, per the PowerPC specification.
 */
uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                      uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else {
            /* Add in 128-bit precision, then round once to double */
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }

    return farg1.ll;
}
792
/* fmsub - fmsub. */
/* Fused multiply-subtract: (arg1 * arg2) - arg3 with a single final
 * rounding via a 128-bit intermediate product.
 */
uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                      uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) &&
                  float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else {
            /* Subtract in 128-bit precision, then round once to double */
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }
    return farg1.ll;
}
834
/* fnmadd - fnmadd. */
/* Negative fused multiply-add: -((arg1 * arg2) + arg3), computed like
 * fmadd and then sign-flipped — except that NaN results are not
 * negated.
 */
uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                       uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else {
            /* Add in 128-bit precision, then round once to double */
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        /* Negate the result, but never a NaN */
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}
878
/* fnmsub - fnmsub. */
/* Negative fused multiply-subtract: -((arg1 * arg2) - arg3), computed
 * like fmsub and then sign-flipped — except that NaN results are not
 * negated.
 */
uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                       uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) &&
                  float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else {
            /* Subtract in 128-bit precision, then round once to double */
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        /* Negate the result, but never a NaN */
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}
923
/* frsp - frsp. */
/* Round a double-precision operand to single precision, implemented
 * as a float64 -> float32 -> float64 round trip through fp_status so
 * the usual rounding mode and exception flags apply. */
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN rounding to single precision */
        fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
941
942 /* fsqrt - fsqrt. */
943 uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
944 {
945     CPU_DoubleU farg;
946
947     farg.ll = arg;
948
949     if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
950         /* Square root of a negative nonzero number */
951         farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
952     } else {
953         if (unlikely(float64_is_signaling_nan(farg.d))) {
954             /* sNaN square root */
955             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
956         }
957         farg.d = float64_sqrt(farg.d, &env->fp_status);
958     }
959     return farg.ll;
960 }
961
962 /* fre - fre. */
963 uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
964 {
965     CPU_DoubleU farg;
966
967     farg.ll = arg;
968
969     if (unlikely(float64_is_signaling_nan(farg.d))) {
970         /* sNaN reciprocal */
971         fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
972     }
973     farg.d = float64_div(float64_one, farg.d, &env->fp_status);
974     return farg.d;
975 }
976
977 /* fres - fres. */
978 uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
979 {
980     CPU_DoubleU farg;
981     float32 f32;
982
983     farg.ll = arg;
984
985     if (unlikely(float64_is_signaling_nan(farg.d))) {
986         /* sNaN reciprocal */
987         fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
988     }
989     farg.d = float64_div(float64_one, farg.d, &env->fp_status);
990     f32 = float64_to_float32(farg.d, &env->fp_status);
991     farg.d = float32_to_float64(f32, &env->fp_status);
992
993     return farg.ll;
994 }
995
996 /* frsqrte  - frsqrte. */
997 uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
998 {
999     CPU_DoubleU farg;
1000     float32 f32;
1001
1002     farg.ll = arg;
1003
1004     if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1005         /* Reciprocal square root of a negative nonzero number */
1006         farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
1007     } else {
1008         if (unlikely(float64_is_signaling_nan(farg.d))) {
1009             /* sNaN reciprocal square root */
1010             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1011         }
1012         farg.d = float64_sqrt(farg.d, &env->fp_status);
1013         farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1014         f32 = float64_to_float32(farg.d, &env->fp_status);
1015         farg.d = float32_to_float64(f32, &env->fp_status);
1016     }
1017     return farg.ll;
1018 }
1019
1020 /* fsel - fsel. */
1021 uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
1022                      uint64_t arg3)
1023 {
1024     CPU_DoubleU farg1;
1025
1026     farg1.ll = arg1;
1027
1028     if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
1029         !float64_is_any_nan(farg1.d)) {
1030         return arg2;
1031     } else {
1032         return arg3;
1033     }
1034 }
1035
/* fcmpu: unordered floating compare.  Writes the 4-bit condition
 * code (0x08 = less, 0x04 = greater, 0x02 = equal, 0x01 = unordered)
 * to CR[crfD] and mirrors it into FPSCR[FPCC]; raises VXSNAN only
 * when an sNaN operand made the compare unordered. */
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    /* Update FPSCR[FPCC] and the target CR field with the same code. */
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
}
1066
/* fcmpo: ordered floating compare.  Same CR/FPCC encoding as fcmpu,
 * but ANY NaN operand is an invalid-compare: sNaN raises
 * VXSNAN | VXVC, a qNaN raises VXVC alone. */
void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    /* Update FPSCR[FPCC] and the target CR field with the same code. */
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC, 1);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
        }
    }
}
1102
1103 /* Single-precision floating-point conversions */
/* efscfsi: signed 32-bit integer -> single precision. */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

/* efscfui: unsigned 32-bit integer -> single precision. */
static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

/* efsctsi: single precision -> signed 32-bit integer.
 * NOTE(review): only quiet NaNs are filtered to 0 here; an sNaN falls
 * through to the softfloat conversion -- confirm against the SPE spec. */
static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

/* efsctui: single precision -> unsigned 32-bit integer; NaN -> 0. */
static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

/* efsctsiz: like efsctsi but truncates toward zero. */
static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

/* efsctuiz: like efsctui but truncates toward zero. */
static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

/* efscfsf: signed 0.32 fixed-point fraction -> single precision
 * (convert, then divide by 2^32). */
static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* efscfuf: unsigned 0.32 fixed-point fraction -> single precision. */
static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* efsctsf: single precision -> signed fixed-point fraction
 * (scale by 2^32, then convert); NaN -> 0. */
static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

/* efsctuf: single precision -> unsigned fixed-point fraction; NaN -> 0. */
static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
1229
/* HELPER_SPE_SINGLE_CONV - expose one of the static conversion
 * routines above as a TCG helper operating on a single 32-bit value. */
#define HELPER_SPE_SINGLE_CONV(name)                              \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
    {                                                             \
        return e##name(env, val);                                 \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);

/* HELPER_SPE_VECTOR_CONV - apply the conversion independently to the
 * high and low 32-bit halves of a 64-bit SPE vector register. */
#define HELPER_SPE_VECTOR_CONV(name)                            \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
    {                                                           \
        return ((uint64_t)e##name(env, val >> 32) << 32) |      \
            (uint64_t)e##name(env, val);                        \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
1282
1283 /* Single-precision floating-point arithmetic */
/* efsadd: single-precision add of two SPE 32-bit operands. */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efssub: single-precision subtract (op1 - op2). */
static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efsmul: single-precision multiply. */
static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efsdiv: single-precision divide (op1 / op2). */
static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* HELPER_SPE_SINGLE_ARITH - expose an arithmetic routine above as a
 * TCG helper on one 32-bit element. */
#define HELPER_SPE_SINGLE_ARITH(name)                                   \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

/* HELPER_SPE_VECTOR_ARITH - apply the operation independently to the
 * high and low halves of two 64-bit SPE vectors. */
#define HELPER_SPE_VECTOR_ARITH(name)                                   \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |   \
            (uint64_t)e##name(env, op1, op2);                           \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
1352
1353 /* Single-precision floating-point comparisons */
/* efscmplt: returns 4 when op1 < op2, else 0 (also 0 if unordered). */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

/* efscmpgt: returns 4 when NOT (op1 <= op2).
 * NOTE(review): an unordered pair (NaN operand) therefore reports
 * "greater" here -- confirm this matches the SPE specification. */
static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

/* efscmpeq: returns 4 when op1 == op2, else 0. */
static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

/* efststlt: test-less-than; currently identical to the compare form. */
static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmplt(env, op1, op2);
}

/* efststgt: test-greater-than; currently identical to the compare form. */
static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpgt(env, op1, op2);
}

/* efststeq: test-equal; currently identical to the compare form. */
static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpeq(env, op1, op2);
}

/* HELPER_SINGLE_SPE_CMP - expose a comparison above as a TCG helper;
 * the raw 0/4 result is shifted left by 2 for the caller. */
#define HELPER_SINGLE_SPE_CMP(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2) << 2;                             \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
1416
/* Merge the per-half comparison results of a vector compare:
 * bit3 = high-half result, bit2 = low-half result,
 * bit1 = either half true, bit0 = both halves true. */
static inline uint32_t evcmp_merge(int t0, int t1)
{
    uint32_t either = t0 | t1;
    uint32_t both = t0 & t1;

    return (t0 << 3) | (t1 << 2) | (either << 1) | both;
}
1421
/* HELPER_VECTOR_SPE_CMP - run the comparison on both 32-bit halves of
 * the 64-bit operands and merge the results via evcmp_merge(). */
#define HELPER_VECTOR_SPE_CMP(name)                                     \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),          \
                           e##name(env, op1, op2));                     \
    }
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
1440
1441 /* Double-precision floating-point conversion */
/* efdcfsi: signed 32-bit integer -> double precision. */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

/* efdcfsid: signed 64-bit integer -> double precision. */
uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

/* efdcfui: unsigned 32-bit integer -> double precision. */
uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

/* efdcfuid: unsigned 64-bit integer -> double precision. */
uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

/* efdctsi: double precision -> signed 32-bit integer; any NaN -> 0. */
uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

/* efdctui: double precision -> unsigned 32-bit integer; any NaN -> 0. */
uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

/* efdctsiz: like efdctsi but truncates toward zero. */
uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

/* efdctsidz: double precision -> signed 64-bit integer, truncating. */
uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

/* efdctuiz: like efdctui but truncates toward zero. */
uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

/* efdctuidz: double precision -> unsigned 64-bit integer, truncating. */
uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

/* efdcfsf: signed 0.32 fixed-point fraction -> double precision
 * (convert, then divide by 2^32). */
uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}
1567
1568 uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
1569 {
1570     CPU_DoubleU u;
1571     float64 tmp;
1572
1573     u.d = uint32_to_float64(val, &env->vec_status);
1574     tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1575     u.d = float64_div(u.d, tmp, &env->vec_status);
1576
1577     return u.ll;
1578 }
1579
/* efdctsf: double precision -> signed fixed-point fraction
 * (scale by 2^32, then convert); any NaN -> 0. */
uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

/* efdctuf: double precision -> unsigned fixed-point fraction; NaN -> 0. */
uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

/* efscfd: narrow double precision to single precision. */
uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

/* efdcfs: widen single precision to double precision. */
uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}

/* Double precision floating-point arithmetic */
/* efdadd: double-precision add. */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* efdsub: double-precision subtract (op1 - op2). */
uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* efdmul: double-precision multiply. */
uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* efddiv: double-precision divide (op1 / op2). */
uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}
1674
1675 /* Double precision floating point helpers */
/* efdtstlt: returns 4 when op1 < op2, else 0 (0 if unordered). */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

/* efdtstgt: returns 4 when NOT (op1 <= op2).
 * NOTE(review): an unordered pair reports "greater" -- same caveat as
 * the single-precision efscmpgt above. */
uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

/* efdtsteq: quiet equality test (no exception on qNaN operands). */
uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

/* efdcmplt: currently forwards to the test variant. */
uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(env, op1, op2);
}

/* efdcmpgt: currently forwards to the test variant. */
uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(env, op1, op2);
}

/* efdcmpeq: currently forwards to the test variant. */
uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(env, op1, op2);
}
1720
/* DECODE_SPLIT - extract a split opcode field: nb1 bits taken at
 * shift1 become the high part, nb2 bits taken at shift2 the low part.
 * Every macro argument is parenthesized in the expansion (the shift
 * count "nb2" previously was not), so expression arguments are safe. */
#define DECODE_SPLIT(opcode, shift1, nb1, shift2, nb2) \
    (((((opcode) >> (shift1)) & ((1 << (nb1)) - 1)) << (nb2)) |  \
     (((opcode) >> (shift2)) & ((1 << (nb2)) - 1)))

/* VSX register numbers: 5-bit field plus its extension bit. */
#define xT(opcode) DECODE_SPLIT(opcode, 0, 1, 21, 5)
#define xA(opcode) DECODE_SPLIT(opcode, 2, 1, 16, 5)
#define xB(opcode) DECODE_SPLIT(opcode, 1, 1, 11, 5)
#define xC(opcode) DECODE_SPLIT(opcode, 3, 1,  6, 5)
/* BF: the 3-bit CR field of a compare-form instruction. */
#define BF(opcode) (((opcode) >> (31-8)) & 7)
1730
/* A 128-bit VSX register value, viewable as two doublewords, four
 * words, four single-precision or two double-precision elements.
 * NOTE(review): the tag "_ppc_vsr_t" begins with an underscore at
 * file scope, which is reserved for the implementation -- consider
 * renaming when convenient. */
typedef union _ppc_vsr_t {
    uint64_t u64[2];
    uint32_t u32[4];
    float32 f32[4];
    float64 f64[2];
} ppc_vsr_t;
1737
/* Read VSR[n] into *vsr.  VSRs 0-31 overlay the FPRs (high
 * doubleword) and env->vsr (low doubleword); VSRs 32-63 overlay the
 * Altivec registers AVR[n-32]. */
static void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
{
    if (n < 32) {
        vsr->f64[0] = env->fpr[n];
        vsr->u64[1] = env->vsr[n];
    } else {
        vsr->u64[0] = env->avr[n-32].u64[0];
        vsr->u64[1] = env->avr[n-32].u64[1];
    }
}
1748
/* Write *vsr back to VSR[n]; inverse of getVSR() with the same
 * FPR/VSR/AVR overlay layout. */
static void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
{
    if (n < 32) {
        env->fpr[n] = vsr->f64[0];
        env->vsr[n] = vsr->u64[1];
    } else {
        env->avr[n-32].u64[0] = vsr->u64[0];
        env->avr[n-32].u64[1] = vsr->u64[1];
    }
}
1759
/* Identity "conversion" so the VSX macros below can use the
 * tp##_to_##tp spelling uniformly for both float32 and float64. */
#define float64_to_float64(x, env) x
1761
1762
/* VSX_ADD_SUB - VSX floating point add/subract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 *
 * Each element is computed with a private float_status copy so the
 * invalid flag can be inspected per element; VXISI is raised for
 * inf - inf, VXSNAN for sNaN operands.  The accumulated flags are
 * folded back into env->fp_status.
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf)                          \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld[i] = tp##_##op(xa.fld[i], xb.fld[i], &tstat);                 \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_infinity(xa.fld[i]) && tp##_is_infinity(xb.fld[i])) {\
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld[i]) ||                   \
                       tp##_is_signaling_nan(xb.fld[i])) {                   \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf(env, xt.fld[i], sfprf);                      \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt, env);                                            \
    helper_float_check_status(env);                                          \
}

VSX_ADD_SUB(xsadddp, add, 1, float64, f64, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, f64, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, f32, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, f64, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, f64, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, f32, 0)
1811
/* VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 *
 * Same per-element float_status scheme as VSX_ADD_SUB; the invalid
 * cases here are inf * 0 (VXIMZ) and sNaN operands (VXSNAN).
 */
#define VSX_MUL(op, nels, tp, fld, sfprf)                                    \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld[i] = tp##_mul(xa.fld[i], xb.fld[i], &tstat);                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if ((tp##_is_infinity(xa.fld[i]) && tp##_is_zero(xb.fld[i])) ||  \
                (tp##_is_infinity(xb.fld[i]) && tp##_is_zero(xa.fld[i]))) {  \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld[i]) ||                   \
                       tp##_is_signaling_nan(xb.fld[i])) {                   \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf(env, xt.fld[i], sfprf);                      \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    helper_float_check_status(env);                                          \
}

VSX_MUL(xsmuldp, 1, float64, f64, 1)
VSX_MUL(xvmuldp, 2, float64, f64, 0)
VSX_MUL(xvmulsp, 4, float32, f32, 0)
1858
/* VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_DIV(op, nels, tp, fld, sfprf)                                     \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    int i;                                                                    \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        /* Per-element scratch status, merged back after the divide.    */   \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        xt.fld[i] = tp##_div(xa.fld[i], xb.fld[i], &tstat);                   \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            /* Classify the invalid operation: Inf/Inf -> VXIDI,         */  \
            /* 0/0 -> VXZDZ, otherwise a signaling NaN -> VXSNAN.        */  \
            if (tp##_is_infinity(xa.fld[i]) && tp##_is_infinity(xb.fld[i])) { \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf);     \
            } else if (tp##_is_zero(xa.fld[i]) &&                             \
                tp##_is_zero(xb.fld[i])) {                                    \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf);     \
            } else if (tp##_is_signaling_nan(xa.fld[i]) ||                    \
                tp##_is_signaling_nan(xb.fld[i])) {                           \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
            }                                                                 \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf(env, xt.fld[i], sfprf);                       \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    helper_float_check_status(env);                                           \
}

/* scalar double / vector double / vector single divide */
VSX_DIV(xsdivdp, 1, float64, f64, 1)
VSX_DIV(xvdivdp, 2, float64, f64, 0)
VSX_DIV(xvdivsp, 4, float32, f32, 0)
1907
1908 /* VSX_RE  - VSX floating point reciprocal estimate
1909  *   op    - instruction mnemonic
1910  *   nels  - number of elements (1, 2 or 4)
1911  *   tp    - type (float32 or float64)
1912  *   fld   - vsr_t field (f32 or f64)
1913  *   sfprf - set FPRF
1914  */
1915 #define VSX_RE(op, nels, tp, fld, sfprf)                                      \
1916 void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
1917 {                                                                             \
1918     ppc_vsr_t xt, xb;                                                         \
1919     int i;                                                                    \
1920                                                                               \
1921     getVSR(xB(opcode), &xb, env);                                             \
1922     getVSR(xT(opcode), &xt, env);                                             \
1923     helper_reset_fpstatus(env);                                               \
1924                                                                               \
1925     for (i = 0; i < nels; i++) {                                              \
1926         if (unlikely(tp##_is_signaling_nan(xb.fld[i]))) {                     \
1927                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
1928         }                                                                     \
1929         xt.fld[i] = tp##_div(tp##_one, xb.fld[i], &env->fp_status);           \
1930         if (sfprf) {                                                          \
1931             helper_compute_fprf(env, xt.fld[0], sfprf);                       \
1932         }                                                                     \
1933     }                                                                         \
1934                                                                               \
1935     putVSR(xT(opcode), &xt, env);                                             \
1936     helper_float_check_status(env);                                           \
1937 }
1938
1939 VSX_RE(xsredp, 1, float64, f64, 1)
1940 VSX_RE(xvredp, 2, float64, f64, 0)
1941 VSX_RE(xvresp, 4, float32, f32, 0)
1942
/* VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_SQRT(op, nels, tp, fld, sfprf)                                   \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        /* Per-element scratch status, merged back after the sqrt.       */ \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld[i] = tp##_sqrt(xb.fld[i], &tstat);                            \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            /* sqrt of a negative non-zero -> VXSQRT; sqrt(-0) is valid. */ \
            if (tp##_is_neg(xb.fld[i]) && !tp##_is_zero(xb.fld[i])) {        \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld[i])) {                   \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf(env, xt.fld[i], sfprf);                      \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    helper_float_check_status(env);                                          \
}

VSX_SQRT(xssqrtdp, 1, float64, f64, 1)
VSX_SQRT(xvsqrtdp, 2, float64, f64, 0)
VSX_SQRT(xvsqrtsp, 4, float32, f32, 0)
1986
/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf)                                 \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        /* Estimate implemented as sqrt followed by 1.0/x, both on the   */ \
        /* same scratch status so either step can raise the flags.       */ \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld[i] = tp##_sqrt(xb.fld[i], &tstat);                            \
        xt.fld[i] = tp##_div(tp##_one, xt.fld[i], &tstat);                   \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            /* Negative non-zero input -> VXSQRT, else SNaN -> VXSNAN.   */ \
            if (tp##_is_neg(xb.fld[i]) && !tp##_is_zero(xb.fld[i])) {        \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld[i])) {                   \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf(env, xt.fld[i], sfprf);                      \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    helper_float_check_status(env);                                          \
}

VSX_RSQRTE(xsrsqrtedp, 1, float64, f64, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, f64, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, f32, 0)
2031
2032 static inline int ppc_float32_get_unbiased_exp(float32 f)
2033 {
2034     return ((f >> 23) & 0xFF) - 127;
2035 }
2036
2037 static inline int ppc_float64_get_unbiased_exp(float64 f)
2038 {
2039     return ((f >> 52) & 0x7FF) - 1023;
2040 }
2041
/* VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (f32 or f64)
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 *
 * Sets CR[BF] to 0b1000 | FG | FE, where FE flags inputs for which a
 * software divide would be needed and FG flags inf/zero/denormal divisors.
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xa, xb;                                                   \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    getVSR(xA(opcode), &xa, env);                                       \
    getVSR(xB(opcode), &xb, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        /* Infinite operand or zero divisor: both flags set.        */  \
        if (unlikely(tp##_is_infinity(xa.fld[i]) ||                     \
                     tp##_is_infinity(xb.fld[i]) ||                     \
                     tp##_is_zero(xb.fld[i]))) {                        \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld[i]);           \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld[i]);           \
                                                                        \
            /* FE: NaN operands, extreme divisor exponent, or a     */  \
            /* quotient whose exponent could over/underflow.        */  \
            if (unlikely(tp##_is_any_nan(xa.fld[i]) ||                  \
                         tp##_is_any_nan(xb.fld[i]))) {                 \
                fe_flag = 1;                                            \
            } else if ((e_b <= emin) || (e_b >= (emax-2))) {            \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xa.fld[i]) &&                      \
                       (((e_a - e_b) >= emax) ||                        \
                        ((e_a - e_b) <= (emin+1)) ||                    \
                         (e_a <= (emin+nbits)))) {                      \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld[i]))) {        \
                /* XB is not zero because of the above check and */     \
                /* so must be denormalized.                      */     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TDIV(xstdivdp, 1, float64, f64, -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, f64, -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, f32, -126, 127, 23)
2098
/* VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (f32 or f64)
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 *
 * Sets CR[BF] to 0b1000 | FG | FE, where FE flags inputs for which a
 * software square root would be needed and FG flags inf/zero/denormal
 * inputs.  (Note: unlike VSX_TDIV there is no emax parameter.)
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xa, xb;                                                   \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    getVSR(xA(opcode), &xa, env);                                       \
    getVSR(xB(opcode), &xb, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        /* Infinity or zero input: both flags set.                  */  \
        if (unlikely(tp##_is_infinity(xb.fld[i]) ||                     \
                     tp##_is_zero(xb.fld[i]))) {                        \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld[i]);           \
                                                                        \
            /* FE: NaN, negative, or too-small exponent.  (The      */  \
            /* is_zero test here is unreachable: zero was already   */  \
            /* handled by the branch above.)                        */  \
            if (unlikely(tp##_is_any_nan(xb.fld[i]))) {                 \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_zero(xb.fld[i]))) {             \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_neg(xb.fld[i]))) {              \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xb.fld[i]) &&                      \
                      (e_b <= (emin+nbits))) {                          \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld[i]))) {        \
                /* XB is not zero because of the above check and */     \
                /* therefore must be denormalized.               */     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TSQRT(xstsqrtdp, 1, float64, f64, -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, f64, -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, f32, -126, 23)
2152
/* VSX_MADD - VSX floating point multiply/add variations
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (f32 or f64)
 *   maddflgs - flags for the float*muladd routine that control the
 *           various forms (madd, msub, nmadd, nmsub)
 *   afrm  - A form (1=A, 0=M)
 *   sfprf - set FPRF
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf)                    \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt_in, xa, xb, xt_out;                                          \
    ppc_vsr_t *b, *c;                                                         \
    int i;                                                                    \
                                                                              \
    /* A-form computes XA*XB + XT, M-form computes XA*XT + XB; select    */  \
    /* which register provides the multiplicand and which the addend.    */  \
    if (afrm) { /* AxB + T */                                                 \
        b = &xb;                                                              \
        c = &xt_in;                                                           \
    } else { /* AxT + B */                                                    \
        b = &xt_in;                                                           \
        c = &xb;                                                              \
    }                                                                         \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt_in, env);                                          \
                                                                              \
    xt_out = xt_in;                                                           \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        xt_out.fld[i] = tp##_muladd(xa.fld[i], b->fld[i], c->fld[i],          \
                                     maddflgs, &tstat);                       \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            /* Each recognized cause clears float_flag_invalid so the    */  \
            /* Inf - Inf (VXISI) check below only fires when nothing     */  \
            /* else explained the invalid flag.                          */  \
            if (tp##_is_signaling_nan(xa.fld[i]) ||                           \
                tp##_is_signaling_nan(b->fld[i]) ||                           \
                tp##_is_signaling_nan(c->fld[i])) {                           \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
                tstat.float_exception_flags &= ~float_flag_invalid;           \
            }                                                                 \
            /* Inf * 0: take the default QNaN returned by the excp       */  \
            /* helper (a float64), converted to the element type.        */  \
            if ((tp##_is_infinity(xa.fld[i]) && tp##_is_zero(b->fld[i])) ||   \
                (tp##_is_zero(xa.fld[i]) && tp##_is_infinity(b->fld[i]))) {   \
                xt_out.fld[i] = float64_to_##tp(fload_invalid_op_excp(env,    \
                    POWERPC_EXCP_FP_VXIMZ, sfprf), &env->fp_status);          \
                tstat.float_exception_flags &= ~float_flag_invalid;           \
            }                                                                 \
            if ((tstat.float_exception_flags & float_flag_invalid) &&         \
                ((tp##_is_infinity(xa.fld[i]) ||                              \
                  tp##_is_infinity(b->fld[i])) &&                             \
                  tp##_is_infinity(c->fld[i]))) {                             \
                fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);     \
            }                                                                 \
        }                                                                     \
        if (sfprf) {                                                          \
            helper_compute_fprf(env, xt_out.fld[i], sfprf);                   \
        }                                                                     \
    }                                                                         \
    putVSR(xT(opcode), &xt_out, env);                                         \
    helper_float_check_status(env);                                           \
}
2220
/* softfloat muladd flag combinations selecting the four fused forms */
#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

/* scalar double-precision forms (these set FPRF) */
VSX_MADD(xsmaddadp, 1, float64, f64, MADD_FLGS, 1, 1)
VSX_MADD(xsmaddmdp, 1, float64, f64, MADD_FLGS, 0, 1)
VSX_MADD(xsmsubadp, 1, float64, f64, MSUB_FLGS, 1, 1)
VSX_MADD(xsmsubmdp, 1, float64, f64, MSUB_FLGS, 0, 1)
VSX_MADD(xsnmaddadp, 1, float64, f64, NMADD_FLGS, 1, 1)
VSX_MADD(xsnmaddmdp, 1, float64, f64, NMADD_FLGS, 0, 1)
VSX_MADD(xsnmsubadp, 1, float64, f64, NMSUB_FLGS, 1, 1)
VSX_MADD(xsnmsubmdp, 1, float64, f64, NMSUB_FLGS, 0, 1)

/* vector double-precision forms */
VSX_MADD(xvmaddadp, 2, float64, f64, MADD_FLGS, 1, 0)
VSX_MADD(xvmaddmdp, 2, float64, f64, MADD_FLGS, 0, 0)
VSX_MADD(xvmsubadp, 2, float64, f64, MSUB_FLGS, 1, 0)
VSX_MADD(xvmsubmdp, 2, float64, f64, MSUB_FLGS, 0, 0)
VSX_MADD(xvnmaddadp, 2, float64, f64, NMADD_FLGS, 1, 0)
VSX_MADD(xvnmaddmdp, 2, float64, f64, NMADD_FLGS, 0, 0)
VSX_MADD(xvnmsubadp, 2, float64, f64, NMSUB_FLGS, 1, 0)
VSX_MADD(xvnmsubmdp, 2, float64, f64, NMSUB_FLGS, 0, 0)

/* vector single-precision forms */
VSX_MADD(xvmaddasp, 4, float32, f32, MADD_FLGS, 1, 0)
VSX_MADD(xvmaddmsp, 4, float32, f32, MADD_FLGS, 0, 0)
VSX_MADD(xvmsubasp, 4, float32, f32, MSUB_FLGS, 1, 0)
VSX_MADD(xvmsubmsp, 4, float32, f32, MSUB_FLGS, 0, 0)
VSX_MADD(xvnmaddasp, 4, float32, f32, NMADD_FLGS, 1, 0)
VSX_MADD(xvnmaddmsp, 4, float32, f32, NMADD_FLGS, 0, 0)
VSX_MADD(xvnmsubasp, 4, float32, f32, NMSUB_FLGS, 1, 0)
VSX_MADD(xvnmsubmsp, 4, float32, f32, NMSUB_FLGS, 0, 0)
2252
/* VSX_SCALAR_CMP - VSX scalar floating point compare
 *   op      - instruction mnemonic
 *   ordered - 1 for ordered compare (NaN raises VXVC), 0 for unordered
 *
 * Compares doubleword 0 of XA and XB; the condition code (8=LT, 4=GT,
 * 2=EQ, 1=unordered) is written both to FPSCR[FPRF] and to CR[BF].
 */
#define VSX_SCALAR_CMP(op, ordered)                                      \
void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
{                                                                        \
    ppc_vsr_t xa, xb;                                                    \
    uint32_t cc = 0;                                                     \
                                                                         \
    getVSR(xA(opcode), &xa, env);                                        \
    getVSR(xB(opcode), &xb, env);                                        \
                                                                         \
    if (unlikely(float64_is_any_nan(xa.f64[0]) ||                        \
                 float64_is_any_nan(xb.f64[0]))) {                       \
        if (float64_is_signaling_nan(xa.f64[0]) ||                       \
            float64_is_signaling_nan(xb.f64[0])) {                       \
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
        }                                                                \
        if (ordered) {                                                   \
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);         \
        }                                                                \
        cc = 1;                                                          \
    } else {                                                             \
        if (float64_lt(xa.f64[0], xb.f64[0], &env->fp_status)) {         \
            cc = 8;                                                      \
        } else if (!float64_le(xa.f64[0], xb.f64[0], &env->fp_status)) { \
            cc = 4;                                                      \
        } else {                                                         \
            cc = 2;                                                      \
        }                                                                \
    }                                                                    \
                                                                         \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
    env->fpscr |= cc << FPSCR_FPRF;                                      \
    env->crf[BF(opcode)] = cc;                                           \
                                                                         \
    helper_float_check_status(env);                                      \
}

VSX_SCALAR_CMP(xscmpodp, 1)
VSX_SCALAR_CMP(xscmpudp, 0)
2291
/* Quiet a signaling NaN by setting the top fraction bit.  Use the ULL
 * suffix: the plain "ul" suffix makes the 64-bit constant's type depend
 * on the host's unsigned long width (32 bits on ILP32 hosts). */
#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
2294
/* VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name  - instruction mnemonic
 *   op    - operation (max or min)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (f32 or f64)
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    int i;                                                                    \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        /* The softfloat maxnum/minnum op computes the result; an SNaN   */  \
        /* operand additionally raises VXSNAN without changing it.       */  \
        xt.fld[i] = tp##_##op(xa.fld[i], xb.fld[i], &env->fp_status);         \
        if (unlikely(tp##_is_signaling_nan(xa.fld[i]) ||                      \
                     tp##_is_signaling_nan(xb.fld[i]))) {                     \
            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    helper_float_check_status(env);                                           \
}

VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, f64)
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, f64)
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, f32)
VSX_MAX_MIN(xsmindp, minnum, 1, float64, f64)
VSX_MAX_MIN(xvmindp, minnum, 2, float64, f64)
VSX_MAX_MIN(xvminsp, minnum, 4, float32, f32)
This page took 0.156203 seconds and 4 git commands to generate.