2 * PowerPC floating point and SPE emulation helpers for QEMU.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 /*****************************************************************************/
23 /* Floating point operations helpers */
24 uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
30 d.d = float32_to_float64(f.f, &env->fp_status);
34 uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
40 f.f = float64_to_float32(d.d, &env->fp_status);
44 static inline int isden(float64 d)
50 return ((u.ll >> 52) & 0x7FF) == 0;
/*
 * helper_compute_fprf: classify a 64-bit FP value (NaN / infinity / zero /
 * denormal / normal, with sign) into the 5-bit FPRF code, and write it into
 * FPSCR[FPRF] when set_fprf is non-zero; otherwise only the FPCC part is
 * produced for Rc=1 condition-register updates.
 * NOTE(review): this block was truncated by extraction — the per-class
 * `ret` assignments and several braces are missing, and each line carries a
 * stray leading file-line-number token. Restore from version control.
 */
53 uint32_t helper_compute_fprf(CPUPPCState *env, uint64_t arg, uint32_t set_fprf)
60 isneg = float64_is_neg(farg.d);
61 if (unlikely(float64_is_any_nan(farg.d))) {
62 if (float64_is_signaling_nan(farg.d)) {
63 /* Signaling NaN: flags are undefined */
69 } else if (unlikely(float64_is_infinity(farg.d))) {
77 if (float64_is_zero(farg.d)) {
86 /* Denormalized numbers */
89 /* Normalized numbers */
100 /* We update FPSCR_FPRF */
101 env->fpscr &= ~(0x1F << FPSCR_FPRF);
102 env->fpscr |= ret << FPSCR_FPRF;
104 /* We just need fpcc to update Rc1 */
108 /* Floating-point invalid operations exception */
/*
 * fload_invalid_op_excp: record an invalid-operation exception `op` in the
 * FPSCR sticky bits, optionally produce the default quiet-NaN result, and
 * raise a program interrupt when the exception is enabled and MSR[FE0|FE1]
 * allow it. Returns the (possibly qNaN) result image for the target FPR.
 * NOTE(review): truncated by extraction — the function's second parameter,
 * `break` statements, `if (ve == 0)` guards and closing braces are missing,
 * and each line carries a stray leading file-line-number token.
 */
109 static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op,
117 case POWERPC_EXCP_FP_VXSNAN:
118 env->fpscr |= 1 << FPSCR_VXSNAN;
120 case POWERPC_EXCP_FP_VXSOFT:
121 env->fpscr |= 1 << FPSCR_VXSOFT;
123 case POWERPC_EXCP_FP_VXISI:
124 /* Magnitude subtraction of infinities */
125 env->fpscr |= 1 << FPSCR_VXISI;
127 case POWERPC_EXCP_FP_VXIDI:
128 /* Division of infinity by infinity */
129 env->fpscr |= 1 << FPSCR_VXIDI;
131 case POWERPC_EXCP_FP_VXZDZ:
132 /* Division of zero by zero */
133 env->fpscr |= 1 << FPSCR_VXZDZ;
135 case POWERPC_EXCP_FP_VXIMZ:
136 /* Multiplication of zero by infinity */
137 env->fpscr |= 1 << FPSCR_VXIMZ;
139 case POWERPC_EXCP_FP_VXVC:
140 /* Ordered comparison of NaN */
141 env->fpscr |= 1 << FPSCR_VXVC;
/* FPCC is forced to 0x1 ("unordered") with FU set for NaN compares. */
143 env->fpscr &= ~(0xF << FPSCR_FPCC);
144 env->fpscr |= 0x11 << FPSCR_FPCC;
146 /* We must update the target FPR before raising the exception */
148 env->exception_index = POWERPC_EXCP_PROGRAM;
149 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
150 /* Update the floating-point enabled exception summary */
151 env->fpscr |= 1 << FPSCR_FEX;
152 /* Exception is deferred: raised later, after the target FPR update */
156 case POWERPC_EXCP_FP_VXSQRT:
157 /* Square root of a negative number */
158 env->fpscr |= 1 << FPSCR_VXSQRT;
160 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
162 /* Set the result to quiet NaN */
163 ret = 0x7FF8000000000000ULL;
165 env->fpscr &= ~(0xF << FPSCR_FPCC);
166 env->fpscr |= 0x11 << FPSCR_FPCC;
170 case POWERPC_EXCP_FP_VXCVI:
171 /* Invalid conversion */
172 env->fpscr |= 1 << FPSCR_VXCVI;
173 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
175 /* Set the result to quiet NaN */
176 ret = 0x7FF8000000000000ULL;
178 env->fpscr &= ~(0xF << FPSCR_FPCC);
179 env->fpscr |= 0x11 << FPSCR_FPCC;
184 /* Update the floating-point invalid operation summary */
185 env->fpscr |= 1 << FPSCR_VX;
186 /* Update the floating-point exception summary */
187 env->fpscr |= 1 << FPSCR_FX;
189 /* Update the floating-point enabled exception summary */
190 env->fpscr |= 1 << FPSCR_FEX;
/* Raise immediately only if MSR FP-exception mode is non-ignore. */
191 if (msr_fe0 != 0 || msr_fe1 != 0) {
192 helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
193 POWERPC_EXCP_FP | op);
199 static inline void float_zero_divide_excp(CPUPPCState *env)
201 env->fpscr |= 1 << FPSCR_ZX;
202 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
203 /* Update the floating-point exception summary */
204 env->fpscr |= 1 << FPSCR_FX;
206 /* Update the floating-point enabled exception summary */
207 env->fpscr |= 1 << FPSCR_FEX;
208 if (msr_fe0 != 0 || msr_fe1 != 0) {
209 helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
210 POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
215 static inline void float_overflow_excp(CPUPPCState *env)
217 env->fpscr |= 1 << FPSCR_OX;
218 /* Update the floating-point exception summary */
219 env->fpscr |= 1 << FPSCR_FX;
221 /* XXX: should adjust the result */
222 /* Update the floating-point enabled exception summary */
223 env->fpscr |= 1 << FPSCR_FEX;
224 /* We must update the target FPR before raising the exception */
225 env->exception_index = POWERPC_EXCP_PROGRAM;
226 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
228 env->fpscr |= 1 << FPSCR_XX;
229 env->fpscr |= 1 << FPSCR_FI;
233 static inline void float_underflow_excp(CPUPPCState *env)
235 env->fpscr |= 1 << FPSCR_UX;
236 /* Update the floating-point exception summary */
237 env->fpscr |= 1 << FPSCR_FX;
239 /* XXX: should adjust the result */
240 /* Update the floating-point enabled exception summary */
241 env->fpscr |= 1 << FPSCR_FEX;
242 /* We must update the target FPR before raising the exception */
243 env->exception_index = POWERPC_EXCP_PROGRAM;
244 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
248 static inline void float_inexact_excp(CPUPPCState *env)
250 env->fpscr |= 1 << FPSCR_XX;
251 /* Update the floating-point exception summary */
252 env->fpscr |= 1 << FPSCR_FX;
254 /* Update the floating-point enabled exception summary */
255 env->fpscr |= 1 << FPSCR_FEX;
256 /* We must update the target FPR before raising the exception */
257 env->exception_index = POWERPC_EXCP_PROGRAM;
258 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
262 static inline void fpscr_set_rounding_mode(CPUPPCState *env)
266 /* Set rounding mode */
269 /* Best approximation (round to nearest) */
270 rnd_type = float_round_nearest_even;
273 /* Smaller magnitude (round toward zero) */
274 rnd_type = float_round_to_zero;
277 /* Round toward +infinite */
278 rnd_type = float_round_up;
282 /* Round toward -infinite */
283 rnd_type = float_round_down;
286 set_float_rounding_mode(rnd_type, &env->fp_status);
/*
 * helper_fpscr_clrbit: clear a single FPSCR bit; re-derive the softfloat
 * rounding mode when an RN bit changed.
 * NOTE(review): this whole region (clrbit/setbit/store_fpscr) was truncated
 * by extraction — switch labels, braces and several guard lines are missing,
 * and every line carries a stray leading file-line-number token. Restore
 * from version control before building.
 */
289 void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
293 prev = (env->fpscr >> bit) & 1;
294 env->fpscr &= ~(1 << bit);
299 fpscr_set_rounding_mode(env);
/*
 * helper_fpscr_setbit: set a single FPSCR bit; setting any exception
 * sticky bit also raises FX, setting an enable bit with its exception
 * already pending assembles error_code and defers a program interrupt.
 */
307 void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
311 prev = (env->fpscr >> bit) & 1;
312 env->fpscr |= 1 << bit;
316 env->fpscr |= 1 << FPSCR_FX;
322 env->fpscr |= 1 << FPSCR_FX;
328 env->fpscr |= 1 << FPSCR_FX;
334 env->fpscr |= 1 << FPSCR_FX;
340 env->fpscr |= 1 << FPSCR_FX;
354 env->fpscr |= 1 << FPSCR_VX;
355 env->fpscr |= 1 << FPSCR_FX;
363 env->error_code = POWERPC_EXCP_FP;
365 env->error_code |= POWERPC_EXCP_FP_VXSNAN;
368 env->error_code |= POWERPC_EXCP_FP_VXISI;
371 env->error_code |= POWERPC_EXCP_FP_VXIDI;
374 env->error_code |= POWERPC_EXCP_FP_VXZDZ;
377 env->error_code |= POWERPC_EXCP_FP_VXIMZ;
380 env->error_code |= POWERPC_EXCP_FP_VXVC;
383 env->error_code |= POWERPC_EXCP_FP_VXSOFT;
386 env->error_code |= POWERPC_EXCP_FP_VXSQRT;
389 env->error_code |= POWERPC_EXCP_FP_VXCVI;
397 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
404 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
411 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
418 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
424 fpscr_set_rounding_mode(env);
429 /* Update the floating-point enabled exception summary */
430 env->fpscr |= 1 << FPSCR_FEX;
431 /* We have to update Rc1 before raising the exception */
432 env->exception_index = POWERPC_EXCP_PROGRAM;
/*
 * helper_store_fpscr: write FPSCR nibbles selected by `mask`, preserving
 * the FEX/VX summary bits (0x60000000), then recompute VX and FEX and
 * defer a program interrupt if an enabled exception is now pending.
 */
438 void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
440 target_ulong prev, new;
444 new = (target_ulong)arg;
/* Bits 0x60000000 (FEX|VX) are derived, never written directly. */
445 new &= ~0x60000000LL;
446 new |= prev & 0x60000000LL;
/* One mask bit per 4-bit FPSCR nibble. */
447 for (i = 0; i < sizeof(target_ulong) * 2; i++) {
448 if (mask & (1 << i)) {
449 env->fpscr &= ~(0xFLL << (4 * i));
450 env->fpscr |= new & (0xFLL << (4 * i));
453 /* Update VX and FEX */
455 env->fpscr |= 1 << FPSCR_VX;
457 env->fpscr &= ~(1 << FPSCR_VX);
459 if ((fpscr_ex & fpscr_eex) != 0) {
460 env->fpscr |= 1 << FPSCR_FEX;
461 env->exception_index = POWERPC_EXCP_PROGRAM;
462 /* XXX: we should compute it properly */
463 env->error_code = POWERPC_EXCP_FP;
465 env->fpscr &= ~(1 << FPSCR_FEX);
467 fpscr_set_rounding_mode(env);
470 void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
472 helper_store_fpscr(env, arg, mask);
475 void helper_float_check_status(CPUPPCState *env)
477 int status = get_float_exception_flags(&env->fp_status);
479 if (status & float_flag_divbyzero) {
480 float_zero_divide_excp(env);
481 } else if (status & float_flag_overflow) {
482 float_overflow_excp(env);
483 } else if (status & float_flag_underflow) {
484 float_underflow_excp(env);
485 } else if (status & float_flag_inexact) {
486 float_inexact_excp(env);
489 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
490 (env->error_code & POWERPC_EXCP_FP)) {
491 /* Differred floating-point exception after target FPR update */
492 if (msr_fe0 != 0 || msr_fe1 != 0) {
493 helper_raise_exception_err(env, env->exception_index,
499 void helper_reset_fpstatus(CPUPPCState *env)
501 set_float_exception_flags(0, &env->fp_status);
505 uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
507 CPU_DoubleU farg1, farg2;
512 if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
513 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
514 /* Magnitude subtraction of infinities */
515 farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
517 if (unlikely(float64_is_signaling_nan(farg1.d) ||
518 float64_is_signaling_nan(farg2.d))) {
520 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
522 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
529 uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
531 CPU_DoubleU farg1, farg2;
536 if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
537 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
538 /* Magnitude subtraction of infinities */
539 farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
541 if (unlikely(float64_is_signaling_nan(farg1.d) ||
542 float64_is_signaling_nan(farg2.d))) {
543 /* sNaN subtraction */
544 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
546 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
553 uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
555 CPU_DoubleU farg1, farg2;
560 if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
561 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
562 /* Multiplication of zero by infinity */
563 farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
565 if (unlikely(float64_is_signaling_nan(farg1.d) ||
566 float64_is_signaling_nan(farg2.d))) {
567 /* sNaN multiplication */
568 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
570 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
577 uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
579 CPU_DoubleU farg1, farg2;
584 if (unlikely(float64_is_infinity(farg1.d) &&
585 float64_is_infinity(farg2.d))) {
586 /* Division of infinity by infinity */
587 farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
588 } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
589 /* Division of zero by zero */
590 farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
592 if (unlikely(float64_is_signaling_nan(farg1.d) ||
593 float64_is_signaling_nan(farg2.d))) {
595 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
597 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
604 uint64_t helper_fctiw(CPUPPCState *env, uint64_t arg)
610 if (unlikely(float64_is_signaling_nan(farg.d))) {
611 /* sNaN conversion */
612 farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
613 POWERPC_EXCP_FP_VXCVI, 1);
614 } else if (unlikely(float64_is_quiet_nan(farg.d) ||
615 float64_is_infinity(farg.d))) {
616 /* qNan / infinity conversion */
617 farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);
619 farg.ll = float64_to_int32(farg.d, &env->fp_status);
620 /* XXX: higher bits are not supposed to be significant.
621 * to make tests easier, return the same as a real PowerPC 750
623 farg.ll |= 0xFFF80000ULL << 32;
628 /* fctiwz - fctiwz. */
629 uint64_t helper_fctiwz(CPUPPCState *env, uint64_t arg)
635 if (unlikely(float64_is_signaling_nan(farg.d))) {
636 /* sNaN conversion */
637 farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
638 POWERPC_EXCP_FP_VXCVI, 1);
639 } else if (unlikely(float64_is_quiet_nan(farg.d) ||
640 float64_is_infinity(farg.d))) {
641 /* qNan / infinity conversion */
642 farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);
644 farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
645 /* XXX: higher bits are not supposed to be significant.
646 * to make tests easier, return the same as a real PowerPC 750
648 farg.ll |= 0xFFF80000ULL << 32;
653 #if defined(TARGET_PPC64)
655 uint64_t helper_fcfid(CPUPPCState *env, uint64_t arg)
659 farg.d = int64_to_float64(arg, &env->fp_status);
664 uint64_t helper_fctid(CPUPPCState *env, uint64_t arg)
670 if (unlikely(float64_is_signaling_nan(farg.d))) {
671 /* sNaN conversion */
672 farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
673 POWERPC_EXCP_FP_VXCVI, 1);
674 } else if (unlikely(float64_is_quiet_nan(farg.d) ||
675 float64_is_infinity(farg.d))) {
676 /* qNan / infinity conversion */
677 farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);
679 farg.ll = float64_to_int64(farg.d, &env->fp_status);
684 /* fctidz - fctidz. */
685 uint64_t helper_fctidz(CPUPPCState *env, uint64_t arg)
691 if (unlikely(float64_is_signaling_nan(farg.d))) {
692 /* sNaN conversion */
693 farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
694 POWERPC_EXCP_FP_VXCVI, 1);
695 } else if (unlikely(float64_is_quiet_nan(farg.d) ||
696 float64_is_infinity(farg.d))) {
697 /* qNan / infinity conversion */
698 farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);
700 farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
707 static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
714 if (unlikely(float64_is_signaling_nan(farg.d))) {
716 farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
717 POWERPC_EXCP_FP_VXCVI, 1);
718 } else if (unlikely(float64_is_quiet_nan(farg.d) ||
719 float64_is_infinity(farg.d))) {
720 /* qNan / infinity round */
721 farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);
723 set_float_rounding_mode(rounding_mode, &env->fp_status);
724 farg.ll = float64_round_to_int(farg.d, &env->fp_status);
725 /* Restore rounding mode from FPSCR */
726 fpscr_set_rounding_mode(env);
731 uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
733 return do_fri(env, arg, float_round_nearest_even);
736 uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
738 return do_fri(env, arg, float_round_to_zero);
741 uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
743 return do_fri(env, arg, float_round_up);
746 uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
748 return do_fri(env, arg, float_round_down);
/*
 * fmadd/fmsub/fnmadd/fnmsub: fused multiply-add family. Per the PowerPC
 * spec the product is formed in 128-bit precision, the addend folded in at
 * 128 bits, then the result rounded once to double. 0*inf raises VXIMZ,
 * inf-inf in the accumulate step raises VXISI, sNaN operands raise VXSNAN;
 * the negated variants apply float64_chs only to non-NaN results.
 * NOTE(review): this region was truncated by extraction — else-branch
 * braces, `return farg1.ll;` lines and closing braces are missing, and
 * every line carries a stray leading file-line-number token. Restore from
 * version control before building.
 */
752 uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
755 CPU_DoubleU farg1, farg2, farg3;
761 if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
762 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
763 /* Multiplication of zero by infinity */
764 farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
766 if (unlikely(float64_is_signaling_nan(farg1.d) ||
767 float64_is_signaling_nan(farg2.d) ||
768 float64_is_signaling_nan(farg3.d))) {
770 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
772 /* This is the way the PowerPC specification defines it */
773 float128 ft0_128, ft1_128;
775 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
776 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
777 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
778 if (unlikely(float128_is_infinity(ft0_128) &&
779 float64_is_infinity(farg3.d) &&
780 float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
781 /* Magnitude subtraction of infinities */
782 farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
784 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
785 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
786 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
/* fmsub: same product path, but subtracts the addend (same-sign inf => VXISI). */
794 uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
797 CPU_DoubleU farg1, farg2, farg3;
803 if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
804 (float64_is_zero(farg1.d) &&
805 float64_is_infinity(farg2.d)))) {
806 /* Multiplication of zero by infinity */
807 farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
809 if (unlikely(float64_is_signaling_nan(farg1.d) ||
810 float64_is_signaling_nan(farg2.d) ||
811 float64_is_signaling_nan(farg3.d))) {
813 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
815 /* This is the way the PowerPC specification defines it */
816 float128 ft0_128, ft1_128;
818 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
819 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
820 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
821 if (unlikely(float128_is_infinity(ft0_128) &&
822 float64_is_infinity(farg3.d) &&
823 float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
824 /* Magnitude subtraction of infinities */
825 farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
827 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
828 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
829 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
835 /* fnmadd - fnmadd. */
836 uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
839 CPU_DoubleU farg1, farg2, farg3;
845 if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
846 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
847 /* Multiplication of zero by infinity */
848 farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
850 if (unlikely(float64_is_signaling_nan(farg1.d) ||
851 float64_is_signaling_nan(farg2.d) ||
852 float64_is_signaling_nan(farg3.d))) {
854 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
856 /* This is the way the PowerPC specification defines it */
857 float128 ft0_128, ft1_128;
859 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
860 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
861 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
862 if (unlikely(float128_is_infinity(ft0_128) &&
863 float64_is_infinity(farg3.d) &&
864 float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
865 /* Magnitude subtraction of infinities */
866 farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
868 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
869 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
870 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
/* Negate only non-NaN results: NaN sign must be preserved. */
872 if (likely(!float64_is_any_nan(farg1.d))) {
873 farg1.d = float64_chs(farg1.d);
879 /* fnmsub - fnmsub. */
880 uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
883 CPU_DoubleU farg1, farg2, farg3;
889 if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
890 (float64_is_zero(farg1.d) &&
891 float64_is_infinity(farg2.d)))) {
892 /* Multiplication of zero by infinity */
893 farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
895 if (unlikely(float64_is_signaling_nan(farg1.d) ||
896 float64_is_signaling_nan(farg2.d) ||
897 float64_is_signaling_nan(farg3.d))) {
899 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
901 /* This is the way the PowerPC specification defines it */
902 float128 ft0_128, ft1_128;
904 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
905 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
906 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
907 if (unlikely(float128_is_infinity(ft0_128) &&
908 float64_is_infinity(farg3.d) &&
909 float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
910 /* Magnitude subtraction of infinities */
911 farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
913 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
914 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
915 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
917 if (likely(!float64_is_any_nan(farg1.d))) {
918 farg1.d = float64_chs(farg1.d);
925 uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
932 if (unlikely(float64_is_signaling_nan(farg.d))) {
933 /* sNaN square root */
934 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
936 f32 = float64_to_float32(farg.d, &env->fp_status);
937 farg.d = float32_to_float64(f32, &env->fp_status);
943 uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
949 if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
950 /* Square root of a negative nonzero number */
951 farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
953 if (unlikely(float64_is_signaling_nan(farg.d))) {
954 /* sNaN square root */
955 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
957 farg.d = float64_sqrt(farg.d, &env->fp_status);
963 uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
969 if (unlikely(float64_is_signaling_nan(farg.d))) {
970 /* sNaN reciprocal */
971 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
973 farg.d = float64_div(float64_one, farg.d, &env->fp_status);
978 uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
985 if (unlikely(float64_is_signaling_nan(farg.d))) {
986 /* sNaN reciprocal */
987 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
989 farg.d = float64_div(float64_one, farg.d, &env->fp_status);
990 f32 = float64_to_float32(farg.d, &env->fp_status);
991 farg.d = float32_to_float64(f32, &env->fp_status);
996 /* frsqrte - frsqrte. */
997 uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
1004 if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
1005 /* Reciprocal square root of a negative nonzero number */
1006 farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
1008 if (unlikely(float64_is_signaling_nan(farg.d))) {
1009 /* sNaN reciprocal square root */
1010 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1012 farg.d = float64_sqrt(farg.d, &env->fp_status);
1013 farg.d = float64_div(float64_one, farg.d, &env->fp_status);
1014 f32 = float64_to_float32(farg.d, &env->fp_status);
1015 farg.d = float32_to_float64(f32, &env->fp_status);
1021 uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
1028 if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
1029 !float64_is_any_nan(farg1.d)) {
1036 void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
1039 CPU_DoubleU farg1, farg2;
1045 if (unlikely(float64_is_any_nan(farg1.d) ||
1046 float64_is_any_nan(farg2.d))) {
1048 } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1050 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1056 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1057 env->fpscr |= ret << FPSCR_FPRF;
1058 env->crf[crfD] = ret;
1059 if (unlikely(ret == 0x01UL
1060 && (float64_is_signaling_nan(farg1.d) ||
1061 float64_is_signaling_nan(farg2.d)))) {
1062 /* sNaN comparison */
1063 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1067 void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
1070 CPU_DoubleU farg1, farg2;
1076 if (unlikely(float64_is_any_nan(farg1.d) ||
1077 float64_is_any_nan(farg2.d))) {
1079 } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1081 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1087 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1088 env->fpscr |= ret << FPSCR_FPRF;
1089 env->crf[crfD] = ret;
1090 if (unlikely(ret == 0x01UL)) {
1091 if (float64_is_signaling_nan(farg1.d) ||
1092 float64_is_signaling_nan(farg2.d)) {
1093 /* sNaN comparison */
1094 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
1095 POWERPC_EXCP_FP_VXVC, 1);
1097 /* qNaN comparison */
1098 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
1103 /* Single-precision floating-point conversions */
/*
 * SPE efs* conversion cores. All use env->vec_status (SPE/embedded FP
 * status), not env->fp_status. The efsct* integer conversions special-case
 * NaN before converting ("NaN are not treated the same way IEEE 754
 * does"); the *sf/*uf fractional forms scale by 2^32 via a divide or
 * multiply.
 * NOTE(review): region truncated by extraction — union declarations,
 * early-return bodies and closing braces are missing, and every line
 * carries a stray leading file-line-number token. Restore from VCS.
 */
1104 static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
1108 u.f = int32_to_float32(val, &env->vec_status);
1113 static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
1117 u.f = uint32_to_float32(val, &env->vec_status);
1122 static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
1127 /* NaN are not treated the same way IEEE 754 does */
1128 if (unlikely(float32_is_quiet_nan(u.f))) {
1132 return float32_to_int32(u.f, &env->vec_status);
1135 static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
1140 /* NaN are not treated the same way IEEE 754 does */
1141 if (unlikely(float32_is_quiet_nan(u.f))) {
1145 return float32_to_uint32(u.f, &env->vec_status);
1148 static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
1153 /* NaN are not treated the same way IEEE 754 does */
1154 if (unlikely(float32_is_quiet_nan(u.f))) {
1158 return float32_to_int32_round_to_zero(u.f, &env->vec_status);
1161 static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
1166 /* NaN are not treated the same way IEEE 754 does */
1167 if (unlikely(float32_is_quiet_nan(u.f))) {
1171 return float32_to_uint32_round_to_zero(u.f, &env->vec_status)
1174 static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
1179 u.f = int32_to_float32(val, &env->vec_status);
1180 tmp = int64_to_float32(1ULL << 32, &env->vec_status);
1181 u.f = float32_div(u.f, tmp, &env->vec_status);
1186 static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
1191 u.f = uint32_to_float32(val, &env->vec_status);
1192 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1193 u.f = float32_div(u.f, tmp, &env->vec_status);
1198 static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
1204 /* NaN are not treated the same way IEEE 754 does */
1205 if (unlikely(float32_is_quiet_nan(u.f))) {
1208 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1209 u.f = float32_mul(u.f, tmp, &env->vec_status);
1211 return float32_to_int32(u.f, &env->vec_status);
1214 static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
1220 /* NaN are not treated the same way IEEE 754 does */
1221 if (unlikely(float32_is_quiet_nan(u.f))) {
1224 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1225 u.f = float32_mul(u.f, tmp, &env->vec_status);
1227 return float32_to_uint32(u.f, &env->vec_status);
/*
 * Helper-entry generators: HELPER_SPE_SINGLE_CONV wraps a scalar efs*
 * conversion core; HELPER_SPE_VECTOR_CONV applies the same core to both
 * 32-bit halves of a 64-bit SPE vector (high half first).
 * NOTE(review): macro bodies are truncated by extraction (braces and some
 * continuation lines missing) and carry stray leading line-number tokens.
 */
1230 #define HELPER_SPE_SINGLE_CONV(name) \
1231 uint32_t helper_e##name(CPUPPCState *env, uint32_t val) \
1233 return e##name(env, val); \
1236 HELPER_SPE_SINGLE_CONV(fscfsi);
1238 HELPER_SPE_SINGLE_CONV(fscfui);
1240 HELPER_SPE_SINGLE_CONV(fscfuf);
1242 HELPER_SPE_SINGLE_CONV(fscfsf);
1244 HELPER_SPE_SINGLE_CONV(fsctsi);
1246 HELPER_SPE_SINGLE_CONV(fsctui);
1248 HELPER_SPE_SINGLE_CONV(fsctsiz);
1250 HELPER_SPE_SINGLE_CONV(fsctuiz);
1252 HELPER_SPE_SINGLE_CONV(fsctsf);
1254 HELPER_SPE_SINGLE_CONV(fsctuf);
1256 #define HELPER_SPE_VECTOR_CONV(name) \
1257 uint64_t helper_ev##name(CPUPPCState *env, uint64_t val) \
1259 return ((uint64_t)e##name(env, val >> 32) << 32) | \
1260 (uint64_t)e##name(env, val); \
1263 HELPER_SPE_VECTOR_CONV(fscfsi);
1265 HELPER_SPE_VECTOR_CONV(fscfui);
1267 HELPER_SPE_VECTOR_CONV(fscfuf);
1269 HELPER_SPE_VECTOR_CONV(fscfsf);
1271 HELPER_SPE_VECTOR_CONV(fsctsi);
1273 HELPER_SPE_VECTOR_CONV(fsctui);
1275 HELPER_SPE_VECTOR_CONV(fsctsiz);
1277 HELPER_SPE_VECTOR_CONV(fsctuiz);
1279 HELPER_SPE_VECTOR_CONV(fsctsf);
1281 HELPER_SPE_VECTOR_CONV(fsctuf);
1283 /* Single-precision floating-point arithmetic */
/*
 * SPE efs* arithmetic cores (add/sub/mul/div on float32 images, using
 * env->vec_status) plus the scalar and vector helper-entry generator
 * macros; the vector form operates on both halves of a 64-bit operand.
 * NOTE(review): region truncated by extraction — union declarations,
 * returns and braces are missing; stray leading line numbers on each line.
 */
1284 static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
1290 u1.f = float32_add(u1.f, u2.f, &env->vec_status);
1294 static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
1300 u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
1304 static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
1310 u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
1314 static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
1320 u1.f = float32_div(u1.f, u2.f, &env->vec_status);
1324 #define HELPER_SPE_SINGLE_ARITH(name) \
1325 uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
1327 return e##name(env, op1, op2); \
1330 HELPER_SPE_SINGLE_ARITH(fsadd);
1332 HELPER_SPE_SINGLE_ARITH(fssub);
1334 HELPER_SPE_SINGLE_ARITH(fsmul);
1336 HELPER_SPE_SINGLE_ARITH(fsdiv);
1338 #define HELPER_SPE_VECTOR_ARITH(name) \
1339 uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
1341 return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) | \
1342 (uint64_t)e##name(env, op1, op2); \
1345 HELPER_SPE_VECTOR_ARITH(fsadd);
1347 HELPER_SPE_VECTOR_ARITH(fssub);
1349 HELPER_SPE_VECTOR_ARITH(fsmul);
1351 HELPER_SPE_VECTOR_ARITH(fsdiv);
1353 /* Single-precision floating-point comparisons */
/*
 * SPE compare cores: each returns 4 or 0. Note efscmpgt is expressed as
 * !(op1 <= op2) via float32_le. The efstst* "test" variants are documented
 * to ignore special values but currently just forward to the efscmp*
 * cores (see the XXX markers). The helper-entry macro shifts the result
 * into the CR field position.
 * NOTE(review): region truncated by extraction — union declarations and
 * braces missing; stray leading line numbers on each line.
 */
1354 static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1360 return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1363 static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1369 return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
1372 static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
1378 return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1381 static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1383 /* XXX: TODO: ignore special values (NaN, infinites, ...) */
1384 return efscmplt(env, op1, op2);
1387 static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1389 /* XXX: TODO: ignore special values (NaN, infinites, ...) */
1390 return efscmpgt(env, op1, op2);
1393 static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
1395 /* XXX: TODO: ignore special values (NaN, infinites, ...) */
1396 return efscmpeq(env, op1, op2);
1399 #define HELPER_SINGLE_SPE_CMP(name) \
1400 uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
1402 return e##name(env, op1, op2) << 2; \
1405 HELPER_SINGLE_SPE_CMP(fststlt);
1407 HELPER_SINGLE_SPE_CMP(fststgt);
1409 HELPER_SINGLE_SPE_CMP(fststeq);
1411 HELPER_SINGLE_SPE_CMP(fscmplt);
1413 HELPER_SINGLE_SPE_CMP(fscmpgt);
1415 HELPER_SINGLE_SPE_CMP(fscmpeq);
/*
 * Combine the per-element results (each 0 or 1) of a vector SPE compare
 * into the 4-bit CR field: bit3 = high-element result, bit2 = low-element
 * result, bit1 = either true (OR), bit0 = both true (AND).
 */
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
/*
 * Generate a vector SPE compare helper: run the scalar compare core on
 * the high and low 32-bit halves and merge the two results into one
 * 4-bit CR code via evcmp_merge.
 * NOTE(review): macro body truncated by extraction (braces missing);
 * stray leading line numbers on each line.
 */
1422 #define HELPER_VECTOR_SPE_CMP(name) \
1423 uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
1425 return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32), \
1426 e##name(env, op1, op2)); \
1429 HELPER_VECTOR_SPE_CMP(fststlt);
1431 HELPER_VECTOR_SPE_CMP(fststgt);
1433 HELPER_VECTOR_SPE_CMP(fststeq);
1435 HELPER_VECTOR_SPE_CMP(fscmplt);
1437 HELPER_VECTOR_SPE_CMP(fscmpgt);
1439 HELPER_VECTOR_SPE_CMP(fscmpeq);
1441 /* Double-precision floating-point conversion */
/*
 * SPE efd* conversion helpers (double <-> 32/64-bit integers and 32-bit
 * fixed-point fractions), all using env->vec_status. The efdct* integer
 * conversions short-circuit NaN inputs before converting.
 * NOTE(review): region truncated by extraction — union declarations,
 * early-return bodies and braces are missing; stray leading line numbers
 * on each line. Restore from VCS.
 */
1442 uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
1446 u.d = int32_to_float64(val, &env->vec_status);
1451 uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
1455 u.d = int64_to_float64(val, &env->vec_status);
1460 uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
1464 u.d = uint32_to_float64(val, &env->vec_status);
1469 uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
1473 u.d = uint64_to_float64(val, &env->vec_status);
1478 uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
1483 /* NaN are not treated the same way IEEE 754 does */
1484 if (unlikely(float64_is_any_nan(u.d))) {
1488 return float64_to_int32(u.d, &env->vec_status);
1491 uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
1496 /* NaN are not treated the same way IEEE 754 does */
1497 if (unlikely(float64_is_any_nan(u.d))) {
1501 return float64_to_uint32(u.d, &env->vec_status);
1504 uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
1509 /* NaN are not treated the same way IEEE 754 does */
1510 if (unlikely(float64_is_any_nan(u.d))) {
1514 return float64_to_int32_round_to_zero(u.d, &env->vec_status);
1517 uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
1522 /* NaN are not treated the same way IEEE 754 does */
1523 if (unlikely(float64_is_any_nan(u.d))) {
1527 return float64_to_int64_round_to_zero(u.d, &env->vec_status);
1530 uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
1535 /* NaN are not treated the same way IEEE 754 does */
1536 if (unlikely(float64_is_any_nan(u.d))) {
1540 return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
1543 uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
1548 /* NaN are not treated the same way IEEE 754 does */
1549 if (unlikely(float64_is_any_nan(u.d))) {
1553 return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
1556 uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
1561 u.d = int32_to_float64(val, &env->vec_status);
1562 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1563 u.d = float64_div(u.d, tmp, &env->vec_status);
1568 uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
1573 u.d = uint32_to_float64(val, &env->vec_status);
/* NOTE(review): unsigned path builds the 2^32 scale with int64_to_float64;
 * harmless since 2^32 fits in int64, but inconsistent with efscfuf's
 * uint64_to_float32 — consider uint64_to_float64 for symmetry. */
1574 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1575 u.d = float64_div(u.d, tmp, &env->vec_status);
1580 uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
1586 /* NaN are not treated the same way IEEE 754 does */
1587 if (unlikely(float64_is_any_nan(u.d))) {
1590 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1591 u.d = float64_mul(u.d, tmp, &env->vec_status);
1593 return float64_to_int32(u.d, &env->vec_status);
1596 uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
1602 /* NaN are not treated the same way IEEE 754 does */
1603 if (unlikely(float64_is_any_nan(u.d))) {
1606 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1607 u.d = float64_mul(u.d, tmp, &env->vec_status);
1609 return float64_to_uint32(u.d, &env->vec_status);
1612 uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
1618 u2.f = float64_to_float32(u1.d, &env->vec_status);
1623 uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
1629 u2.d = float32_to_float64(u1.f, &env->vec_status);
1634 /* Double precision fixed-point arithmetic */
1635 uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
1641 u1.d = float64_add(u1.d, u2.d, &env->vec_status);
1645 uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
1651 u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
1655 uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
1661 u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
1665 uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
1671 u1.d = float64_div(u1.d, u2.d, &env->vec_status);
1675 /* Double precision floating point helpers */
1676 uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1682 return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1685 uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1691 return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
1694 uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1700 return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1703 uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1705 /* XXX: TODO: test special values (NaN, infinites, ...) */
1706 return helper_efdtstlt(env, op1, op2);
1709 uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1711 /* XXX: TODO: test special values (NaN, infinites, ...) */
1712 return helper_efdtstgt(env, op1, op2);
1715 uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1717 /* XXX: TODO: test special values (NaN, infinites, ...) */
1718 return helper_efdtsteq(env, op1, op2);
/* Extract a VSX register number split across two instruction fields:
 * nb1 bits starting at shift1 form the high part, nb2 bits starting
 * at shift2 form the low part.
 * Fix: parenthesize nb2 in the shift amount so a non-atomic argument
 * expression cannot change precedence. */
#define DECODE_SPLIT(opcode, shift1, nb1, shift2, nb2)            \
    (((((opcode) >> (shift1)) & ((1 << (nb1)) - 1)) << (nb2)) |   \
     (((opcode) >> (shift2)) & ((1 << (nb2)) - 1)))

/* VSX operand decoders: the extra high bit (TX/AX/BX/CX) sits in the
 * low opcode bits; the 5-bit field sits higher up. */
#define xT(opcode) DECODE_SPLIT(opcode, 0, 1, 21, 5)
#define xA(opcode) DECODE_SPLIT(opcode, 2, 1, 16, 5)
#define xB(opcode) DECODE_SPLIT(opcode, 1, 1, 11, 5)
#define xC(opcode) DECODE_SPLIT(opcode, 3, 1, 6, 5)
/* CR field selector (3 bits). */
#define BF(opcode) (((opcode) >> (31 - 8)) & 7)
1731 typedef union _ppc_vsr_t {
1738 static void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
1741 vsr->f64[0] = env->fpr[n];
1742 vsr->u64[1] = env->vsr[n];
1744 vsr->u64[0] = env->avr[n-32].u64[0];
1745 vsr->u64[1] = env->avr[n-32].u64[1];
1749 static void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
1752 env->fpr[n] = vsr->f64[0];
1753 env->vsr[n] = vsr->u64[1];
1755 env->avr[n-32].u64[0] = vsr->u64[0];
1756 env->avr[n-32].u64[1] = vsr->u64[1];
1760 #define float64_to_float64(x, env) x