* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "cpu.h"
-#include "helper.h"
+#include "exec/helper-proto.h"
+
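+/* Setting the most-significant fraction bit converts a signaling NaN into
+ * the corresponding quiet NaN while preserving its sign and payload. */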
+#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
+#define float32_snan_to_qnan(x) ((x) | 0x00400000)
/*****************************************************************************/
/* Floating point operations helpers */
return ((u.ll >> 52) & 0x7FF) == 0;
}
-uint32_t helper_compute_fprf(CPUPPCState *env, uint64_t arg, uint32_t set_fprf)
+static inline int ppc_float32_get_unbiased_exp(float32 f)
+{
+ return ((f >> 23) & 0xFF) - 127;
+}
+
+static inline int ppc_float64_get_unbiased_exp(float64 f)
+{
+ return ((f >> 52) & 0x7FF) - 1023;
+}
+
+void helper_compute_fprf(CPUPPCState *env, uint64_t arg)
{
CPU_DoubleU farg;
int isneg;
- int ret;
+ int fprf;
farg.ll = arg;
isneg = float64_is_neg(farg.d);
if (unlikely(float64_is_any_nan(farg.d))) {
if (float64_is_signaling_nan(farg.d)) {
/* Signaling NaN: flags are undefined */
- ret = 0x00;
+ fprf = 0x00;
} else {
/* Quiet NaN */
- ret = 0x11;
+ fprf = 0x11;
}
} else if (unlikely(float64_is_infinity(farg.d))) {
/* +/- infinity */
if (isneg) {
- ret = 0x09;
+ fprf = 0x09;
} else {
- ret = 0x05;
+ fprf = 0x05;
}
} else {
if (float64_is_zero(farg.d)) {
/* +/- zero */
if (isneg) {
- ret = 0x12;
+ fprf = 0x12;
} else {
- ret = 0x02;
+ fprf = 0x02;
}
} else {
if (isden(farg.d)) {
/* Denormalized numbers */
- ret = 0x10;
+ fprf = 0x10;
} else {
/* Normalized numbers */
- ret = 0x00;
+ fprf = 0x00;
}
if (isneg) {
- ret |= 0x08;
+ fprf |= 0x08;
} else {
- ret |= 0x04;
+ fprf |= 0x04;
}
}
}
- if (set_fprf) {
- /* We update FPSCR_FPRF */
- env->fpscr &= ~(0x1F << FPSCR_FPRF);
- env->fpscr |= ret << FPSCR_FPRF;
- }
- /* We just need fpcc to update Rc1 */
- return ret & 0xF;
+ /* We update FPSCR_FPRF */
+ env->fpscr &= ~(0x1F << FPSCR_FPRF);
+ env->fpscr |= fprf << FPSCR_FPRF;
}
/* Floating-point invalid operations exception */
static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op,
int set_fpcc)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
uint64_t ret = 0;
int ve;
}
/* We must update the target FPR before raising the exception */
if (ve != 0) {
- env->exception_index = POWERPC_EXCP_PROGRAM;
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
/* Update the floating-point invalid operation summary */
env->fpscr |= 1 << FPSCR_VX;
/* Update the floating-point exception summary */
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (ve != 0) {
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
env->fpscr |= 1 << FPSCR_ZX;
env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
/* Update the floating-point exception summary */
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_ze != 0) {
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
static inline void float_overflow_excp(CPUPPCState *env)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
env->fpscr |= 1 << FPSCR_OX;
/* Update the floating-point exception summary */
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_oe != 0) {
/* XXX: should adjust the result */
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
/* We must update the target FPR before raising the exception */
- env->exception_index = POWERPC_EXCP_PROGRAM;
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
} else {
env->fpscr |= 1 << FPSCR_XX;
static inline void float_underflow_excp(CPUPPCState *env)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
env->fpscr |= 1 << FPSCR_UX;
/* Update the floating-point exception summary */
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_ue != 0) {
/* XXX: should adjust the result */
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
/* We must update the target FPR before raising the exception */
- env->exception_index = POWERPC_EXCP_PROGRAM;
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
}
}
static inline void float_inexact_excp(CPUPPCState *env)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
env->fpscr |= 1 << FPSCR_XX;
/* Update the floating-point exception summary */
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_xe != 0) {
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
/* We must update the target FPR before raising the exception */
- env->exception_index = POWERPC_EXCP_PROGRAM;
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
}
}
void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
int prev;
prev = (env->fpscr >> bit) & 1;
if (prev == 0) {
switch (bit) {
case FPSCR_VX:
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_ve) {
goto raise_ve;
}
break;
case FPSCR_OX:
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_oe) {
goto raise_oe;
}
break;
case FPSCR_UX:
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_ue) {
goto raise_ue;
}
break;
case FPSCR_ZX:
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_ze) {
goto raise_ze;
}
break;
case FPSCR_XX:
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_xe) {
goto raise_xe;
}
case FPSCR_VXSQRT:
case FPSCR_VXCVI:
env->fpscr |= 1 << FPSCR_VX;
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_ve != 0) {
goto raise_ve;
}
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
/* We have to update Rc1 before raising the exception */
- env->exception_index = POWERPC_EXCP_PROGRAM;
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
break;
}
}
void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
target_ulong prev, new;
int i;
}
if ((fpscr_ex & fpscr_eex) != 0) {
env->fpscr |= 1 << FPSCR_FEX;
- env->exception_index = POWERPC_EXCP_PROGRAM;
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
/* XXX: we should compute it properly */
env->error_code = POWERPC_EXCP_FP;
} else {
void helper_float_check_status(CPUPPCState *env)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
int status = get_float_exception_flags(&env->fp_status);
if (status & float_flag_divbyzero) {
float_inexact_excp(env);
}
- if (env->exception_index == POWERPC_EXCP_PROGRAM &&
+ if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
(env->error_code & POWERPC_EXCP_FP)) {
 /* Deferred floating-point exception after target FPR update */
if (msr_fe0 != 0 || msr_fe1 != 0) {
- helper_raise_exception_err(env, env->exception_index,
+ helper_raise_exception_err(env, cs->exception_index,
env->error_code);
}
}
return farg1.ll;
}
-/* fctiw - fctiw. */
-uint64_t helper_fctiw(CPUPPCState *env, uint64_t arg)
-{
- CPU_DoubleU farg;
- farg.ll = arg;
-
- if (unlikely(float64_is_signaling_nan(farg.d))) {
- /* sNaN conversion */
- farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
- POWERPC_EXCP_FP_VXCVI, 1);
- } else if (unlikely(float64_is_quiet_nan(farg.d) ||
- float64_is_infinity(farg.d))) {
- /* qNan / infinity conversion */
- farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);
- } else {
- farg.ll = float64_to_int32(farg.d, &env->fp_status);
- /* XXX: higher bits are not supposed to be significant.
- * to make tests easier, return the same as a real PowerPC 750
- */
- farg.ll |= 0xFFF80000ULL << 32;
- }
- return farg.ll;
-}
-
-/* fctiwz - fctiwz. */
-uint64_t helper_fctiwz(CPUPPCState *env, uint64_t arg)
-{
- CPU_DoubleU farg;
-
- farg.ll = arg;
-
- if (unlikely(float64_is_signaling_nan(farg.d))) {
- /* sNaN conversion */
- farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
- POWERPC_EXCP_FP_VXCVI, 1);
- } else if (unlikely(float64_is_quiet_nan(farg.d) ||
- float64_is_infinity(farg.d))) {
- /* qNan / infinity conversion */
- farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);
- } else {
- farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
- /* XXX: higher bits are not supposed to be significant.
- * to make tests easier, return the same as a real PowerPC 750
- */
- farg.ll |= 0xFFF80000ULL << 32;
- }
- return farg.ll;
-}
-
-#if defined(TARGET_PPC64)
-/* fcfid - fcfid. */
-uint64_t helper_fcfid(CPUPPCState *env, uint64_t arg)
-{
- CPU_DoubleU farg;
-
- farg.d = int64_to_float64(arg, &env->fp_status);
- return farg.ll;
-}
-
-/* fctid - fctid. */
-uint64_t helper_fctid(CPUPPCState *env, uint64_t arg)
-{
- CPU_DoubleU farg;
-
- farg.ll = arg;
-
- if (unlikely(float64_is_signaling_nan(farg.d))) {
- /* sNaN conversion */
- farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
- POWERPC_EXCP_FP_VXCVI, 1);
- } else if (unlikely(float64_is_quiet_nan(farg.d) ||
- float64_is_infinity(farg.d))) {
- /* qNan / infinity conversion */
- farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);
- } else {
- farg.ll = float64_to_int64(farg.d, &env->fp_status);
- }
- return farg.ll;
-}
-
-/* fctidz - fctidz. */
-uint64_t helper_fctidz(CPUPPCState *env, uint64_t arg)
-{
- CPU_DoubleU farg;
-
- farg.ll = arg;
-
- if (unlikely(float64_is_signaling_nan(farg.d))) {
- /* sNaN conversion */
- farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
- POWERPC_EXCP_FP_VXCVI, 1);
- } else if (unlikely(float64_is_quiet_nan(farg.d) ||
- float64_is_infinity(farg.d))) {
- /* qNan / infinity conversion */
- farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);
- } else {
- farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
- }
- return farg.ll;
-}
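+/* FPU_FCTI expands to a float-to-integer conversion helper; cvt selects the
+ * softfloat conversion routine and nanval is the result substituted when the
+ * source operand is a NaN. */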
+#define FPU_FCTI(op, cvt, nanval) \
+uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \
+{ \
+ CPU_DoubleU farg; \
+ \
+ farg.ll = arg; \
+ farg.ll = float64_to_##cvt(farg.d, &env->fp_status); \
+ \
+ if (unlikely(env->fp_status.float_exception_flags)) { \
+ if (float64_is_any_nan(arg)) { \
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1); \
+ if (float64_is_signaling_nan(arg)) { \
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
+ } \
+ farg.ll = nanval; \
+ } else if (env->fp_status.float_exception_flags & \
+ float_flag_invalid) { \
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1); \
+ } \
+ helper_float_check_status(env); \
+ } \
+ return farg.ll; \
+ }
-#endif
+FPU_FCTI(fctiw, int32, 0x80000000U)
+FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
+FPU_FCTI(fctiwu, uint32, 0x00000000U)
+FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
+FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
+FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
+FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
+FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
+
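+/* FPU_FCFI expands to an integer-to-float conversion helper; when is_single
+ * is set the value is first converted to single precision and then widened
+ * back to the double-precision register format (fcfids/fcfidus). */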
+#define FPU_FCFI(op, cvtr, is_single) \
+uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \
+{ \
+ CPU_DoubleU farg; \
+ \
+ if (is_single) { \
+ float32 tmp = cvtr(arg, &env->fp_status); \
+ farg.d = float32_to_float64(tmp, &env->fp_status); \
+ } else { \
+ farg.d = cvtr(arg, &env->fp_status); \
+ } \
+ helper_float_check_status(env); \
+ return farg.ll; \
+}
+
+FPU_FCFI(fcfid, int64_to_float64, 0)
+FPU_FCFI(fcfids, int64_to_float32, 1)
+FPU_FCFI(fcfidu, uint64_to_float64, 0)
+FPU_FCFI(fcfidus, uint64_to_float32, 1)
static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
int rounding_mode)
if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN round */
- farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
- POWERPC_EXCP_FP_VXCVI, 1);
- } else if (unlikely(float64_is_quiet_nan(farg.d) ||
- float64_is_infinity(farg.d))) {
- /* qNan / infinity round */
- farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ farg.ll = arg | 0x0008000000000000ULL;
} else {
+ int inexact = get_float_exception_flags(&env->fp_status) &
+ float_flag_inexact;
set_float_rounding_mode(rounding_mode, &env->fp_status);
farg.ll = float64_round_to_int(farg.d, &env->fp_status);
/* Restore rounding mode from FPSCR */
fpscr_set_rounding_mode(env);
+
+ /* fri* does not set FPSCR[XX] */
+ if (!inexact) {
+ env->fp_status.float_exception_flags &= ~float_flag_inexact;
+ }
}
+ helper_float_check_status(env);
return farg.ll;
}
uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
- return do_fri(env, arg, float_round_nearest_even);
+ return do_fri(env, arg, float_round_ties_away);
}
uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
farg.ll = arg;
- if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
- /* Square root of a negative nonzero number */
- farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
- } else {
+ if (unlikely(float64_is_any_nan(farg.d))) {
if (unlikely(float64_is_signaling_nan(farg.d))) {
- /* sNaN square root */
+ /* sNaN square root */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ farg.ll = float64_snan_to_qnan(farg.ll);
}
+ } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
+ /* Square root of a negative nonzero number */
+ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
+ } else {
farg.d = float64_sqrt(farg.d, &env->fp_status);
}
return farg.ll;
uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
{
CPU_DoubleU farg;
- float32 f32;
farg.ll = arg;
- if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
- /* Reciprocal square root of a negative nonzero number */
- farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
- } else {
+ if (unlikely(float64_is_any_nan(farg.d))) {
if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN reciprocal square root */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ farg.ll = float64_snan_to_qnan(farg.ll);
}
+ } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
+ /* Reciprocal square root of a negative nonzero number */
+ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
+ } else {
farg.d = float64_sqrt(farg.d, &env->fp_status);
farg.d = float64_div(float64_one, farg.d, &env->fp_status);
- f32 = float64_to_float32(farg.d, &env->fp_status);
- farg.d = float32_to_float64(f32, &env->fp_status);
}
+
return farg.ll;
}
}
}
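+/* ftdiv and ftsqrt return a 4-bit CR image:
+ * 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0). */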
+uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
+{
+ int fe_flag = 0;
+ int fg_flag = 0;
+
+ if (unlikely(float64_is_infinity(fra) ||
+ float64_is_infinity(frb) ||
+ float64_is_zero(frb))) {
+ fe_flag = 1;
+ fg_flag = 1;
+ } else {
+ int e_a = ppc_float64_get_unbiased_exp(fra);
+ int e_b = ppc_float64_get_unbiased_exp(frb);
+
+ if (unlikely(float64_is_any_nan(fra) ||
+ float64_is_any_nan(frb))) {
+ fe_flag = 1;
+ } else if ((e_b <= -1022) || (e_b >= 1021)) {
+ fe_flag = 1;
+ } else if (!float64_is_zero(fra) &&
+ (((e_a - e_b) >= 1023) ||
+ ((e_a - e_b) <= -1021) ||
+ (e_a <= -970))) {
+ fe_flag = 1;
+ }
+
+ if (unlikely(float64_is_zero_or_denormal(frb))) {
+ /* XB is not zero because of the above check and */
+ /* so must be denormalized. */
+ fg_flag = 1;
+ }
+ }
+
+ return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
+}
+
+uint32_t helper_ftsqrt(uint64_t frb)
+{
+ int fe_flag = 0;
+ int fg_flag = 0;
+
+ if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
+ fe_flag = 1;
+ fg_flag = 1;
+ } else {
+ int e_b = ppc_float64_get_unbiased_exp(frb);
+
+ if (unlikely(float64_is_any_nan(frb))) {
+ fe_flag = 1;
+ } else if (unlikely(float64_is_zero(frb))) {
+ fe_flag = 1;
+ } else if (unlikely(float64_is_neg(frb))) {
+ fe_flag = 1;
+ } else if (!float64_is_zero(frb) && (e_b <= (-1022+52))) {
+ fe_flag = 1;
+ }
+
+ if (unlikely(float64_is_zero_or_denormal(frb))) {
+ /* XB is not zero because of the above check and */
+ /* therefore must be denormalized. */
+ fg_flag = 1;
+ }
+ }
+
+ return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
+}
+
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
uint32_t crfD)
{
float64 f64[2];
} ppc_vsr_t;
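+/* VsrD(i) and VsrW(i) select the i-th doubleword/word of a VSR in the
+ * architected (big-endian) element order, independent of host byte order. */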
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VsrW(i) u32[i]
+#define VsrD(i) u64[i]
+#else
+#define VsrW(i) u32[3-(i)]
+#define VsrD(i) u64[1-(i)]
+#endif
+
static void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
{
if (n < 32) {
- vsr->f64[0] = env->fpr[n];
- vsr->u64[1] = env->vsr[n];
+ vsr->VsrD(0) = env->fpr[n];
+ vsr->VsrD(1) = env->vsr[n];
} else {
vsr->u64[0] = env->avr[n-32].u64[0];
vsr->u64[1] = env->avr[n-32].u64[1];
static void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
{
if (n < 32) {
- env->fpr[n] = vsr->f64[0];
- env->vsr[n] = vsr->u64[1];
+ env->fpr[n] = vsr->VsrD(0);
+ env->vsr[n] = vsr->VsrD(1);
} else {
env->avr[n-32].u64[0] = vsr->u64[0];
env->avr[n-32].u64[1] = vsr->u64[1];
* op - operation (add or sub)
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* sfprf - set FPRF
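+ * r2sp - round intermediate result to single precision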
*/
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp) \
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld[i] = tp##_##op(xa.fld[i], xb.fld[i], &tstat); \
+ xt.fld = tp##_##op(xa.fld, xb.fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if (tp##_is_infinity(xa.fld[i]) && tp##_is_infinity(xb.fld[i])) {\
+ if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf); \
- } else if (tp##_is_signaling_nan(xa.fld[i]) || \
- tp##_is_signaling_nan(xb.fld[i])) { \
+ } else if (tp##_is_signaling_nan(xa.fld) || \
+ tp##_is_signaling_nan(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
\
if (r2sp) { \
- xt.fld[i] = helper_frsp(env, xt.fld[i]); \
+ xt.fld = helper_frsp(env, xt.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf(env, xt.fld[i], sfprf); \
+ helper_compute_fprf(env, xt.fld); \
} \
} \
putVSR(xT(opcode), &xt, env); \
helper_float_check_status(env); \
}
-VSX_ADD_SUB(xsadddp, add, 1, float64, f64, 1, 0)
-VSX_ADD_SUB(xsaddsp, add, 1, float64, f64, 1, 1)
-VSX_ADD_SUB(xvadddp, add, 2, float64, f64, 0, 0)
-VSX_ADD_SUB(xvaddsp, add, 4, float32, f32, 0, 0)
-VSX_ADD_SUB(xssubdp, sub, 1, float64, f64, 1, 0)
-VSX_ADD_SUB(xssubsp, sub, 1, float64, f64, 1, 1)
-VSX_ADD_SUB(xvsubdp, sub, 2, float64, f64, 0, 0)
-VSX_ADD_SUB(xvsubsp, sub, 4, float32, f32, 0, 0)
+VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
+VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
+VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
+VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
+VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
+VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
+VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
+VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
/* VSX_MUL - VSX floating point multiply
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* sfprf - set FPRF
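+ * r2sp - round intermediate result to single precision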
*/
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp) \
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld[i] = tp##_mul(xa.fld[i], xb.fld[i], &tstat); \
+ xt.fld = tp##_mul(xa.fld, xb.fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if ((tp##_is_infinity(xa.fld[i]) && tp##_is_zero(xb.fld[i])) || \
- (tp##_is_infinity(xb.fld[i]) && tp##_is_zero(xa.fld[i]))) { \
+ if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) || \
+ (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf); \
- } else if (tp##_is_signaling_nan(xa.fld[i]) || \
- tp##_is_signaling_nan(xb.fld[i])) { \
+ } else if (tp##_is_signaling_nan(xa.fld) || \
+ tp##_is_signaling_nan(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
\
if (r2sp) { \
- xt.fld[i] = helper_frsp(env, xt.fld[i]); \
+ xt.fld = helper_frsp(env, xt.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf(env, xt.fld[i], sfprf); \
+ helper_compute_fprf(env, xt.fld); \
} \
} \
\
helper_float_check_status(env); \
}
-VSX_MUL(xsmuldp, 1, float64, f64, 1, 0)
-VSX_MUL(xsmulsp, 1, float64, f64, 1, 1)
-VSX_MUL(xvmuldp, 2, float64, f64, 0, 0)
-VSX_MUL(xvmulsp, 4, float32, f32, 0, 0)
+VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
+VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
+VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
+VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
/* VSX_DIV - VSX floating point divide
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* sfprf - set FPRF
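+ * r2sp - round intermediate result to single precision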
*/
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp) \
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld[i] = tp##_div(xa.fld[i], xb.fld[i], &tstat); \
+ xt.fld = tp##_div(xa.fld, xb.fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if (tp##_is_infinity(xa.fld[i]) && tp##_is_infinity(xb.fld[i])) { \
+ if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf); \
- } else if (tp##_is_zero(xa.fld[i]) && \
- tp##_is_zero(xb.fld[i])) { \
+ } else if (tp##_is_zero(xa.fld) && \
+ tp##_is_zero(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf); \
- } else if (tp##_is_signaling_nan(xa.fld[i]) || \
- tp##_is_signaling_nan(xb.fld[i])) { \
+ } else if (tp##_is_signaling_nan(xa.fld) || \
+ tp##_is_signaling_nan(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
\
if (r2sp) { \
- xt.fld[i] = helper_frsp(env, xt.fld[i]); \
+ xt.fld = helper_frsp(env, xt.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf(env, xt.fld[i], sfprf); \
+ helper_compute_fprf(env, xt.fld); \
} \
} \
\
helper_float_check_status(env); \
}
-VSX_DIV(xsdivdp, 1, float64, f64, 1, 0)
-VSX_DIV(xsdivsp, 1, float64, f64, 1, 1)
-VSX_DIV(xvdivdp, 2, float64, f64, 0, 0)
-VSX_DIV(xvdivsp, 4, float32, f32, 0, 0)
+VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
+VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
+VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
+VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
/* VSX_RE - VSX floating point reciprocal estimate
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* sfprf - set FPRF
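+ * r2sp - round intermediate result to single precision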
*/
-#define VSX_RE(op, nels, tp, fld, sfprf) \
+#define VSX_RE(op, nels, tp, fld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
ppc_vsr_t xt, xb; \
helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_signaling_nan(xb.fld[i]))) { \
+ if (unlikely(tp##_is_signaling_nan(xb.fld))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
- xt.fld[i] = tp##_div(tp##_one, xb.fld[i], &env->fp_status); \
+ xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status); \
+ \
+ if (r2sp) { \
+ xt.fld = helper_frsp(env, xt.fld); \
+ } \
+ \
if (sfprf) { \
- helper_compute_fprf(env, xt.fld[0], sfprf); \
+ helper_compute_fprf(env, xt.fld); \
} \
} \
\
helper_float_check_status(env); \
}
-VSX_RE(xsredp, 1, float64, f64, 1)
-VSX_RE(xvredp, 2, float64, f64, 0)
-VSX_RE(xvresp, 4, float32, f32, 0)
+VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
+VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
+VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
+VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
/* VSX_SQRT - VSX floating point square root
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* sfprf - set FPRF
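+ * r2sp - round intermediate result to single precision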
*/
-#define VSX_SQRT(op, nels, tp, fld, sfprf) \
+#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
ppc_vsr_t xt, xb; \
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld[i] = tp##_sqrt(xb.fld[i], &tstat); \
+ xt.fld = tp##_sqrt(xb.fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if (tp##_is_neg(xb.fld[i]) && !tp##_is_zero(xb.fld[i])) { \
+ if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
- } else if (tp##_is_signaling_nan(xb.fld[i])) { \
+ } else if (tp##_is_signaling_nan(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
\
+ if (r2sp) { \
+ xt.fld = helper_frsp(env, xt.fld); \
+ } \
+ \
if (sfprf) { \
- helper_compute_fprf(env, xt.fld[i], sfprf); \
+ helper_compute_fprf(env, xt.fld); \
} \
} \
\
helper_float_check_status(env); \
}
-VSX_SQRT(xssqrtdp, 1, float64, f64, 1)
-VSX_SQRT(xvsqrtdp, 2, float64, f64, 0)
-VSX_SQRT(xvsqrtsp, 4, float32, f32, 0)
+VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
+VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
+VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
+VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* sfprf - set FPRF
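+ * r2sp - round intermediate result to single precision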
*/
-#define VSX_RSQRTE(op, nels, tp, fld, sfprf) \
+#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
ppc_vsr_t xt, xb; \
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld[i] = tp##_sqrt(xb.fld[i], &tstat); \
- xt.fld[i] = tp##_div(tp##_one, xt.fld[i], &tstat); \
+ xt.fld = tp##_sqrt(xb.fld, &tstat); \
+ xt.fld = tp##_div(tp##_one, xt.fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if (tp##_is_neg(xb.fld[i]) && !tp##_is_zero(xb.fld[i])) { \
+ if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
- } else if (tp##_is_signaling_nan(xb.fld[i])) { \
+ } else if (tp##_is_signaling_nan(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
\
+ if (r2sp) { \
+ xt.fld = helper_frsp(env, xt.fld); \
+ } \
+ \
if (sfprf) { \
- helper_compute_fprf(env, xt.fld[i], sfprf); \
+ helper_compute_fprf(env, xt.fld); \
} \
} \
\
helper_float_check_status(env); \
}
-VSX_RSQRTE(xsrsqrtedp, 1, float64, f64, 1)
-VSX_RSQRTE(xvrsqrtedp, 2, float64, f64, 0)
-VSX_RSQRTE(xvrsqrtesp, 4, float32, f32, 0)
-
-static inline int ppc_float32_get_unbiased_exp(float32 f)
-{
- return ((f >> 23) & 0xFF) - 127;
-}
-
-static inline int ppc_float64_get_unbiased_exp(float64 f)
-{
- return ((f >> 52) & 0x7FF) - 1023;
-}
+VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
+VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
+VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
+VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
/* VSX_TDIV - VSX floating point test for divide
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* emin - minimum unbiased exponent
* emax - maximum unbiased exponent
* nbits - number of fraction bits
getVSR(xB(opcode), &xb, env); \
\
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_infinity(xa.fld[i]) || \
- tp##_is_infinity(xb.fld[i]) || \
- tp##_is_zero(xb.fld[i]))) { \
+ if (unlikely(tp##_is_infinity(xa.fld) || \
+ tp##_is_infinity(xb.fld) || \
+ tp##_is_zero(xb.fld))) { \
fe_flag = 1; \
fg_flag = 1; \
} else { \
- int e_a = ppc_##tp##_get_unbiased_exp(xa.fld[i]); \
- int e_b = ppc_##tp##_get_unbiased_exp(xb.fld[i]); \
+ int e_a = ppc_##tp##_get_unbiased_exp(xa.fld); \
+ int e_b = ppc_##tp##_get_unbiased_exp(xb.fld); \
\
- if (unlikely(tp##_is_any_nan(xa.fld[i]) || \
- tp##_is_any_nan(xb.fld[i]))) { \
+ if (unlikely(tp##_is_any_nan(xa.fld) || \
+ tp##_is_any_nan(xb.fld))) { \
fe_flag = 1; \
} else if ((e_b <= emin) || (e_b >= (emax-2))) { \
fe_flag = 1; \
- } else if (!tp##_is_zero(xa.fld[i]) && \
+ } else if (!tp##_is_zero(xa.fld) && \
(((e_a - e_b) >= emax) || \
((e_a - e_b) <= (emin+1)) || \
(e_a <= (emin+nbits)))) { \
fe_flag = 1; \
} \
\
- if (unlikely(tp##_is_zero_or_denormal(xb.fld[i]))) { \
+ if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
/* XB is not zero because of the above check and */ \
/* so must be denormalized. */ \
fg_flag = 1; \
env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}
-VSX_TDIV(xstdivdp, 1, float64, f64, -1022, 1023, 52)
-VSX_TDIV(xvtdivdp, 2, float64, f64, -1022, 1023, 52)
-VSX_TDIV(xvtdivsp, 4, float32, f32, -126, 127, 23)
+VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
+VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
+VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
/* VSX_TSQRT - VSX floating point test for square root
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* emin - minimum unbiased exponent
* emax - maximum unbiased exponent
* nbits - number of fraction bits
getVSR(xB(opcode), &xb, env); \
\
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_infinity(xb.fld[i]) || \
- tp##_is_zero(xb.fld[i]))) { \
+ if (unlikely(tp##_is_infinity(xb.fld) || \
+ tp##_is_zero(xb.fld))) { \
fe_flag = 1; \
fg_flag = 1; \
} else { \
- int e_b = ppc_##tp##_get_unbiased_exp(xb.fld[i]); \
+ int e_b = ppc_##tp##_get_unbiased_exp(xb.fld); \
\
- if (unlikely(tp##_is_any_nan(xb.fld[i]))) { \
+ if (unlikely(tp##_is_any_nan(xb.fld))) { \
fe_flag = 1; \
- } else if (unlikely(tp##_is_zero(xb.fld[i]))) { \
+ } else if (unlikely(tp##_is_zero(xb.fld))) { \
fe_flag = 1; \
- } else if (unlikely(tp##_is_neg(xb.fld[i]))) { \
+ } else if (unlikely(tp##_is_neg(xb.fld))) { \
fe_flag = 1; \
- } else if (!tp##_is_zero(xb.fld[i]) && \
+ } else if (!tp##_is_zero(xb.fld) && \
(e_b <= (emin+nbits))) { \
fe_flag = 1; \
} \
\
- if (unlikely(tp##_is_zero_or_denormal(xb.fld[i]))) { \
+ if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
/* XB is not zero because of the above check and */ \
/* therefore must be denormalized. */ \
fg_flag = 1; \
env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}
-VSX_TSQRT(xstsqrtdp, 1, float64, f64, -1022, 52)
-VSX_TSQRT(xvtsqrtdp, 2, float64, f64, -1022, 52)
-VSX_TSQRT(xvtsqrtsp, 4, float32, f32, -126, 23)
+VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
+VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
+VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
/* VSX_MADD - VSX floating point multiply/add variations
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* maddflgs - flags for the float*muladd routine that control the
* various forms (madd, msub, nmadd, nmsub)
* afrm - A form (1=A, 0=M)
* sfprf - set FPRF
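+ * r2sp - round intermediate result to single precision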
*/
-#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf) \
+#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
ppc_vsr_t xt_in, xa, xb, xt_out; \
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt_out.fld[i] = tp##_muladd(xa.fld[i], b->fld[i], c->fld[i], \
- maddflgs, &tstat); \
+ if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
+ /* Avoid double rounding errors by rounding the intermediate */ \
+ /* result to odd. */ \
+ set_float_rounding_mode(float_round_to_zero, &tstat); \
+ xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld, \
+ maddflgs, &tstat); \
+ xt_out.fld |= (get_float_exception_flags(&tstat) & \
+ float_flag_inexact) != 0; \
+ } else { \
+ xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld, \
+ maddflgs, &tstat); \
+ } \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if (tp##_is_signaling_nan(xa.fld[i]) || \
- tp##_is_signaling_nan(b->fld[i]) || \
- tp##_is_signaling_nan(c->fld[i])) { \
+ if (tp##_is_signaling_nan(xa.fld) || \
+ tp##_is_signaling_nan(b->fld) || \
+ tp##_is_signaling_nan(c->fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
tstat.float_exception_flags &= ~float_flag_invalid; \
} \
- if ((tp##_is_infinity(xa.fld[i]) && tp##_is_zero(b->fld[i])) || \
- (tp##_is_zero(xa.fld[i]) && tp##_is_infinity(b->fld[i]))) { \
- xt_out.fld[i] = float64_to_##tp(fload_invalid_op_excp(env, \
+ if ((tp##_is_infinity(xa.fld) && tp##_is_zero(b->fld)) || \
+ (tp##_is_zero(xa.fld) && tp##_is_infinity(b->fld))) { \
+ xt_out.fld = float64_to_##tp(fload_invalid_op_excp(env, \
POWERPC_EXCP_FP_VXIMZ, sfprf), &env->fp_status); \
tstat.float_exception_flags &= ~float_flag_invalid; \
} \
if ((tstat.float_exception_flags & float_flag_invalid) && \
- ((tp##_is_infinity(xa.fld[i]) || \
- tp##_is_infinity(b->fld[i])) && \
- tp##_is_infinity(c->fld[i]))) { \
+ ((tp##_is_infinity(xa.fld) || \
+ tp##_is_infinity(b->fld)) && \
+ tp##_is_infinity(c->fld))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf); \
} \
} \
+ \
+ if (r2sp) { \
+ xt_out.fld = helper_frsp(env, xt_out.fld); \
+ } \
+ \
if (sfprf) { \
- helper_compute_fprf(env, xt_out.fld[i], sfprf); \
+ helper_compute_fprf(env, xt_out.fld); \
} \
} \
putVSR(xT(opcode), &xt_out, env); \
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
-VSX_MADD(xsmaddadp, 1, float64, f64, MADD_FLGS, 1, 1)
-VSX_MADD(xsmaddmdp, 1, float64, f64, MADD_FLGS, 0, 1)
-VSX_MADD(xsmsubadp, 1, float64, f64, MSUB_FLGS, 1, 1)
-VSX_MADD(xsmsubmdp, 1, float64, f64, MSUB_FLGS, 0, 1)
-VSX_MADD(xsnmaddadp, 1, float64, f64, NMADD_FLGS, 1, 1)
-VSX_MADD(xsnmaddmdp, 1, float64, f64, NMADD_FLGS, 0, 1)
-VSX_MADD(xsnmsubadp, 1, float64, f64, NMSUB_FLGS, 1, 1)
-VSX_MADD(xsnmsubmdp, 1, float64, f64, NMSUB_FLGS, 0, 1)
-
-VSX_MADD(xvmaddadp, 2, float64, f64, MADD_FLGS, 1, 0)
-VSX_MADD(xvmaddmdp, 2, float64, f64, MADD_FLGS, 0, 0)
-VSX_MADD(xvmsubadp, 2, float64, f64, MSUB_FLGS, 1, 0)
-VSX_MADD(xvmsubmdp, 2, float64, f64, MSUB_FLGS, 0, 0)
-VSX_MADD(xvnmaddadp, 2, float64, f64, NMADD_FLGS, 1, 0)
-VSX_MADD(xvnmaddmdp, 2, float64, f64, NMADD_FLGS, 0, 0)
-VSX_MADD(xvnmsubadp, 2, float64, f64, NMSUB_FLGS, 1, 0)
-VSX_MADD(xvnmsubmdp, 2, float64, f64, NMSUB_FLGS, 0, 0)
-
-VSX_MADD(xvmaddasp, 4, float32, f32, MADD_FLGS, 1, 0)
-VSX_MADD(xvmaddmsp, 4, float32, f32, MADD_FLGS, 0, 0)
-VSX_MADD(xvmsubasp, 4, float32, f32, MSUB_FLGS, 1, 0)
-VSX_MADD(xvmsubmsp, 4, float32, f32, MSUB_FLGS, 0, 0)
-VSX_MADD(xvnmaddasp, 4, float32, f32, NMADD_FLGS, 1, 0)
-VSX_MADD(xvnmaddmsp, 4, float32, f32, NMADD_FLGS, 0, 0)
-VSX_MADD(xvnmsubasp, 4, float32, f32, NMSUB_FLGS, 1, 0)
-VSX_MADD(xvnmsubmsp, 4, float32, f32, NMSUB_FLGS, 0, 0)
+VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
+VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
+VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
+VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
+VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
+VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
+VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
+VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)
+
+VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
+VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
+VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
+VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
+VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
+VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
+VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
+VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)
+
+VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
+VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
+VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
+VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
+VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
+VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
+VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
+VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)
+
+VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
+VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
+VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
+VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
+VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
+VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
+VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
+VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
#define VSX_SCALAR_CMP(op, ordered) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
getVSR(xA(opcode), &xa, env); \
getVSR(xB(opcode), &xb, env); \
\
- if (unlikely(float64_is_any_nan(xa.f64[0]) || \
- float64_is_any_nan(xb.f64[0]))) { \
- if (float64_is_signaling_nan(xa.f64[0]) || \
- float64_is_signaling_nan(xb.f64[0])) { \
+ if (unlikely(float64_is_any_nan(xa.VsrD(0)) || \
+ float64_is_any_nan(xb.VsrD(0)))) { \
+ if (float64_is_signaling_nan(xa.VsrD(0)) || \
+ float64_is_signaling_nan(xb.VsrD(0))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
} \
if (ordered) { \
} \
cc = 1; \
} else { \
- if (float64_lt(xa.f64[0], xb.f64[0], &env->fp_status)) { \
+ if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) { \
cc = 8; \
- } else if (!float64_le(xa.f64[0], xb.f64[0], &env->fp_status)) { \
+ } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), \
+ &env->fp_status)) { \
cc = 4; \
} else { \
cc = 2; \
VSX_SCALAR_CMP(xscmpodp, 1)
VSX_SCALAR_CMP(xscmpudp, 0)
-#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ul)
-#define float32_snan_to_qnan(x) ((x) | 0x00400000)
-
/* VSX_MAX_MIN - VSX floating point maximum/minimum
* name - instruction mnemonic
* op - operation (max or min)
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
*/
#define VSX_MAX_MIN(name, op, nels, tp, fld) \
void helper_##name(CPUPPCState *env, uint32_t opcode) \
getVSR(xT(opcode), &xt, env); \
\
for (i = 0; i < nels; i++) { \
- xt.fld[i] = tp##_##op(xa.fld[i], xb.fld[i], &env->fp_status); \
- if (unlikely(tp##_is_signaling_nan(xa.fld[i]) || \
- tp##_is_signaling_nan(xb.fld[i]))) { \
+ xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status); \
+ if (unlikely(tp##_is_signaling_nan(xa.fld) || \
+ tp##_is_signaling_nan(xb.fld))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
} \
} \
helper_float_check_status(env); \
}
-VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, f64)
-VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, f64)
-VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, f32)
-VSX_MAX_MIN(xsmindp, minnum, 1, float64, f64)
-VSX_MAX_MIN(xvmindp, minnum, 2, float64, f64)
-VSX_MAX_MIN(xvminsp, minnum, 4, float32, f32)
+VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
+VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
+VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
+VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
+VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
+VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
/* VSX_CMP - VSX floating point compare
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* cmp - comparison operation
* svxvc - set VXVC bit
*/
getVSR(xT(opcode), &xt, env); \
\
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_any_nan(xa.fld[i]) || \
- tp##_is_any_nan(xb.fld[i]))) { \
- if (tp##_is_signaling_nan(xa.fld[i]) || \
- tp##_is_signaling_nan(xb.fld[i])) { \
+ if (unlikely(tp##_is_any_nan(xa.fld) || \
+ tp##_is_any_nan(xb.fld))) { \
+ if (tp##_is_signaling_nan(xa.fld) || \
+ tp##_is_signaling_nan(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
} \
if (svxvc) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
} \
- xt.fld[i] = 0; \
+ xt.fld = 0; \
all_true = 0; \
} else { \
- if (tp##_##cmp(xb.fld[i], xa.fld[i], &env->fp_status) == 1) { \
- xt.fld[i] = -1; \
+ if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == 1) { \
+ xt.fld = -1; \
all_false = 0; \
} else { \
- xt.fld[i] = 0; \
+ xt.fld = 0; \
all_true = 0; \
} \
} \
helper_float_check_status(env); \
}
-VSX_CMP(xvcmpeqdp, 2, float64, f64, eq, 0)
-VSX_CMP(xvcmpgedp, 2, float64, f64, le, 1)
-VSX_CMP(xvcmpgtdp, 2, float64, f64, lt, 1)
-VSX_CMP(xvcmpeqsp, 4, float32, f32, eq, 0)
-VSX_CMP(xvcmpgesp, 4, float32, f32, le, 1)
-VSX_CMP(xvcmpgtsp, 4, float32, f32, lt, 1)
-
-#if defined(HOST_WORDS_BIGENDIAN)
-#define JOFFSET 0
-#else
-#define JOFFSET 1
-#endif
+VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0)
+VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1)
+VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1)
+VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0)
+VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1)
+VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1)
/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
* op - instruction mnemonic
getVSR(xT(opcode), &xt, env); \
\
for (i = 0; i < nels; i++) { \
- int j = 2*i + JOFFSET; \
xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
if (unlikely(stp##_is_signaling_nan(xb.sfld))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
} \
if (sfprf) { \
helper_compute_fprf(env, ttp##_to_float64(xt.tfld, \
- &env->fp_status), sfprf); \
+ &env->fp_status)); \
} \
} \
\
helper_float_check_status(env); \
}
-VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, f64[i], f32[j], 1)
-VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, f32[j], f64[i], 1)
-VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, f64[i], f32[j], 0)
-VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, f32[j], f64[i], 0)
+VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
+VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
+VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
+VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
+
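+/* xscvdpspn and xscvspdpn are the non-signalling variants: they operate on a
+ * local float_status copy, so no FPSCR exception flags are raised. */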
+uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
+{
+ float_status tstat = env->fp_status;
+ set_float_exception_flags(0, &tstat);
+
+ return (uint64_t)float64_to_float32(xb, &tstat) << 32;
+}
+
+uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
+{
+ float_status tstat = env->fp_status;
+ set_float_exception_flags(0, &tstat);
+
+ return float32_to_float64(xb >> 32, &tstat);
+}
/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
* op - instruction mnemonic
* ttp - target type (int32, uint32, int64 or uint64)
* sfld - source vsr_t field
* tfld - target vsr_t field
- * jdef - definition of the j index (i or 2*i)
* rnan - resulting NaN
*/
-#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, jdef, rnan) \
+#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
ppc_vsr_t xt, xb; \
getVSR(xT(opcode), &xt, env); \
\
for (i = 0; i < nels; i++) { \
- int j = jdef; \
if (unlikely(stp##_is_any_nan(xb.sfld))) { \
if (stp##_is_signaling_nan(xb.sfld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
xt.tfld = rnan; \
} else { \
- xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
+ xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld, \
+ &env->fp_status); \
if (env->fp_status.float_exception_flags & float_flag_invalid) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
} \
helper_float_check_status(env); \
}
-VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, f64[j], u64[i], i, \
- 0x8000000000000000ul)
-VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, f64[i], u32[j], \
- 2*i + JOFFSET, 0x80000000l)
-VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, f64[j], u64[i], i, 0ul)
-VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, f64[i], u32[j], \
- 2*i + JOFFSET, 0)
-VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, f64[j], u64[i], i, \
- 0x8000000000000000ul)
-VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, f64[i], u32[j], \
- 2*i + JOFFSET, 0x80000000l)
-VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, f64[j], u64[i], i, 0ul)
-VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, f64[i], u32[j], \
- 2*i + JOFFSET, 0)
-VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, f32[j], u64[i], \
- 2*i + JOFFSET, 0x8000000000000000ul)
-VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, f32[j], u32[j], i, \
- 0x80000000l)
-VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, f32[j], u64[i], \
- 2*i + JOFFSET, 0ul)
-VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, f32[j], u32[i], i, 0)
+VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
+ 0x8000000000000000ULL)
+VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
+ 0x80000000U)
+VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
+VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
+VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
+ 0x8000000000000000ULL)
+VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
+ 0x80000000U)
+VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
+VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
+VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
+ 0x8000000000000000ULL)
+VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
+VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
+VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
* op - instruction mnemonic
- * jdef - definition of the j index (i or 2*i)
* sfprf - set FPRF
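+ * r2sp - round intermediate result to single precision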
*/
-#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, jdef, sfprf) \
+#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
ppc_vsr_t xt, xb; \
getVSR(xT(opcode), &xt, env); \
\
for (i = 0; i < nels; i++) { \
- int j = jdef; \
xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
+ if (r2sp) { \
+ xt.tfld = helper_frsp(env, xt.tfld); \
+ } \
if (sfprf) { \
- helper_compute_fprf(env, xt.tfld, sfprf); \
+ helper_compute_fprf(env, xt.tfld); \
} \
} \
\
helper_float_check_status(env); \
}
-VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, u64[j], f64[i], i, 1)
-VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, u64[j], f64[i], i, 1)
-VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, u64[j], f64[i], i, 0)
-VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, u64[j], f64[i], i, 0)
-VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, u32[j], f64[i], \
- 2*i + JOFFSET, 0)
-VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, u32[j], f64[i], \
- 2*i + JOFFSET, 0)
-VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, u64[i], f32[j], \
- 2*i + JOFFSET, 0)
-VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, u64[i], f32[j], \
- 2*i + JOFFSET, 0)
-VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, u32[j], f32[i], i, 0)
-VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, u32[j], f32[i], i, 0)
+VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
+VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
+VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
+VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
+VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
/* For "use current rounding mode", define a value that will not be one of
 * the existing rounding mode enums.
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* rmode - rounding mode
* sfprf - set FPRF
*/
} \
\
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_signaling_nan(xb.fld[i]))) { \
+ if (unlikely(tp##_is_signaling_nan(xb.fld))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
- xt.fld[i] = tp##_snan_to_qnan(xb.fld[i]); \
+ xt.fld = tp##_snan_to_qnan(xb.fld); \
} else { \
- xt.fld[i] = tp##_round_to_int(xb.fld[i], &env->fp_status); \
+ xt.fld = tp##_round_to_int(xb.fld, &env->fp_status); \
} \
if (sfprf) { \
- helper_compute_fprf(env, xt.fld[i], sfprf); \
+ helper_compute_fprf(env, xt.fld); \
} \
} \
\
helper_float_check_status(env); \
}
-VSX_ROUND(xsrdpi, 1, float64, f64, float_round_nearest_even, 1)
-VSX_ROUND(xsrdpic, 1, float64, f64, FLOAT_ROUND_CURRENT, 1)
-VSX_ROUND(xsrdpim, 1, float64, f64, float_round_down, 1)
-VSX_ROUND(xsrdpip, 1, float64, f64, float_round_up, 1)
-VSX_ROUND(xsrdpiz, 1, float64, f64, float_round_to_zero, 1)
+VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_nearest_even, 1)
+VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
+VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
+VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
+VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
-VSX_ROUND(xvrdpi, 2, float64, f64, float_round_nearest_even, 0)
-VSX_ROUND(xvrdpic, 2, float64, f64, FLOAT_ROUND_CURRENT, 0)
-VSX_ROUND(xvrdpim, 2, float64, f64, float_round_down, 0)
-VSX_ROUND(xvrdpip, 2, float64, f64, float_round_up, 0)
-VSX_ROUND(xvrdpiz, 2, float64, f64, float_round_to_zero, 0)
+VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_nearest_even, 0)
+VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
+VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
+VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
+VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
-VSX_ROUND(xvrspi, 4, float32, f32, float_round_nearest_even, 0)
-VSX_ROUND(xvrspic, 4, float32, f32, FLOAT_ROUND_CURRENT, 0)
-VSX_ROUND(xvrspim, 4, float32, f32, float_round_down, 0)
-VSX_ROUND(xvrspip, 4, float32, f32, float_round_up, 0)
-VSX_ROUND(xvrspiz, 4, float32, f32, float_round_to_zero, 0)
+VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_nearest_even, 0)
+VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
+VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
+VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
+VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
+
+uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
+{
+ helper_reset_fpstatus(env);
+
+ uint64_t xt = helper_frsp(env, xb);
+
+ helper_compute_fprf(env, xt);
+ helper_float_check_status(env);
+ return xt;
+}