MIPS FPU support (Marius Groeger)

diff --git a/target-mips/op.c b/target-mips/op.c
index 3f9b364620d22c1e07e621bcadde43a8892f4ed1..bc7f81949f5c3d1b7698c7d8ab403f0458d44c10 100644
--- a/target-mips/op.c
+++ b/target-mips/op.c
@@ -2,6 +2,7 @@
  *  MIPS emulation micro-operations for qemu.
  * 
  *  Copyright (c) 2004-2005 Jocelyn Mayer
+ *  Copyright (c) 2006 Marius Groeger (FPU operations)
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -149,6 +150,146 @@ CALL_FROM_TB2(func, arg0, arg1);
 #include "op_template.c"
 #undef TN
 
+#ifdef MIPS_USES_FPU
+
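+/* Instantiate the per-register FPU micro-ops from fop_template.c:
+ * SFREG names a single-precision register, and DFREG is defined only for
+ * the even-numbered registers, which also address double-precision pairs. */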
+#define SFREG 0
+#define DFREG 0
+#include "fop_template.c"
+#undef SFREG
+#undef DFREG
+#define SFREG 1
+#include "fop_template.c"
+#undef SFREG
+#define SFREG 2
+#define DFREG 2
+#include "fop_template.c"
+#undef SFREG
+#undef DFREG
+#define SFREG 3
+#include "fop_template.c"
+#undef SFREG
+#define SFREG 4
+#define DFREG 4
+#include "fop_template.c"
+#undef SFREG
+#undef DFREG
+#define SFREG 5
+#include "fop_template.c"
+#undef SFREG
+#define SFREG 6
+#define DFREG 6
+#include "fop_template.c"
+#undef SFREG
+#undef DFREG
+#define SFREG 7
+#include "fop_template.c"
+#undef SFREG
+#define SFREG 8
+#define DFREG 8
+#include "fop_template.c"
+#undef SFREG
+#undef DFREG
+#define SFREG 9
+#include "fop_template.c"
+#undef SFREG
+#define SFREG 10
+#define DFREG 10
+#include "fop_template.c"
+#undef SFREG
+#undef DFREG
+#define SFREG 11
+#include "fop_template.c"
+#undef SFREG
+#define SFREG 12
+#define DFREG 12
+#include "fop_template.c"
+#undef SFREG
+#undef DFREG
+#define SFREG 13
+#include "fop_template.c"
+#undef SFREG
+#define SFREG 14
+#define DFREG 14
+#include "fop_template.c"
+#undef SFREG
+#undef DFREG
+#define SFREG 15
+#include "fop_template.c"
+#undef SFREG
+#define SFREG 16
+#define DFREG 16
+#include "fop_template.c"
+#undef SFREG
+#undef DFREG
+#define SFREG 17
+#include "fop_template.c"
+#undef SFREG
+#define SFREG 18
+#define DFREG 18
+#include "fop_template.c"
+#undef SFREG
+#undef DFREG
+#define SFREG 19
+#include "fop_template.c"
+#undef SFREG
+#define SFREG 20
+#define DFREG 20
+#include "fop_template.c"
+#undef SFREG
+#undef DFREG
+#define SFREG 21
+#include "fop_template.c"
+#undef SFREG
+#define SFREG 22
+#define DFREG 22
+#include "fop_template.c"
+#undef SFREG
+#undef DFREG
+#define SFREG 23
+#include "fop_template.c"
+#undef SFREG
+#define SFREG 24
+#define DFREG 24
+#include "fop_template.c"
+#undef SFREG
+#undef DFREG
+#define SFREG 25
+#include "fop_template.c"
+#undef SFREG
+#define SFREG 26
+#define DFREG 26
+#include "fop_template.c"
+#undef SFREG
+#undef DFREG
+#define SFREG 27
+#include "fop_template.c"
+#undef SFREG
+#define SFREG 28
+#define DFREG 28
+#include "fop_template.c"
+#undef SFREG
+#undef DFREG
+#define SFREG 29
+#include "fop_template.c"
+#undef SFREG
+#define SFREG 30
+#define DFREG 30
+#include "fop_template.c"
+#undef SFREG
+#undef DFREG
+#define SFREG 31
+#include "fop_template.c"
+#undef SFREG
+
+#define FTN
+#include "fop_template.c"
+#undef FTN
+
+#endif
+
 void op_dup_T0 (void)
 {
     T2 = T0;
@@ -562,6 +700,361 @@ void op_mtc0 (void)
     RETURN();
 }
 
+#ifdef MIPS_USES_FPU
+
+#if 0
+# define DEBUG_FPU_STATE() CALL_FROM_TB1(dump_fpu, env)
+#else
+# define DEBUG_FPU_STATE() do { } while(0)
+#endif
+
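+/* Raise a Coprocessor Unusable exception (coprocessor 1) unless
+ * CP0 Status.CU1 allows access to the FPU.  */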
+void op_cp1_enabled(void)
+{
+    if (!(env->CP0_Status & (1 << CP0St_CU1))) {
+        CALL_FROM_TB2(do_raise_exception_err, EXCP_CpU, 1);
+    }
+    RETURN();
+}
+
+/* CP1 functions */
+void op_cfc1 (void)
+{
+    if (T1 == 0) {
+        T0 = env->fcr0;
+    }
+    else {
+        /* fetch fcr31, masking unused bits */
+        T0 = env->fcr31 & 0x0183FFFF;
+    }
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+
+/* convert MIPS rounding mode in FCR31 to IEEE library */
+unsigned int ieee_rm[] = { 
+    float_round_nearest_even,
+    float_round_to_zero,
+    float_round_up,
+    float_round_down
+};
+
+#define RESTORE_ROUNDING_MODE \
+    set_float_rounding_mode(ieee_rm[env->fcr31 & 3], &env->fp_status)
+
+void op_ctc1 (void)
+{
+    if (T1 == 0) {
+        /* XXX: should writes to FCR0 raise an exception?
+         * FCR0 is read-only, so the value is simply not stored:
+         * env->fcr0 = T0;
+         */
+    }
+    else {
+        /* store new fcr31, masking unused bits */  
+        env->fcr31 = T0 & 0x0183FFFF;
+
+        /* set rounding mode */
+        RESTORE_ROUNDING_MODE;
+
+#ifndef CONFIG_SOFTFLOAT
+        /* no floating point exceptions for native float */
+        SET_FP_ENABLE(env->fcr31, 0);
+#endif
+    }
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+
+void op_mfc1 (void)
+{
+    T0 = WT0;
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+
+void op_mtc1 (void)
+{
+    WT0 = T0;
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+
+/* Float support.
+   Single precision routines have an "s" suffix, double precision a
+   "d" suffix.  */
+
+#define FLOAT_OP(name, p) void OPPROTO op_float_##name##_##p(void)
+
+FLOAT_OP(cvtd, w)
+{
+    FDT2 = int32_to_float64(WT0, &env->fp_status);
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+FLOAT_OP(cvts, w)
+{
+    FST2 = int32_to_float32(WT0, &env->fp_status);
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+FLOAT_OP(cvtw, s)
+{
+    WT2 = float32_to_int32(FST0, &env->fp_status);
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+FLOAT_OP(cvtw, d)
+{
+    WT2 = float64_to_int32(FDT0, &env->fp_status);
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+
+FLOAT_OP(roundw, d)
+{
+    set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
+    WT2 = float64_round_to_int(FDT0, &env->fp_status);
+    RESTORE_ROUNDING_MODE;
+
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+FLOAT_OP(roundw, s)
+{
+    set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
+    WT2 = float32_round_to_int(FST0, &env->fp_status);
+    RESTORE_ROUNDING_MODE;
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+
+FLOAT_OP(truncw, d)
+{
+    WT2 = float64_to_int32_round_to_zero(FDT0, &env->fp_status);
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+FLOAT_OP(truncw, s)
+{
+    WT2 = float32_to_int32_round_to_zero(FST0, &env->fp_status);
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+
+FLOAT_OP(ceilw, d)
+{
+    set_float_rounding_mode(float_round_up, &env->fp_status);
+    WT2 = float64_round_to_int(FDT0, &env->fp_status);
+    RESTORE_ROUNDING_MODE;
+
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+FLOAT_OP(ceilw, s)
+{
+    set_float_rounding_mode(float_round_up, &env->fp_status);
+    WT2 = float32_round_to_int(FST0, &env->fp_status);
+    RESTORE_ROUNDING_MODE;
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+
+FLOAT_OP(floorw, d)
+{
+    set_float_rounding_mode(float_round_down, &env->fp_status);
+    WT2 = float64_round_to_int(FDT0, &env->fp_status);
+    RESTORE_ROUNDING_MODE;
+
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+FLOAT_OP(floorw, s)
+{
+    set_float_rounding_mode(float_round_down, &env->fp_status);
+    WT2 = float32_round_to_int(FST0, &env->fp_status);
+    RESTORE_ROUNDING_MODE;
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+
+/* binary operations */
+#define FLOAT_BINOP(name) \
+FLOAT_OP(name, d)         \
+{                         \
+    FDT2 = float64_ ## name (FDT0, FDT1, &env->fp_status);    \
+    DEBUG_FPU_STATE();    \
+    RETURN();             \
+}                         \
+FLOAT_OP(name, s)         \
+{                         \
+    FST2 = float32_ ## name (FST0, FST1, &env->fp_status);    \
+    DEBUG_FPU_STATE();    \
+    RETURN();             \
+}
+FLOAT_BINOP(add)
+FLOAT_BINOP(sub)
+FLOAT_BINOP(mul)
+FLOAT_BINOP(div)
+#undef FLOAT_BINOP
+
+/* unary operations, modifying fp status  */
+#define FLOAT_UNOP(name)  \
+FLOAT_OP(name, d)         \
+{                         \
+    FDT2 = float64_ ## name(FDT0, &env->fp_status);   \
+    DEBUG_FPU_STATE();    \
+    RETURN();             \
+}                         \
+FLOAT_OP(name, s)         \
+{                         \
+    FST2 = float32_ ## name(FST0, &env->fp_status);   \
+    DEBUG_FPU_STATE();    \
+    RETURN();             \
+}
+FLOAT_UNOP(sqrt)
+#undef FLOAT_UNOP
+
+/* unary operations, not modifying fp status  */
+#define FLOAT_UNOP(name)  \
+FLOAT_OP(name, d)         \
+{                         \
+    FDT2 = float64_ ## name(FDT0);   \
+    DEBUG_FPU_STATE();    \
+    RETURN();             \
+}                         \
+FLOAT_OP(name, s)         \
+{                         \
+    FST2 = float32_ ## name(FST0);   \
+    DEBUG_FPU_STATE();    \
+    RETURN();             \
+}
+FLOAT_UNOP(abs)
+FLOAT_UNOP(chs)
+#undef FLOAT_UNOP
+
+FLOAT_OP(mov, d)
+{
+    FDT2 = FDT0;
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+FLOAT_OP(mov, s)
+{
+    FST2 = FST0;
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+
+#ifdef CONFIG_SOFTFLOAT
+#define clear_invalid() do {                                \
+    int flags = get_float_exception_flags(&env->fp_status); \
+    flags &= ~float_flag_invalid;                           \
+    set_float_exception_flags(flags, &env->fp_status);      \
+} while(0)
+#else
+#define clear_invalid() do { } while(0)
+#endif
+
+extern void dump_fpu_s(CPUState *env);
+
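+/* c.cond.fmt comparisons: set or clear the FP condition bit in FCR31.
+ * "sig" marks the signaling predicates; for the quiet ones, any invalid
+ * flag raised while testing for unordered operands is cleared again.  */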
+#define FOP_COND(fmt, op, sig, cond)           \
+void op_cmp_ ## fmt ## _ ## op (void)          \
+{                                              \
+    if (cond)                                  \
+        SET_FP_COND(env->fcr31);               \
+    else                                       \
+        CLEAR_FP_COND(env->fcr31);             \
+    if (!sig)                                  \
+        clear_invalid();                       \
+    /*CALL_FROM_TB1(dump_fpu_s, env);*/ \
+    DEBUG_FPU_STATE();                         \
+    RETURN();                                  \
+}
+
+flag float64_is_unordered(float64 a, float64 b STATUS_PARAM)
+{
+    extern flag float64_is_nan( float64 a );
+    if (float64_is_nan(a) || float64_is_nan(b)) {
+        float_raise(float_flag_invalid, status);
+        return 1;
+    }
+    else {
+        return 0;
+    }
+}
+
+FOP_COND(d, f,   0,                                                      0) 
+FOP_COND(d, un,  0, float64_is_unordered(FDT1, FDT0, &env->fp_status))
+FOP_COND(d, eq,  0,                                                      float64_eq(FDT0, FDT1, &env->fp_status))
+FOP_COND(d, ueq, 0, float64_is_unordered(FDT1, FDT0, &env->fp_status) || float64_eq(FDT0, FDT1, &env->fp_status))
+FOP_COND(d, olt, 0,                                                      float64_lt(FDT0, FDT1, &env->fp_status))
+FOP_COND(d, ult, 0, float64_is_unordered(FDT1, FDT0, &env->fp_status) || float64_lt(FDT0, FDT1, &env->fp_status))
+FOP_COND(d, ole, 0,                                                      float64_le(FDT0, FDT1, &env->fp_status))
+FOP_COND(d, ule, 0, float64_is_unordered(FDT1, FDT0, &env->fp_status) || float64_le(FDT0, FDT1, &env->fp_status))
+/* NOTE: the comma operator makes "cond" evaluate to false, but
+ * float64_is_unordered() is still called and can raise the invalid flag.
+ */
+FOP_COND(d, sf,  1,                                                      (float64_is_unordered(FDT0, FDT1, &env->fp_status), 0))
+FOP_COND(d, ngle,1, float64_is_unordered(FDT1, FDT0, &env->fp_status))
+FOP_COND(d, seq, 1,                                                      float64_eq(FDT0, FDT1, &env->fp_status))
+FOP_COND(d, ngl, 1, float64_is_unordered(FDT1, FDT0, &env->fp_status) || float64_eq(FDT0, FDT1, &env->fp_status))
+FOP_COND(d, lt,  1,                                                      float64_lt(FDT0, FDT1, &env->fp_status))
+FOP_COND(d, nge, 1, float64_is_unordered(FDT1, FDT0, &env->fp_status) || float64_lt(FDT0, FDT1, &env->fp_status))
+FOP_COND(d, le,  1,                                                      float64_le(FDT0, FDT1, &env->fp_status))
+FOP_COND(d, ngt, 1, float64_is_unordered(FDT1, FDT0, &env->fp_status) || float64_le(FDT0, FDT1, &env->fp_status))
+
+flag float32_is_unordered(float32 a, float32 b STATUS_PARAM)
+{
+    extern flag float32_is_nan( float32 a );
+    if (float32_is_nan(a) || float32_is_nan(b)) {
+        float_raise(float_flag_invalid, status);
+        return 1;
+    }
+    else {
+        return 0;
+    }
+}
+
+FOP_COND(s, f,   0,                                                      0) 
+FOP_COND(s, un,  0, float32_is_unordered(FST1, FST0, &env->fp_status))
+FOP_COND(s, eq,  0,                                                      float32_eq(FST0, FST1, &env->fp_status))
+FOP_COND(s, ueq, 0, float32_is_unordered(FST1, FST0, &env->fp_status) || float32_eq(FST0, FST1, &env->fp_status))
+FOP_COND(s, olt, 0,                                                      float32_lt(FST0, FST1, &env->fp_status))
+FOP_COND(s, ult, 0, float32_is_unordered(FST1, FST0, &env->fp_status) || float32_lt(FST0, FST1, &env->fp_status))
+FOP_COND(s, ole, 0,                                                      float32_le(FST0, FST1, &env->fp_status))
+FOP_COND(s, ule, 0, float32_is_unordered(FST1, FST0, &env->fp_status) || float32_le(FST0, FST1, &env->fp_status))
+/* NOTE: the comma operator makes "cond" evaluate to false, but
+ * float32_is_unordered() is still called and can raise the invalid flag.
+ */
+FOP_COND(s, sf,  1,                                                      (float32_is_unordered(FST0, FST1, &env->fp_status), 0))
+FOP_COND(s, ngle,1, float32_is_unordered(FST1, FST0, &env->fp_status))
+FOP_COND(s, seq, 1,                                                      float32_eq(FST0, FST1, &env->fp_status))
+FOP_COND(s, ngl, 1, float32_is_unordered(FST1, FST0, &env->fp_status) || float32_eq(FST0, FST1, &env->fp_status))
+FOP_COND(s, lt,  1,                                                      float32_lt(FST0, FST1, &env->fp_status))
+FOP_COND(s, nge, 1, float32_is_unordered(FST1, FST0, &env->fp_status) || float32_lt(FST0, FST1, &env->fp_status))
+FOP_COND(s, le,  1,                                                      float32_le(FST0, FST1, &env->fp_status))
+FOP_COND(s, ngt, 1, float32_is_unordered(FST1, FST0, &env->fp_status) || float32_le(FST0, FST1, &env->fp_status))
+
+void op_bc1f (void)
+{
+    T0 = ! IS_FP_COND_SET(env->fcr31);
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+
+void op_bc1t (void)
+{
+    T0 = IS_FP_COND_SET(env->fcr31);
+    DEBUG_FPU_STATE();
+    RETURN();
+}
+#endif /* MIPS_USES_FPU */
+
 #if defined(MIPS_USES_R4K_TLB)
 void op_tlbwi (void)
 {