Merge remote branch 'linux-user/linux-user-for-upstream' into staging-tmp

diff --git a/target-sh4/op_helper.c b/target-sh4/op_helper.c
index d3bde5c6492d2fa7ef2543aecc78b853f845118b..529df0ca9ed33b5b526ffc47cbdf4f0cc25f2689 100644
--- a/target-sh4/op_helper.c
+++ b/target-sh4/op_helper.c
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  */
 #include <assert.h>
+#include <stdlib.h>
 #include "exec.h"
+#include "helper.h"
 
 #ifndef CONFIG_USER_ONLY
 
@@ -88,16 +89,29 @@ void helper_raise_slot_illegal_instruction(void)
     cpu_loop_exit();
 }
 
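+/* 0x800 and 0x820 are the SH-4 exception codes for the general FPU
+   disable exception and the slot FPU disable exception (an FPU insn in a
+   delay slot while SR.FD is set). */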
+void helper_raise_fpu_disable(void)
+{
+    env->exception_index = 0x800;
+    cpu_loop_exit();
+}
+
+void helper_raise_slot_fpu_disable(void)
+{
+    env->exception_index = 0x820;
+    cpu_loop_exit();
+}
+
 void helper_debug(void)
 {
     env->exception_index = EXCP_DEBUG;
     cpu_loop_exit();
 }
 
-void helper_sleep(void)
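+/* next_pc is the address of the instruction following SLEEP; save it so
+   the CPU resumes there once it is woken from the halted state. */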
+void helper_sleep(uint32_t next_pc)
 {
     env->halted = 1;
     env->exception_index = EXCP_HLT;
+    env->pc = next_pc;
     cpu_loop_exit();
 }
 
@@ -108,6 +122,57 @@ void helper_trapa(uint32_t tra)
     cpu_loop_exit();
 }
 
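+/* MOVCA.L allocates a cache line for 'address' without fetching it from
+   memory; guests pair it with OCBI to touch a line and then throw it away.
+   QEMU has no cache model and the store goes straight to memory, so keep a
+   backup entry that a later OCBI on the same line can write back. */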
+void helper_movcal(uint32_t address, uint32_t value)
+{
+    if (cpu_sh4_is_cached(env, address)) {
+        memory_content *r = malloc(sizeof(memory_content));
+        r->address = address;
+        r->value = value;
+        r->next = NULL;
+
+        *(env->movcal_backup_tail) = r;
+        env->movcal_backup_tail = &(r->next);
+    }
+}
+
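+/* Drop all pending MOVCA.L backups and reset the tail pointer. */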
+void helper_discard_movcal_backup(void)
+{
+    memory_content *current = env->movcal_backup;
+
+    while (current) {
+        memory_content *next = current->next;
+        free(current);
+        env->movcal_backup = current = next;
+        if (current == NULL) {
+            env->movcal_backup_tail = &(env->movcal_backup);
+        }
+    }
+}
+
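+/* OCBI invalidates a cache block without write-back.  If the block has a
+   pending MOVCA.L backup, write the saved value back to memory and drop
+   that backup entry. */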
+void helper_ocbi(uint32_t address)
+{
+    memory_content **current = &(env->movcal_backup);
+
+    while (*current) {
+        uint32_t a = (*current)->address;
+
+        if ((a & ~0x1F) == (address & ~0x1F)) {
+            memory_content *next = (*current)->next;
+            stl(a, (*current)->value);
+
+            if (next == NULL) {
+                env->movcal_backup_tail = current;
+            }
+
+            free(*current);
+            *current = next;
+            break;
+        }
+
+        /* No match: move on to the next backup entry. */
+        current = &((*current)->next);
+    }
+}
+
 uint32_t helper_addc(uint32_t arg0, uint32_t arg1)
 {
     uint32_t tmp0, tmp1;
@@ -366,6 +431,16 @@ uint32_t helper_subv(uint32_t arg0, uint32_t arg1)
     return arg1;
 }
 
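+/* Set / clear the T (condition) bit in SR. */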
+static inline void set_t(void)
+{
+    env->sr |= SR_T;
+}
+
+static inline void clr_t(void)
+{
+    env->sr &= ~SR_T;
+}
+
 void helper_ld_fpscr(uint32_t val)
 {
     env->fpscr = val & 0x003fffff;
@@ -374,3 +449,220 @@ void helper_ld_fpscr(uint32_t val)
     else
        set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
 }
+
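+/* The FT/DT suffixes mark single-precision (32-bit) and double-precision
+   (64-bit) helpers; operands are passed as raw bit patterns and
+   reinterpreted through the CPU_FloatU/CPU_DoubleU unions. */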
+uint32_t helper_fabs_FT(uint32_t t0)
+{
+    CPU_FloatU f;
+    f.l = t0;
+    f.f = float32_abs(f.f);
+    return f.l;
+}
+
+uint64_t helper_fabs_DT(uint64_t t0)
+{
+    CPU_DoubleU d;
+    d.ll = t0;
+    d.d = float64_abs(d.d);
+    return d.ll;
+}
+
+uint32_t helper_fadd_FT(uint32_t t0, uint32_t t1)
+{
+    CPU_FloatU f0, f1;
+    f0.l = t0;
+    f1.l = t1;
+    f0.f = float32_add(f0.f, f1.f, &env->fp_status);
+    return f0.l;
+}
+
+uint64_t helper_fadd_DT(uint64_t t0, uint64_t t1)
+{
+    CPU_DoubleU d0, d1;
+    d0.ll = t0;
+    d1.ll = t1;
+    d0.d = float64_add(d0.d, d1.d, &env->fp_status);
+    return d0.ll;
+}
+
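+/* float32_compare()/float64_compare() return float_relation_equal (0)
+   when the operands are equal and float_relation_greater (1) when the
+   first operand is greater; the result is reflected in the T bit. */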
+void helper_fcmp_eq_FT(uint32_t t0, uint32_t t1)
+{
+    CPU_FloatU f0, f1;
+    f0.l = t0;
+    f1.l = t1;
+
+    if (float32_compare(f0.f, f1.f, &env->fp_status) == 0)
+       set_t();
+    else
+       clr_t();
+}
+
+void helper_fcmp_eq_DT(uint64_t t0, uint64_t t1)
+{
+    CPU_DoubleU d0, d1;
+    d0.ll = t0;
+    d1.ll = t1;
+
+    if (float64_compare(d0.d, d1.d, &env->fp_status) == 0)
+       set_t();
+    else
+       clr_t();
+}
+
+void helper_fcmp_gt_FT(uint32_t t0, uint32_t t1)
+{
+    CPU_FloatU f0, f1;
+    f0.l = t0;
+    f1.l = t1;
+
+    if (float32_compare(f0.f, f1.f, &env->fp_status) == 1)
+       set_t();
+    else
+       clr_t();
+}
+
+void helper_fcmp_gt_DT(uint64_t t0, uint64_t t1)
+{
+    CPU_DoubleU d0, d1;
+    d0.ll = t0;
+    d1.ll = t1;
+
+    if (float64_compare(d0.d, d1.d, &env->fp_status) == 1)
+       set_t();
+    else
+       clr_t();
+}
+
+uint64_t helper_fcnvsd_FT_DT(uint32_t t0)
+{
+    CPU_DoubleU d;
+    CPU_FloatU f;
+    f.l = t0;
+    d.d = float32_to_float64(f.f, &env->fp_status);
+    return d.ll;
+}
+
+uint32_t helper_fcnvds_DT_FT(uint64_t t0)
+{
+    CPU_DoubleU d;
+    CPU_FloatU f;
+    d.ll = t0;
+    f.f = float64_to_float32(d.d, &env->fp_status);
+    return f.l;
+}
+
+uint32_t helper_fdiv_FT(uint32_t t0, uint32_t t1)
+{
+    CPU_FloatU f0, f1;
+    f0.l = t0;
+    f1.l = t1;
+    f0.f = float32_div(f0.f, f1.f, &env->fp_status);
+    return f0.l;
+}
+
+uint64_t helper_fdiv_DT(uint64_t t0, uint64_t t1)
+{
+    CPU_DoubleU d0, d1;
+    d0.ll = t0;
+    d1.ll = t1;
+    d0.d = float64_div(d0.d, d1.d, &env->fp_status);
+    return d0.ll;
+}
+
+uint32_t helper_float_FT(uint32_t t0)
+{
+    CPU_FloatU f;
+    f.f = int32_to_float32(t0, &env->fp_status);
+    return f.l;
+}
+
+uint64_t helper_float_DT(uint32_t t0)
+{
+    CPU_DoubleU d;
+    d.d = int32_to_float64(t0, &env->fp_status);
+    return d.ll;
+}
+
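+/* FMAC is implemented as a multiply followed by a separately rounded add,
+   not as a fused multiply-add. */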
+uint32_t helper_fmac_FT(uint32_t t0, uint32_t t1, uint32_t t2)
+{
+    CPU_FloatU f0, f1, f2;
+    f0.l = t0;
+    f1.l = t1;
+    f2.l = t2;
+    f0.f = float32_mul(f0.f, f1.f, &env->fp_status);
+    f0.f = float32_add(f0.f, f2.f, &env->fp_status);
+    return f0.l;
+}
+
+uint32_t helper_fmul_FT(uint32_t t0, uint32_t t1)
+{
+    CPU_FloatU f0, f1;
+    f0.l = t0;
+    f1.l = t1;
+    f0.f = float32_mul(f0.f, f1.f, &env->fp_status);
+    return f0.l;
+}
+
+uint64_t helper_fmul_DT(uint64_t t0, uint64_t t1)
+{
+    CPU_DoubleU d0, d1;
+    d0.ll = t0;
+    d1.ll = t1;
+    d0.d = float64_mul(d0.d, d1.d, &env->fp_status);
+    return d0.ll;
+}
+
+uint32_t helper_fneg_T(uint32_t t0)
+{
+    CPU_FloatU f;
+    f.l = t0;
+    f.f = float32_chs(f.f);
+    return f.l;
+}
+
+uint32_t helper_fsqrt_FT(uint32_t t0)
+{
+    CPU_FloatU f;
+    f.l = t0;
+    f.f = float32_sqrt(f.f, &env->fp_status);
+    return f.l;
+}
+
+uint64_t helper_fsqrt_DT(uint64_t t0)
+{
+    CPU_DoubleU d;
+    d.ll = t0;
+    d.d = float64_sqrt(d.d, &env->fp_status);
+    return d.ll;
+}
+
+uint32_t helper_fsub_FT(uint32_t t0, uint32_t t1)
+{
+    CPU_FloatU f0, f1;
+    f0.l = t0;
+    f1.l = t1;
+    f0.f = float32_sub(f0.f, f1.f, &env->fp_status);
+    return f0.l;
+}
+
+uint64_t helper_fsub_DT(uint64_t t0, uint64_t t1)
+{
+    CPU_DoubleU d0, d1;
+    d0.ll = t0;
+    d1.ll = t1;
+    d0.d = float64_sub(d0.d, d1.d, &env->fp_status);
+    return d0.ll;
+}
+
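+/* FTRC: convert to a signed 32-bit integer, rounding toward zero. */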
+uint32_t helper_ftrc_FT(uint32_t t0)
+{
+    CPU_FloatU f;
+    f.l = t0;
+    return float32_to_int32_round_to_zero(f.f, &env->fp_status);
+}
+
+uint32_t helper_ftrc_DT(uint64_t t0)
+{
+    CPU_DoubleU d;
+    d.ll = t0;
+    return float64_to_int32_round_to_zero(d.d, &env->fp_status);
+}