diff --git a/cpu-exec.c b/cpu-exec.c
index 1c7356a0df55b01d68fb7d55126d25750a465083..25dcf2b12517e76f63955a4e6ef9a927a389a8f1 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -40,6 +40,52 @@ int tb_invalidated_flag;
 //#define DEBUG_EXEC
 //#define DEBUG_SIGNAL
 
+#define SAVE_GLOBALS()
+#define RESTORE_GLOBALS()
+
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
+#include <features.h>
+#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
+                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
+// Work around ugly bugs in glibc that mangle global register contents
+
+static volatile void *saved_env;
+static volatile unsigned long saved_t0, saved_i7;
+#undef SAVE_GLOBALS
+#define SAVE_GLOBALS() do {                                     \
+        saved_env = env;                                        \
+        saved_t0 = T0;                                          \
+        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
+    } while(0)
+
+#undef RESTORE_GLOBALS
+#define RESTORE_GLOBALS() do {                                  \
+        env = (void *)saved_env;                                \
+        T0 = saved_t0;                                          \
+        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
+    } while(0)
+
+static int sparc_setjmp(jmp_buf buf)
+{
+    int ret;
+
+    SAVE_GLOBALS();
+    ret = setjmp(buf);
+    RESTORE_GLOBALS();
+    return ret;
+}
+#undef setjmp
+#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)
+
+static void sparc_longjmp(jmp_buf buf, int val)
+{
+    SAVE_GLOBALS();
+    longjmp(buf, val);
+}
+#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
+#endif
+#endif
+
 void cpu_loop_exit(void)
 {
     /* NOTE: the register at this point must be saved by hand because
@@ -74,7 +120,6 @@ void cpu_resume_from_signal(CPUState *env1, void *puc)
     longjmp(env->jmp_env, 1);
 }
 
-
 static TranslationBlock *tb_find_slow(target_ulong pc,
                                       target_ulong cs_base,
                                       uint64_t flags)
@@ -133,7 +178,9 @@ static TranslationBlock *tb_find_slow(target_ulong pc,
     tb->tc_ptr = tc_ptr;
     tb->cs_base = cs_base;
     tb->flags = flags;
-    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
+    SAVE_GLOBALS();
+    cpu_gen_code(env, tb, &code_gen_size);
+    RESTORE_GLOBALS();
     code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
 
     /* check next page if needed */
@@ -202,8 +249,8 @@ static inline TranslationBlock *tb_find_fast(void)
     cs_base = 0;
     pc = env->pc;
 #elif defined(TARGET_SH4)
-    flags = env->sr & (SR_MD | SR_RB);
-    cs_base = 0;         /* XXXXX */
+    flags = env->flags;
+    cs_base = 0;
     pc = env->pc;
 #elif defined(TARGET_ALPHA)
     flags = env->ps;
@@ -232,6 +279,7 @@ static inline TranslationBlock *tb_find_fast(void)
     return tb;
 }
 
+#define BREAK_CHAIN T0 = 0
 
 /* main execution loop */
 
@@ -243,13 +291,9 @@ int cpu_exec(CPUState *env1)
 #if defined(reg_REGWPTR)
     uint32_t *saved_regwptr;
 #endif
-#endif
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-    int saved_i7;
-    target_ulong tmp_T0;
 #endif
     int ret, interrupt_request;
-    void (*gen_func)(void);
+    long (*gen_func)(void);
     TranslationBlock *tb;
     uint8_t *tc_ptr;
 
@@ -262,10 +306,7 @@ int cpu_exec(CPUState *env1)
 #define SAVE_HOST_REGS 1
 #include "hostregs_helper.h"
     env = env1;
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-    /* we also save i7 because longjmp may not restore it */
-    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
-#endif
+    SAVE_GLOBALS();
 
     env_to_regs();
 #if defined(TARGET_I386)
@@ -375,10 +416,7 @@ int cpu_exec(CPUState *env1)
 
             T0 = 0; /* force lookup of first TB */
             for(;;) {
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                /* g1 can be modified by some libc? functions */
-                tmp_T0 = T0;
-#endif
+                SAVE_GLOBALS();
                 interrupt_request = env->interrupt_request;
                 if (__builtin_expect(interrupt_request, 0)
 #if defined(TARGET_I386)
@@ -405,11 +443,13 @@ int cpu_exec(CPUState *env1)
                         svm_check_intercept(SVM_EXIT_SMI);
                         env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                         do_smm_enter();
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                        tmp_T0 = 0;
-#else
-                        T0 = 0;
-#endif
+                        BREAK_CHAIN;
+                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
+                        !(env->hflags & HF_NMI_MASK)) {
+                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
+                        env->hflags |= HF_NMI_MASK;
+                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
+                        BREAK_CHAIN;
                     } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                         (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                         !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
@@ -423,11 +463,7 @@ int cpu_exec(CPUState *env1)
                         do_interrupt(intno, 0, 0, 0, 1);
                         /* ensure that no TB jump will be modified as
                            the program flow was changed */
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                        tmp_T0 = 0;
-#else
-                        T0 = 0;
-#endif
+                        BREAK_CHAIN;
 #if !defined(CONFIG_USER_ONLY)
                     } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                         (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
@@ -441,11 +477,7 @@ int cpu_exec(CPUState *env1)
                         do_interrupt(intno, 0, 0, -1, 1);
                          stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                   ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                         tmp_T0 = 0;
-#else
-                         T0 = 0;
-#endif
+                        BREAK_CHAIN;
 #endif
                     }
 #elif defined(TARGET_PPC)
@@ -458,11 +490,7 @@ int cpu_exec(CPUState *env1)
                         ppc_hw_interrupt(env);
                         if (env->pending_interrupts == 0)
                             env->interrupt_request &= ~CPU_INTERRUPT_HARD;
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                        tmp_T0 = 0;
-#else
-                        T0 = 0;
-#endif
+                        BREAK_CHAIN;
                     }
 #elif defined(TARGET_MIPS)
                     if ((interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -475,11 +503,7 @@ int cpu_exec(CPUState *env1)
                         env->exception_index = EXCP_EXT_INTERRUPT;
                         env->error_code = 0;
                         do_interrupt(env);
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                        tmp_T0 = 0;
-#else
-                        T0 = 0;
-#endif
+                        BREAK_CHAIN;
                     }
 #elif defined(TARGET_SPARC)
                     if ((interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -496,11 +520,7 @@ int cpu_exec(CPUState *env1)
 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                             cpu_check_irqs(env);
 #endif
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                            tmp_T0 = 0;
-#else
-                            T0 = 0;
-#endif
+                        BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
@@ -511,6 +531,7 @@ int cpu_exec(CPUState *env1)
                         && !(env->uncached_cpsr & CPSR_F)) {
                         env->exception_index = EXCP_FIQ;
                         do_interrupt(env);
+                        BREAK_CHAIN;
                     }
                     /* ARMv7-M interrupt return works by loading a magic value
                        into the PC.  On real hardware the load causes the
@@ -526,17 +547,22 @@ int cpu_exec(CPUState *env1)
                             || !(env->uncached_cpsr & CPSR_I))) {
                         env->exception_index = EXCP_IRQ;
                         do_interrupt(env);
+                        BREAK_CHAIN;
                     }
 #elif defined(TARGET_SH4)
-                   /* XXXXX */
+                    if (interrupt_request & CPU_INTERRUPT_HARD) {
+                        do_interrupt(env);
+                        BREAK_CHAIN;
+                    }
 #elif defined(TARGET_ALPHA)
                     if (interrupt_request & CPU_INTERRUPT_HARD) {
                         do_interrupt(env);
+                        BREAK_CHAIN;
                     }
 #elif defined(TARGET_CRIS)
                     if (interrupt_request & CPU_INTERRUPT_HARD) {
                         do_interrupt(env);
-                       env->interrupt_request &= ~CPU_INTERRUPT_HARD;
+                        BREAK_CHAIN;
                     }
 #elif defined(TARGET_M68K)
                     if (interrupt_request & CPU_INTERRUPT_HARD
@@ -549,6 +575,7 @@ int cpu_exec(CPUState *env1)
                            first signalled.  */
                         env->exception_index = env->pending_vector;
                         do_interrupt(1);
+                        BREAK_CHAIN;
                     }
 #endif
                    /* Don't use the cached interupt_request value,
@@ -557,11 +584,7 @@ int cpu_exec(CPUState *env1)
                         env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                         /* ensure that no TB jump will be modified as
                            the program flow was changed */
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                        tmp_T0 = 0;
-#else
-                        T0 = 0;
-#endif
+                        BREAK_CHAIN;
                     }
                     if (interrupt_request & CPU_INTERRUPT_EXIT) {
                         env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
@@ -612,9 +635,7 @@ int cpu_exec(CPUState *env1)
                             lookup_symbol(tb->pc));
                 }
 #endif
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-                T0 = tmp_T0;
-#endif
+                RESTORE_GLOBALS();
                 /* see if we can patch the calling TB. When the TB
                    spans two pages, we cannot safely do a direct
                    jump. */
@@ -642,6 +663,17 @@ int cpu_exec(CPUState *env1)
                                        "o0", "o1", "o2", "o3", "o4", "o5",
                                        "l0", "l1", "l2", "l3", "l4", "l5",
                                        "l6", "l7");
+#elif defined(__hppa__)
+                asm volatile ("ble  0(%%sr4,%1)\n"
+                              "copy %%r31,%%r18\n"
+                              "copy %%r28,%0\n"
+                              : "=r" (T0)
+                              : "r" (gen_func)
+                              : "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+                                "r8", "r9", "r10", "r11", "r12", "r13",
+                                "r18", "r19", "r20", "r21", "r22", "r23",
+                                "r24", "r25", "r26", "r27", "r28", "r29",
+                                "r30", "r31");
 #elif defined(__arm__)
                 asm volatile ("mov pc, %0\n\t"
                               ".global exec_loop\n\t"
@@ -659,7 +691,7 @@ int cpu_exec(CPUState *env1)
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
 #else
-                gen_func();
+                T0 = gen_func();
 #endif
                 env->current_tb = NULL;
                 /* reset soft MMU for next block (it can currently
@@ -710,9 +742,7 @@ int cpu_exec(CPUState *env1)
 #endif
 
     /* restore global registers */
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
-    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
-#endif
+    RESTORE_GLOBALS();
 #include "hostregs_helper.h"
 
     /* fail safe : never use cpu_single_env outside cpu_exec() */
@@ -869,6 +899,8 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
        do it (XXX: use sigsetjmp) */
     sigprocmask(SIG_SETMASK, old_set, NULL);
     cpu_loop_exit();
+    /* never comes here */
+    return 1;
 }
 #elif defined(TARGET_SPARC)
 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
@@ -905,6 +937,8 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
        do it (XXX: use sigsetjmp) */
     sigprocmask(SIG_SETMASK, old_set, NULL);
     cpu_loop_exit();
+    /* never comes here */
+    return 1;
 }
 #elif defined (TARGET_PPC)
 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
@@ -1167,10 +1201,6 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
            a virtual CPU fault */
         cpu_restore_state(tb, env, pc, puc);
     }
-#if 0
-        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
-               env->nip, env->error_code, tb);
-#endif
     /* we restore the process signal mask as the sigreturn should
        do it (XXX: use sigsetjmp) */
     sigprocmask(SIG_SETMASK, old_set, NULL);
@@ -1475,6 +1505,24 @@ int cpu_signal_handler(int host_signum, void *pinfo,
                              is_write, &uc->uc_sigmask, puc);
 }
 
+#elif defined(__hppa__)
+
+int cpu_signal_handler(int host_signum, void *pinfo,
+                       void *puc)
+{
+    struct siginfo *info = pinfo;
+    struct ucontext *uc = puc;
+    unsigned long pc;
+    int is_write;
+
+    pc = uc->uc_mcontext.sc_iaoq[0];
+    /* FIXME: compute is_write */
+    is_write = 0;
+    return handle_cpu_signal(pc, (unsigned long)info->si_addr, 
+                             is_write,
+                             &uc->uc_sigmask, puc);
+}
+
 #else
 
 #error host CPU specific signal handler needed
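
A note on the sparc workaround added at the top of the file: it shadows env, T0 and the %i7
return address into ordinary memory around every setjmp()/longjmp(), because the affected
glibc versions clobber the global registers that back them. Below is a minimal,
host-independent sketch of that save/restore idea, with a plain global standing in for the
register variables and the wrappers inlined at the call sites; fault_path() is a made-up
stand-in for the paths that end in cpu_loop_exit(). It is illustrative only, not the
patch's code.

#include <setjmp.h>
#include <stdio.h>

static long T0;                 /* stand-in for a global register variable */
static long saved_T0;           /* shadow copy that survives the unwind */
static jmp_buf jmp_env;

#define SAVE_GLOBALS()    do { saved_T0 = T0; } while (0)
#define RESTORE_GLOBALS() do { T0 = saved_T0; } while (0)

static void fault_path(void)
{
    SAVE_GLOBALS();             /* publish current values before unwinding */
    longjmp(jmp_env, 1);
}

int main(void)
{
    T0 = 42;
    SAVE_GLOBALS();             /* snapshot before installing the jump target */
    if (setjmp(jmp_env) != 0) {
        RESTORE_GLOBALS();      /* recover whatever the unwind may have trashed */
        printf("back in the loop, T0 = %ld\n", T0);
        return 0;
    }
    RESTORE_GLOBALS();
    fault_path();               /* jumps back to the setjmp above */
    return 1;
}

In the patch itself the same idea is packaged as sparc_setjmp()/sparc_longjmp() and
substituted via #define, so the body of cpu_exec() keeps calling plain setjmp()/longjmp().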