//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
-#if defined(TARGET_ARM) || defined(TARGET_SPARC)
+#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_M68K)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
longjmp(env->jmp_env, 1);
}
#endif
-#ifndef TARGET_SPARC
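+/* targets that do not define a T2 global must skip the save/restore of T2 below */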
+#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif
/* cannot fail at this point */
tb = tb_alloc(pc);
/* don't forget to invalidate previous TB info */
- T0 = 0;
+ tb_invalidated_flag = 1;
}
tc_ptr = code_gen_ptr;
tb->tc_ptr = tc_ptr;
tb_link_phys(tb, phys_pc, phys_page2);
found:
- if (tb_invalidated_flag) {
- /* as some TB could have been invalidated because
- of memory exceptions while generating the code, we
- must recompute the hash index here */
- T0 = 0;
- }
/* we add the TB in the virtual pc hash table */
env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
spin_unlock(&tb_lock);
| (env->vfp.vec_stride << 4);
if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
flags |= (1 << 6);
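+ /* FPEXC.EN (bit 30) gates all VFP instructions, so it must be part of the TB flags */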
+ if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
+ flags |= (1 << 7);
cs_base = 0;
pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
- flags = (env->pstate << 2) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
+ /* Combined FPU enable bits, PRIV, DMMU enabled, IMMU enabled */
+ flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
+ | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
- flags = env->psrs | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1);
+ /* FPU enable, MMU enabled, MMU no-fault, Supervisor */
+ flags = (env->psref << 3) | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1)
+ | env->psrs;
#endif
cs_base = env->npc;
pc = env->pc;
cs_base = 0;
pc = env->nip;
#elif defined(TARGET_MIPS)
- flags = env->hflags & (MIPS_HFLAGS_TMASK | MIPS_HFLAG_BMASK);
+ flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
cs_base = 0;
pc = env->PC;
+#elif defined(TARGET_M68K)
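+ /* translated FP code depends on the FPCR precision bits */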
+ flags = env->fpcr & M68K_FPCR_PREC;
+ cs_base = 0;
+ pc = env->pc;
+#elif defined(TARGET_SH4)
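+ /* SR.MD (privilege) and SR.RB (register bank select) both affect translation */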
+ flags = env->sr & (SR_MD | SR_RB);
+ cs_base = 0; /* XXXXX */
+ pc = env->pc;
#else
#error unsupported CPU
#endif
if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
tb->flags != flags, 0)) {
tb = tb_find_slow(pc, cs_base, flags);
+ /* Note: we do it here to avoid a gcc bug on Mac OS X when
+ doing it in tb_find_slow */
+ if (tb_invalidated_flag) {
+ /* some TBs could have been invalidated because of memory
+ exceptions raised while generating the code, so we must
+ recompute the hash index here */
+ T0 = 0;
+ }
}
return tb;
}
uint32_t *saved_regwptr;
#endif
#endif
-#ifdef __sparc__
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
int saved_i7, tmp_T0;
#endif
int ret, interrupt_request;
return EXCP_HALTED;
}
}
+#elif defined(TARGET_SPARC)
+ if (env1->halted) {
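+ /* a pending hard interrupt wakes the CPU only if traps are enabled (psret = PSR.ET) */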
+ if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env1->psret != 0)) {
+ env1->halted = 0;
+ } else {
+ return EXCP_HALTED;
+ }
+ }
#elif defined(TARGET_ARM)
if (env1->halted) {
/* An interrupt wakes the CPU even if the I and F CPSR bits are
#if defined(reg_T2)
saved_T2 = T2;
#endif
-#ifdef __sparc__
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
/* we also save i7 because longjmp may not restore it */
asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif
saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_PPC)
+#elif defined(TARGET_M68K)
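+ /* seed the lazily-evaluated condition-code state from the SR image */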
+ env->cc_op = CC_OP_FLAGS;
+ env->cc_dest = env->sr & 0xf;
+ env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_MIPS)
+#elif defined(TARGET_SH4)
+ /* XXXXX */
#else
#error unsupported target CPU
#endif
do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
do_interrupt(env);
+#elif defined(TARGET_SH4)
+ do_interrupt(env);
#endif
}
env->exception_index = -1;
T0 = 0; /* force lookup of first TB */
for(;;) {
-#ifdef __sparc__
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
/* g1 can be modified by some libc functions */
tmp_T0 = T0;
#endif
interrupt_request = env->interrupt_request;
if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
- /* if hardware interrupt pending, we execute it */
- if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+ if ((interrupt_request & CPU_INTERRUPT_SMI) &&
+ !(env->hflags & HF_SMM_MASK)) {
+ env->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ do_smm_enter();
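+ /* ensure that no TB jump will be modified as
+ the program flow was changed */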
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
+ tmp_T0 = 0;
+#else
+ T0 = 0;
+#endif
+ } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK) &&
!(env->hflags & HF_INHIBIT_IRQ_MASK)) {
int intno;
do_interrupt(intno, 0, 0, 0, 1);
/* ensure that no TB jump will be modified as
the program flow was changed */
-#ifdef __sparc__
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
tmp_T0 = 0;
#else
T0 = 0;
env->error_code = 0;
do_interrupt(env);
env->interrupt_request &= ~CPU_INTERRUPT_HARD;
-#ifdef __sparc__
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
tmp_T0 = 0;
#else
T0 = 0;
env->error_code = 0;
do_interrupt(env);
env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
-#ifdef __sparc__
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
tmp_T0 = 0;
#else
T0 = 0;
env->error_code = 0;
do_interrupt(env);
env->interrupt_request &= ~CPU_INTERRUPT_HARD;
-#ifdef __sparc__
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
tmp_T0 = 0;
#else
T0 = 0;
env->interrupt_request &= ~CPU_INTERRUPT_HARD;
do_interrupt(env->interrupt_index);
env->interrupt_index = 0;
-#ifdef __sparc__
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
tmp_T0 = 0;
#else
T0 = 0;
} else if (interrupt_request & CPU_INTERRUPT_TIMER) {
//do_interrupt(0, 0, 0, 0, 0);
env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
- }
+ } else if (interrupt_request & CPU_INTERRUPT_HALT) {
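+ /* asynchronous halt request: stop executing until the next hardware interrupt */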
+ env->interrupt_request &= ~CPU_INTERRUPT_HALT;
+ env->halted = 1;
+ env->exception_index = EXCP_HLT;
+ cpu_loop_exit();
+ }
#elif defined(TARGET_ARM)
if (interrupt_request & CPU_INTERRUPT_FIQ
&& !(env->uncached_cpsr & CPSR_F)) {
env->exception_index = EXCP_IRQ;
do_interrupt(env);
}
+#elif defined(TARGET_SH4)
+ /* XXXXX */
#endif
+ /* Don't use the cached interrupt_request value here:
+ do_interrupt() may have updated the EXITTB flag. */
if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
/* ensure that no TB jump will be modified as
the program flow was changed */
-#ifdef __sparc__
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
tmp_T0 = 0;
#else
T0 = 0;
cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
cpu_dump_state(env, logfile, fprintf, 0);
+#elif defined(TARGET_M68K)
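+ /* fold the lazy condition codes back into SR so the dumped state is accurate */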
+ cpu_m68k_flush_flags(env, env->cc_op);
+ env->cc_op = CC_OP_FLAGS;
+ env->sr = (env->sr & 0xffe0)
+ | env->cc_dest | (env->cc_x << 4);
+ cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
cpu_dump_state(env, logfile, fprintf, 0);
+#elif defined(TARGET_SH4)
+ cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
lookup_symbol(tb->pc));
}
#endif
-#ifdef __sparc__
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
T0 = tmp_T0;
#endif
/* see if we can patch the calling TB. When the TB
jump. */
{
if (T0 != 0 &&
+#if defined(USE_KQEMU)
+ (env->kqemu_enabled != 2) &&
+#endif
tb->page_addr[1] == -1
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
&& (tb->cflags & CF_CODE_COPY) ==
"mov %%o7,%%i0"
: /* no outputs */
: "r" (gen_func)
- : "i0", "i1", "i2", "i3", "i4", "i5");
+ : "i0", "i1", "i2", "i3", "i4", "i5",
+ "l0", "l1", "l2", "l3", "l4", "l5",
+ "l6", "l7");
#elif defined(__arm__)
asm volatile ("mov pc, %0\n\t"
".global exec_loop\n\t"
/* do not allow linking to another block */
T0 = 0;
}
+#endif
+#if defined(USE_KQEMU)
+#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
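+ /* exit the emulation loop so kqemu can take over once enough
+ time has passed since the last I/O access */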
+ if (kqemu_is_ok(env) &&
+ (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
+ cpu_loop_exit();
+ }
#endif
}
} else {
REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
+#elif defined(TARGET_M68K)
+ cpu_m68k_flush_flags(env, env->cc_op);
+ env->cc_op = CC_OP_FLAGS;
+ env->sr = (env->sr & 0xffe0)
+ | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
+#elif defined(TARGET_SH4)
+ /* XXXXX */
#else
#error unsupported target CPU
#endif
-#ifdef __sparc__
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
T0 = saved_T0;
pc, address, is_write, *(unsigned long *)old_set);
#endif
/* XXX: locking issue */
- if (is_write && page_unprotect(address, pc, puc)) {
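+ /* page_unprotect() takes a guest address, so map the host fault address back with h2g() */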
+ if (is_write && page_unprotect(h2g(address), pc, puc)) {
return 1;
}
pc, address, is_write, *(unsigned long *)old_set);
#endif
/* XXX: locking issue */
- if (is_write && page_unprotect(address, pc, puc)) {
+ if (is_write && page_unprotect(h2g(address), pc, puc)) {
return 1;
}
/* see if it is an MMU fault */
pc, address, is_write, *(unsigned long *)old_set);
#endif
/* XXX: locking issue */
- if (is_write && page_unprotect(address, pc, puc)) {
+ if (is_write && page_unprotect(h2g(address), pc, puc)) {
return 1;
}
/* see if it is an MMU fault */
pc, address, is_write, *(unsigned long *)old_set);
#endif
/* XXX: locking issue */
- if (is_write && page_unprotect(address, pc, puc)) {
+ if (is_write && page_unprotect(h2g(address), pc, puc)) {
return 1;
}
return 1;
}
+#elif defined(TARGET_M68K)
+static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
+ int is_write, sigset_t *old_set,
+ void *puc)
+{
+ TranslationBlock *tb;
+ int ret;
+
+ if (cpu_single_env)
+ env = cpu_single_env; /* XXX: find a correct solution for multithread */
+#if defined(DEBUG_SIGNAL)
+ printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
+ pc, address, is_write, *(unsigned long *)old_set);
+#endif
+ /* XXX: locking issue */
+ if (is_write && page_unprotect(h2g(address), pc, puc)) {
+ return 1;
+ }
+ /* see if it is an MMU fault */
+ ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
+ if (ret < 0)
+ return 0; /* not an MMU fault */
+ if (ret == 0)
+ return 1; /* the MMU fault was handled without causing real CPU fault */
+ /* now we have a real cpu fault */
+ tb = tb_find_pc(pc);
+ if (tb) {
+ /* the PC is inside the translated code. It means that we have
+ a virtual CPU fault */
+ cpu_restore_state(tb, env, pc, puc);
+ }
+ /* we restore the process signal mask as the sigreturn should
+ do it (XXX: use sigsetjmp) */
+ sigprocmask(SIG_SETMASK, old_set, NULL);
+ cpu_loop_exit();
+ /* never comes here */
+ return 1;
+}
+
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
int is_write, sigset_t *old_set,
pc, address, is_write, *(unsigned long *)old_set);
#endif
/* XXX: locking issue */
- if (is_write && page_unprotect(address, pc, puc)) {
+ if (is_write && page_unprotect(h2g(address), pc, puc)) {
return 1;
}
return 1;
}
+#elif defined (TARGET_SH4)
+static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
+ int is_write, sigset_t *old_set,
+ void *puc)
+{
+ TranslationBlock *tb;
+ int ret;
+
+ if (cpu_single_env)
+ env = cpu_single_env; /* XXX: find a correct solution for multithread */
+#if defined(DEBUG_SIGNAL)
+ printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
+ pc, address, is_write, *(unsigned long *)old_set);
+#endif
+ /* XXX: locking issue */
+ if (is_write && page_unprotect(h2g(address), pc, puc)) {
+ return 1;
+ }
+
+ /* see if it is an MMU fault */
+ ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
+ if (ret < 0)
+ return 0; /* not an MMU fault */
+ if (ret == 0)
+ return 1; /* the MMU fault was handled without causing real CPU fault */
+
+ /* now we have a real cpu fault */
+ tb = tb_find_pc(pc);
+ if (tb) {
+ /* the PC is inside the translated code. It means that we have
+ a virtual CPU fault */
+ cpu_restore_state(tb, env, pc, puc);
+ }
+#if 0
+ printf("PF exception: NIP=0x%08x error=0x%x %p\n",
+ env->nip, env->error_code, tb);
+#endif
+ /* we restore the process signal mask as the sigreturn should
+ do it (XXX: use sigsetjmp) */
+ sigprocmask(SIG_SETMASK, old_set, NULL);
+ cpu_loop_exit();
+ /* never comes here */
+ return 1;
+}
#else
#error unsupported target CPU
#endif
is_write = 0;
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
is_write,
- &uc->uc_sigmask);
+ &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)
#ifndef __ISR_VALID
/* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
-# define si_flags _sifields._sigfault._si_pad0
#endif
int cpu_signal_handler(int host_signum, struct siginfo *info, void *puc)
case SIGSEGV:
case SIGBUS:
case SIGTRAP:
- if (info->si_code && (info->si_flags & __ISR_VALID))
+ if (info->si_code && (info->si_segvflags & __ISR_VALID))
/* ISR.W (write-access) is bit 33: */
is_write = (info->si_isr >> 33) & 1;
break;