*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#endif
}
#endif
+ env->exception_index = -1;
longjmp(env->jmp_env, 1);
}
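The `env->exception_index = -1;` added before the `longjmp()` keeps a stale exception index from being acted on again once control lands back at the `setjmp()` in the main execution loop. Below is a minimal, self-contained sketch of that setjmp/longjmp exit pattern; the names (`loop_exit`, the global `exception_index` and `jmp_env`) are illustrative stand-ins, not QEMU's actual structures.

#include <setjmp.h>
#include <stdio.h>

static jmp_buf jmp_env;
static int exception_index = -1;

/* Abandon the current run and return to the setjmp() in the main loop,
 * clearing the pending-exception marker first, as the patch does. */
static void loop_exit(void)
{
    exception_index = -1;
    longjmp(jmp_env, 1);
}

int main(void)
{
    if (setjmp(jmp_env) == 0) {
        printf("executing guest code...\n");
        loop_exit();                 /* never returns */
    }
    if (exception_index < 0)
        printf("back in the main loop, nothing pending\n");
    return 0;
}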
CPUWatchpoint *wp;
if (!env->watchpoint_hit)
- for (wp = env->watchpoints; wp != NULL; wp = wp->next)
+ TAILQ_FOREACH(wp, &env->watchpoints, entry)
wp->flags &= ~BP_WATCHPOINT_HIT;
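The loop above now walks the watchpoint list with the `TAILQ` macros instead of a hand-rolled `next` pointer chain. The following self-contained sketch shows the same iteration pattern using the standard `<sys/queue.h>` macros; the `watchpoint` struct and `BP_HIT` flag are stand-ins rather than QEMU's `CPUWatchpoint`/`BP_WATCHPOINT_HIT`.

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

#define BP_HIT 0x1                       /* stand-in for BP_WATCHPOINT_HIT */

struct watchpoint {
    unsigned long vaddr;
    int flags;
    TAILQ_ENTRY(watchpoint) entry;       /* list linkage named in TAILQ_FOREACH */
};

TAILQ_HEAD(wp_head, watchpoint);

int main(void)
{
    struct wp_head watchpoints = TAILQ_HEAD_INITIALIZER(watchpoints);
    struct watchpoint *wp = calloc(1, sizeof(*wp));

    wp->vaddr = 0x1000;
    wp->flags = BP_HIT;
    TAILQ_INSERT_TAIL(&watchpoints, wp, entry);

    /* Same shape as the patched loop: clear the hit flag on every entry. */
    TAILQ_FOREACH(wp, &watchpoints, entry)
        wp->flags &= ~BP_HIT;

    printf("flags after clearing: %#x\n", TAILQ_FIRST(&watchpoints)->flags);
    free(TAILQ_FIRST(&watchpoints));
    return 0;
}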
if (debug_excp_handler)
if (ret == EXCP_DEBUG)
cpu_handle_debug_exception(env);
break;
- } else if (env->user_mode_only) {
+ } else {
+#if defined(CONFIG_USER_ONLY)
/* if user mode only, we simulate a fake exception
which will be handled outside the cpu execution
loop */
#endif
ret = env->exception_index;
break;
- } else {
+#else
#if defined(TARGET_I386)
/* simulate a real cpu exception. On i386, it can
trigger new exceptions, but we do not handle
do_interrupt(env);
#elif defined(TARGET_M68K)
do_interrupt(0);
+#endif
#endif
}
env->exception_index = -1;
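The surrounding hunk trades the old runtime `env->user_mode_only` check for a compile-time `#if defined(CONFIG_USER_ONLY)` split, so each binary carries only one of the two exception paths. A small sketch of that pattern follows; `CONFIG_USER_ONLY` is normally emitted into config.h by the configure script, and `deliver_interrupt()` is a hypothetical stand-in for the target's `do_interrupt()`.

#include <stdio.h>

#define CONFIG_USER_ONLY 1   /* normally generated into config.h by configure */

static void handle_pending_exception(int index)
{
#if defined(CONFIG_USER_ONLY)
    /* user-mode emulation: report the exception to the caller, which
     * handles it outside the CPU execution loop */
    printf("deferring exception %d to the outer loop\n", index);
#else
    /* system emulation: enter the guest's exception handler directly */
    deliver_interrupt(index);   /* hypothetical stand-in for do_interrupt() */
#endif
}

int main(void)
{
    handle_pending_exception(13);
    return 0;
}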
svm_check_intercept(SVM_EXIT_INTR);
env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
intno = cpu_get_pic_interrupt(env);
- if (loglevel & CPU_LOG_TB_IN_ASM) {
- fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
- }
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
do_interrupt(intno, 0, 0, 0, 1);
/* ensure that no TB jump will be modified as
the program flow was changed */
int intno;
/* FIXME: this should respect TPR */
svm_check_intercept(SVM_EXIT_VINTR);
- env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
- if (loglevel & CPU_LOG_TB_IN_ASM)
- fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
do_interrupt(intno, 0, 0, 0, 1);
+ env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
next_tb = 0;
#endif
}
}
}
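Both interrupt-servicing paths above fold the open-coded `loglevel & ...` test plus `fprintf(logfile, ...)` into a single `qemu_log_mask()` call. The sketch below shows the general mask-gated logging pattern; `log_mask()`, `loglevel` and `logfile` here are local stand-ins, not the definitions from QEMU's qemu-log.h.

#include <stdarg.h>
#include <stdio.h>

#define CPU_LOG_TB_IN_ASM (1 << 1)       /* example category bit */

static int loglevel;
static FILE *logfile;

/* Format a message only when its category bit is enabled. */
static void log_mask(int mask, const char *fmt, ...)
{
    va_list ap;

    if (!logfile || !(loglevel & mask))
        return;
    va_start(ap, fmt);
    vfprintf(logfile, fmt, ap);
    va_end(ap);
}

int main(void)
{
    logfile = stderr;
    loglevel = CPU_LOG_TB_IN_ASM;

    log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", 0x20);
    log_mask(1 << 5, "suppressed: category not enabled\n");
    return 0;
}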
#ifdef DEBUG_EXEC
- if ((loglevel & CPU_LOG_TB_CPU)) {
+ if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
/* restore flags in standard format */
regs_to_env();
#if defined(TARGET_I386)
env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
- cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
+ log_cpu_state(env, X86_DUMP_CCOP);
env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
- cpu_dump_state(env, logfile, fprintf, 0);
+ log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
- cpu_dump_state(env, logfile, fprintf, 0);
+ log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
- cpu_dump_state(env, logfile, fprintf, 0);
+ log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
cpu_m68k_flush_flags(env, env->cc_op);
env->cc_op = CC_OP_FLAGS;
env->sr = (env->sr & 0xffe0)
| env->cc_dest | (env->cc_x << 4);
- cpu_dump_state(env, logfile, fprintf, 0);
+ log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
- cpu_dump_state(env, logfile, fprintf, 0);
+ log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
- cpu_dump_state(env, logfile, fprintf, 0);
+ log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
- cpu_dump_state(env, logfile, fprintf, 0);
+ log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
- cpu_dump_state(env, logfile, fprintf, 0);
+ log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
tb_invalidated_flag = 0;
}
#ifdef DEBUG_EXEC
- if ((loglevel & CPU_LOG_EXEC)) {
- fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
- (long)tb->tc_ptr, tb->pc,
- lookup_symbol(tb->pc));
- }
+ qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
+ (long)tb->tc_ptr, tb->pc,
+ lookup_symbol(tb->pc));
#endif
/* see if we can patch the calling TB. When the TB
spans two pages, we cannot safely do a direct
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
sigprocmask(SIG_SETMASK, old_set, NULL);
- do_raise_exception_err(env->exception_index, env->error_code);
+ cpu_loop_exit();
} else {
/* activate soft MMU for this block */
cpu_resume_from_signal(env, puc);
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
sigprocmask(SIG_SETMASK, old_set, NULL);
- do_raise_exception_err(env->exception_index, env->error_code);
+ cpu_loop_exit();
} else {
/* activate soft MMU for this block */
cpu_resume_from_signal(env, puc);
#elif defined(__x86_64__)
+#ifdef __NetBSD__
+#define REG_ERR _REG_ERR
+#define REG_TRAPNO _REG_TRAPNO
+
+#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
+#define QEMU_UC_MACHINE_PC(uc) _UC_MACHINE_PC(uc)
+#else
+#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
+#define QEMU_UC_MACHINE_PC(uc) QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
+#endif
+
int cpu_signal_handler(int host_signum, void *pinfo,
void *puc)
{
siginfo_t *info = pinfo;
- struct ucontext *uc = puc;
unsigned long pc;
+#ifdef __NetBSD__
+ ucontext_t *uc = puc;
+#else
+ struct ucontext *uc = puc;
+#endif
- pc = uc->uc_mcontext.gregs[REG_RIP];
+ pc = QEMU_UC_MACHINE_PC(uc);
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
- uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
- (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
+ QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
+ (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
&uc->uc_sigmask, puc);
}
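The `QEMU_UC_MCONTEXT_GREGS`/`QEMU_UC_MACHINE_PC` wrappers added above hide the fact that NetBSD exposes the machine context as `uc_mcontext.__gregs[_REG_*]` while glibc uses `uc_mcontext.gregs[REG_*]`. The standalone sketch below applies the same idea in a fault handler, assuming a Linux/glibc x86_64 host; on NetBSD the accessor would go through `_UC_MACHINE_PC()` instead.

#define _GNU_SOURCE          /* for REG_RIP in <sys/ucontext.h> on glibc */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ucontext.h>

/* One accessor per host OS, mirroring the patch's QEMU_UC_MACHINE_PC(). */
#define UC_MACHINE_PC(uc) ((uc)->uc_mcontext.gregs[REG_RIP])

static void fault_handler(int sig, siginfo_t *info, void *puc)
{
    ucontext_t *uc = puc;

    /* Not async-signal-safe; acceptable for a demonstration only. */
    fprintf(stderr, "signal %d at pc=%#llx, faulting address %p\n",
            sig, (unsigned long long)UC_MACHINE_PC(uc), info->si_addr);
    exit(0);
}

int main(void)
{
    struct sigaction act;

    memset(&act, 0, sizeof(act));
    act.sa_sigaction = fault_handler;
    act.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &act, NULL);

    raise(SIGSEGV);          /* synthetic signal to exercise the handler */
    return 0;
}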
-#elif defined(__powerpc__)
+#elif defined(_ARCH_PPC)
/***********************************************************************
* signal context platform-specific definitions