/*
- * i386 emulator main execution loop
+ * emulator main execution loop
*
* Copyright (c) 2003-2005 Fabrice Bellard
*
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "config.h"
-#include "exec.h"
+#include "cpu.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"
-
-#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
-// Work around ugly bugs in glibc that mangle global register contents
-#undef env
-#define env cpu_single_env
-#endif
+#include "qtest.h"
int tb_invalidated_flag;
//#define CONFIG_DEBUG_EXEC
-int qemu_cpu_has_work(CPUState *env)
+bool qemu_cpu_has_work(CPUArchState *env)
{
return cpu_has_work(env);
}
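/* Unwind straight back to the setjmp() in cpu_exec(); current_tb is
   cleared first so nothing still believes a TB is executing. */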
-void cpu_loop_exit(CPUState *env1)
+void cpu_loop_exit(CPUArchState *env)
{
- env1->current_tb = NULL;
- longjmp(env1->jmp_env, 1);
+ env->current_tb = NULL;
+ longjmp(env->jmp_env, 1);
}
/* Exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator. */
#if defined(CONFIG_SOFTMMU)
-void cpu_resume_from_signal(CPUState *env1, void *puc)
+void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
- env = env1;
-
/* XXX: restore cpu registers saved in host registers */
env->exception_index = -1;
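/* The longjmp() that follows re-enters cpu_exec() with no exception
   pending, so execution resumes at the next TB lookup. */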
/* Execute the code without caching the generated code. An interpreter
could be used if available. */
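/* max_cycles bounds how many guest instructions the throw-away TB may
   contain; the block is translated, run once and then freed. */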
-static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
+static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
+ TranslationBlock *orig_tb)
{
- unsigned long next_tb;
+ tcg_target_ulong next_tb;
TranslationBlock *tb;
/* Should never happen. We only end up here when an existing
   TB is too long. */
tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                 max_cycles);
env->current_tb = tb;
/* execute the generated code */
- next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
+ next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
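+ /* The generated code receives env as an explicit argument instead
+    of relying on a reserved global host register. */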
env->current_tb = NULL;
if ((next_tb & 3) == 2) {
tb_free(tb);
}
-static TranslationBlock *tb_find_slow(target_ulong pc,
+static TranslationBlock *tb_find_slow(CPUArchState *env,
+ target_ulong pc,
target_ulong cs_base,
uint64_t flags)
{
TranslationBlock *tb, **ptb1;
unsigned int h;
- tb_page_addr_t phys_pc, phys_page1, phys_page2;
+ tb_page_addr_t phys_pc, phys_page1;
target_ulong virt_page2;
tb_invalidated_flag = 0;
/* find translated block using physical mappings */
phys_pc = get_page_addr_code(env, pc);
phys_page1 = phys_pc & TARGET_PAGE_MASK;
- phys_page2 = -1;
h = tb_phys_hash_func(phys_pc);
ptb1 = &tb_phys_hash[h];
for(;;) {
tb->flags == flags) {
/* check next page if needed */
if (tb->page_addr[1] != -1) {
+ tb_page_addr_t phys_page2;
+
virt_page2 = (pc & TARGET_PAGE_MASK) +
TARGET_PAGE_SIZE;
phys_page2 = get_page_addr_code(env, virt_page2);
return tb;
}
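/* tb_find_fast: first probe the direct-mapped tb_jmp_cache, indexed
   by the virtual PC; on a miss fall back to tb_find_slow() and the
   physical hash table. */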
-static inline TranslationBlock *tb_find_fast(void)
+static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
TranslationBlock *tb;
target_ulong cs_base, pc;
int flags;
/* pc, cs_base and flags are the subset of CPU state a translated
   block depends on. */
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
tb->flags != flags)) {
- tb = tb_find_slow(pc, cs_base, flags);
+ tb = tb_find_slow(env, pc, cs_base, flags);
}
return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;
-CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
+void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
- CPUDebugExcpHandler *old_handler = debug_excp_handler;
-
debug_excp_handler = handler;
- return old_handler;
}
-static void cpu_handle_debug_exception(CPUState *env)
+static void cpu_handle_debug_exception(CPUArchState *env)
{
CPUWatchpoint *wp;
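/* Set asynchronously (e.g. from a signal handler) to make cpu_exec()
   return; hence the volatile sig_atomic_t type. */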
volatile sig_atomic_t exit_request;
-int cpu_exec(CPUState *env1)
+int cpu_exec(CPUArchState *env)
{
- volatile host_reg_t saved_env_reg;
+#ifdef TARGET_PPC
+ CPUState *cpu = ENV_GET_CPU(env);
+#endif
int ret, interrupt_request;
TranslationBlock *tb;
uint8_t *tc_ptr;
- unsigned long next_tb;
+ tcg_target_ulong next_tb;
- if (env1->halted) {
- if (!cpu_has_work(env1)) {
+ if (env->halted) {
+ if (!cpu_has_work(env)) {
return EXCP_HALTED;
}
- env1->halted = 0;
+ env->halted = 0;
}
- cpu_single_env = env1;
-
- /* the access to env below is actually saving the global register's
- value, so that files not including target-xyz/exec.h are free to
- use it. */
- QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
- saved_env_reg = (host_reg_t) env;
- barrier();
- env = env1;
+ cpu_single_env = env;
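+ /* Publish the running CPU so that signal handlers and helpers
+    outside this loop can find it through cpu_single_env. */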
if (unlikely(exit_request)) {
env->exit_request = 1;
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
+ env->reserve_addr = -1;
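+ /* -1 marks no active lwarx/stwcx. reservation. */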
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
+#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
+#elif defined(TARGET_XTENSA)
/* XXXXX */
#else
#error unsupported target CPU
/* prepare setjmp context for exception handling */
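/* cpu_loop_exit() longjmp()s back to this setjmp() whenever the
   current TB must be abandoned, e.g. for an exception or interrupt. */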
for(;;) {
if (setjmp(env->jmp_env) == 0) {
-#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
-#undef env
- env = cpu_single_env;
-#define env cpu_single_env
-#endif
/* if an exception is pending, we execute it here */
if (env->exception_index >= 0) {
if (env->exception_index >= EXCP_INTERRUPT) {
/* if user mode only, we simulate a fake hardware exception,
   which will be handled outside the cpu execution loop */
#if defined(TARGET_I386)
- do_interrupt_user(env->exception_index,
- env->exception_is_int,
- env->error_code,
- env->exception_next_eip);
- /* successfully delivered */
- env->old_exception = -1;
+ do_interrupt(env);
#endif
ret = env->exception_index;
break;
#else
-#if defined(TARGET_I386)
- /* simulate a real cpu exception. On i386, it can
- trigger new exceptions, but we do not handle
- double or triple faults yet. */
- do_interrupt(env->exception_index,
- env->exception_is_int,
- env->error_code,
- env->exception_next_eip, 0);
- /* successfully delivered */
- env->old_exception = -1;
-#elif defined(TARGET_PPC)
- do_interrupt(env);
-#elif defined(TARGET_LM32)
do_interrupt(env);
-#elif defined(TARGET_MICROBLAZE)
- do_interrupt(env);
-#elif defined(TARGET_MIPS)
- do_interrupt(env);
-#elif defined(TARGET_SPARC)
- do_interrupt(env);
-#elif defined(TARGET_ARM)
- do_interrupt(env);
-#elif defined(TARGET_UNICORE32)
- do_interrupt(env);
-#elif defined(TARGET_SH4)
- do_interrupt(env);
-#elif defined(TARGET_ALPHA)
- do_interrupt(env);
-#elif defined(TARGET_CRIS)
- do_interrupt(env);
-#elif defined(TARGET_M68K)
- do_interrupt(0);
-#elif defined(TARGET_S390X)
- do_interrupt(env);
-#endif
env->exception_index = -1;
#endif
}
}
#endif
#if defined(TARGET_I386)
+#if !defined(CONFIG_USER_ONLY)
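+ /* The APIC asked to be polled: apic_poll_irq() raises
+    CPU_INTERRUPT_HARD if an interrupt really is pending. */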
+ if (interrupt_request & CPU_INTERRUPT_POLL) {
+ env->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(env->apic_state);
+ }
+#endif
if (interrupt_request & CPU_INTERRUPT_INIT) {
- svm_check_intercept(SVM_EXIT_INIT);
- do_cpu_init(env);
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
+ 0);
+ do_cpu_init(x86_env_get_cpu(env));
env->exception_index = EXCP_HALTED;
cpu_loop_exit(env);
} else if (interrupt_request & CPU_INTERRUPT_SIPI) {
- do_cpu_sipi(env);
+ do_cpu_sipi(x86_env_get_cpu(env));
} else if (env->hflags2 & HF2_GIF_MASK) {
if ((interrupt_request & CPU_INTERRUPT_SMI) &&
!(env->hflags & HF_SMM_MASK)) {
- svm_check_intercept(SVM_EXIT_SMI);
+ cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
+ 0);
env->interrupt_request &= ~CPU_INTERRUPT_SMI;
- do_smm_enter();
+ do_smm_enter(env);
next_tb = 0;
} else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
!(env->hflags2 & HF2_NMI_MASK)) {
env->interrupt_request &= ~CPU_INTERRUPT_NMI;
env->hflags2 |= HF2_NMI_MASK;
- do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
+ do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
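+ /* The trailing 1 is the is_hw flag: the NMI is a
+    hardware-initiated event. */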
next_tb = 0;
- } else if (interrupt_request & CPU_INTERRUPT_MCE) {
+ } else if (interrupt_request & CPU_INTERRUPT_MCE) {
env->interrupt_request &= ~CPU_INTERRUPT_MCE;
- do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
+ do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
next_tb = 0;
} else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
(((env->hflags2 & HF2_VINTR_MASK) &&
(env->eflags & IF_MASK &&
!(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
int intno;
- svm_check_intercept(SVM_EXIT_INTR);
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
+ 0);
env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
intno = cpu_get_pic_interrupt(env);
qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
-#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
-#undef env
- env = cpu_single_env;
-#define env cpu_single_env
-#endif
- do_interrupt(intno, 0, 0, 0, 1);
+ do_interrupt_x86_hardirq(env, intno, 1);
/* ensure that no TB jump will be modified as
the program flow was changed */
next_tb = 0;
!(env->hflags & HF_INHIBIT_IRQ_MASK)) {
int intno;
/* FIXME: this should respect TPR */
- svm_check_intercept(SVM_EXIT_VINTR);
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
+ 0);
intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
- do_interrupt(intno, 0, 0, 0, 1);
+ do_interrupt_x86_hardirq(env, intno, 1);
env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
next_tb = 0;
#endif
}
}
#elif defined(TARGET_PPC)
-#if 0
if ((interrupt_request & CPU_INTERRUPT_RESET)) {
- cpu_reset(env);
+ cpu_reset(cpu);
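+ /* Operates on the CPUState fetched with ENV_GET_CPU()
+    at the top of cpu_exec(). */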
}
-#endif
if (interrupt_request & CPU_INTERRUPT_HARD) {
ppc_hw_interrupt(env);
if (env->pending_interrupts == 0)
do_interrupt(env);
next_tb = 0;
}
+#elif defined(TARGET_OPENRISC)
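+ /* External interrupts and the tick timer are gated by the
+    SR.IEE and SR.TEE enable bits, respectively. */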
+ {
+ int idx = -1;
+ if ((interrupt_request & CPU_INTERRUPT_HARD)
+ && (env->sr & SR_IEE)) {
+ idx = EXCP_INT;
+ }
+ if ((interrupt_request & CPU_INTERRUPT_TIMER)
+ && (env->sr & SR_TEE)) {
+ idx = EXCP_TICK;
+ }
+ if (idx >= 0) {
+ env->exception_index = idx;
+ do_interrupt(env);
+ next_tb = 0;
+ }
+ }
#elif defined(TARGET_SPARC)
if (interrupt_request & CPU_INTERRUPT_HARD) {
if (cpu_interrupts_enabled(env) &&
next_tb = 0;
}
}
- }
+ }
#elif defined(TARGET_ARM)
if (interrupt_request & CPU_INTERRUPT_FIQ
&& !(env->uncached_cpsr & CPSR_F)) {
#elif defined(TARGET_UNICORE32)
if (interrupt_request & CPU_INTERRUPT_HARD
&& !(env->uncached_asr & ASR_I)) {
+ env->exception_index = UC32_EXCP_INTR;
do_interrupt(env);
next_tb = 0;
}
{
int idx = -1;
/* ??? This hard-codes the OSF/1 interrupt levels. */
- switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
+ switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
case 0 ... 3:
if (interrupt_request & CPU_INTERRUPT_HARD) {
idx = EXCP_DEV_INTERRUPT;
do_interrupt(env);
next_tb = 0;
}
- if (interrupt_request & CPU_INTERRUPT_NMI
- && (env->pregs[PR_CCS] & M_FLAG)) {
- env->exception_index = EXCP_NMI;
- do_interrupt(env);
- next_tb = 0;
+ if (interrupt_request & CPU_INTERRUPT_NMI) {
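+ /* The NMI-enable (M) flag sits at a different CCS bit on
+    CRISv10 than on v32; choose it by the version register. */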
+ unsigned int m_flag_archval;
+ if (env->pregs[PR_VR] < 32) {
+ m_flag_archval = M_FLAG_V10;
+ } else {
+ m_flag_archval = M_FLAG_V32;
+ }
+ if ((env->pregs[PR_CCS] & m_flag_archval)) {
+ env->exception_index = EXCP_NMI;
+ do_interrupt(env);
+ next_tb = 0;
+ }
}
#elif defined(TARGET_M68K)
if (interrupt_request & CPU_INTERRUPT_HARD
provide/save the vector when the interrupt is
first signalled. */
env->exception_index = env->pending_vector;
- do_interrupt(1);
+ do_interrupt_m68k_hardirq(env);
next_tb = 0;
}
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
do_interrupt(env);
next_tb = 0;
}
+#elif defined(TARGET_XTENSA)
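+ /* All pending hardware interrupts are delivered as the single
+    EXC_IRQ exception. */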
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ env->exception_index = EXC_IRQ;
+ do_interrupt(env);
+ next_tb = 0;
+ }
#endif
/* Don't use the cached interrupt_request value; do_interrupt()
   may have updated the EXITTB flag. */
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
/* restore flags in standard format */
#if defined(TARGET_I386)
- env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
- log_cpu_state(env, X86_DUMP_CCOP);
+ env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
+ | (DF & DF_MASK);
+ log_cpu_state(env, CPU_DUMP_CCOP);
env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
cpu_m68k_flush_flags(env, env->cc_op);
}
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
spin_lock(&tb_lock);
- tb = tb_find_fast();
+ tb = tb_find_fast(env);
/* Note: we do it here to avoid a gcc bug on Mac OS X when
doing it in tb_find_slow */
if (tb_invalidated_flag) {
tb_invalidated_flag = 0;
}
#ifdef CONFIG_DEBUG_EXEC
- qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
- (long)tb->tc_ptr, tb->pc,
+ qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
+ tb->tc_ptr, tb->pc,
lookup_symbol(tb->pc));
#endif
/* see if we can patch the calling TB. When the TB
   spans two pages, we cannot safely do a direct jump. */
barrier();
if (likely(!env->exit_request)) {
tc_ptr = tb->tc_ptr;
- /* execute the generated code */
-#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
-#undef env
- env = cpu_single_env;
-#define env cpu_single_env
-#endif
- next_tb = tcg_qemu_tb_exec(tc_ptr);
+ /* execute the generated code */
+ next_tb = tcg_qemu_tb_exec(env, tc_ptr);
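+ /* The low two bits of next_tb carry the exit condition: 2 means
+    the instruction counter expired, otherwise they index the jump
+    slot to patch when chaining TBs; the remaining bits are a
+    TranslationBlock pointer. */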
if ((next_tb & 3) == 2) {
/* Instruction counter expired. */
int insns_left;
- tb = (TranslationBlock *)(long)(next_tb & ~3);
+ tb = (TranslationBlock *)(next_tb & ~3);
/* Restore PC. */
cpu_pc_from_tb(env, tb);
insns_left = env->icount_decr.u32;
} else {
if (insns_left > 0) {
/* Execute remaining instructions. */
- cpu_exec_nocache(insns_left, tb);
+ cpu_exec_nocache(env, insns_left, tb);
}
env->exception_index = EXCP_INTERRUPT;
next_tb = 0;
/* reset soft MMU for next block (it can currently
only be set by a memory fault) */
} /* for(;;) */
+ } else {
+ /* Reload env after longjmp - the compiler may have smashed all
+ * local variables as longjmp is marked 'noreturn'. */
+ env = cpu_single_env;
}
} /* for(;;) */
#if defined(TARGET_I386)
/* restore flags in standard format */
- env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
+ env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
+ | (DF & DF_MASK);
#elif defined(TARGET_ARM)
/* XXX: Save/restore host fpu exception state?. */
#elif defined(TARGET_UNICORE32)
| env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
+#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
+#elif defined(TARGET_XTENSA)
/* XXXXX */
#else
#error unsupported target CPU
#endif
- /* restore global registers */
- barrier();
- env = (void *) saved_env_reg;
-
/* fail safe: never use cpu_single_env outside cpu_exec() */
cpu_single_env = NULL;
return ret;