*/
#include "config.h"
#include "cpu.h"
+#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
+#include "qemu/timer.h"
+#include "exec/address-spaces.h"
+#include "exec/memory-internal.h"
+#include "qemu/rcu.h"
+#include "exec/tb-hash.h"
-//#define CONFIG_DEBUG_EXEC
+/* -icount align implementation. */
-bool qemu_cpu_has_work(CPUState *cpu)
+typedef struct SyncClocks {
+ int64_t diff_clk;
+ int64_t last_cpu_icount;
+ int64_t realtime_clock;
+} SyncClocks;
+
+#if !defined(CONFIG_USER_ONLY)
+/* Allow the guest to run at most 3 ms ahead of real time;
+ * the difference between the two clocks therefore
+ * oscillates around 0.
+ */
+#define VM_CLOCK_ADVANCE 3000000
+#define THRESHOLD_REDUCE 1.5
+#define MAX_DELAY_PRINT_RATE 2000000000LL
+#define MAX_NB_PRINTS 100
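+/* Units: VM_CLOCK_ADVANCE and MAX_DELAY_PRINT_RATE are in nanoseconds
+ * (3 ms and 2 s respectively); THRESHOLD_REDUCE is in seconds of guest
+ * lateness (see print_delay); MAX_NB_PRINTS is a message count.
+ */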
+
+static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
- return cpu_has_work(cpu);
+ int64_t cpu_icount;
+
+ if (!icount_align_option) {
+ return;
+ }
+
+ cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
+ sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
+ sc->last_cpu_icount = cpu_icount;
+
+ if (sc->diff_clk > VM_CLOCK_ADVANCE) {
+#ifndef _WIN32
+ struct timespec sleep_delay, rem_delay;
+ sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
+ sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
+ if (nanosleep(&sleep_delay, &rem_delay) < 0) {
+ sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
+ } else {
+ sc->diff_clk = 0;
+ }
+#else
+ Sleep(sc->diff_clk / SCALE_MS);
+ sc->diff_clk = 0;
+#endif
+ }
}
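+
+/* Illustrative arithmetic, assuming an icount shift of 10 (so that
+ * cpu_icount_to_ns() maps one guest instruction to 1024 ns): the guest
+ * can bank at most VM_CLOCK_ADVANCE / 1024 ~= 2900 instructions of
+ * advance before align_clocks() puts this thread to sleep.
+ */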
-void cpu_loop_exit(CPUArchState *env)
+static void print_delay(const SyncClocks *sc)
{
- CPUState *cpu = ENV_GET_CPU(env);
+ static float threshold_delay;
+ static int64_t last_realtime_clock;
+ static int nb_prints;
+
+ if (icount_align_option &&
+ sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
+ nb_prints < MAX_NB_PRINTS) {
+ if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
+ (-sc->diff_clk / (float)1000000000LL <
+ (threshold_delay - THRESHOLD_REDUCE))) {
+ threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
+ printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
+ threshold_delay - 1,
+ threshold_delay);
+ nb_prints++;
+ last_realtime_clock = sc->realtime_clock;
+ }
+ }
+}
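+
+/* Worked example of the hysteresis above: a guest 2.3 s late sets
+ * threshold_delay to 3.0 and prints "late by 2.0 to 3.0 seconds";
+ * nothing more is printed until the delay leaves that band, i.e. grows
+ * past 3.0 s or shrinks below 1.5 s (threshold_delay - THRESHOLD_REDUCE).
+ */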
+static void init_delay_params(SyncClocks *sc,
+ const CPUState *cpu)
+{
+ if (!icount_align_option) {
+ return;
+ }
+ sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
+ sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
+ sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
+ if (sc->diff_clk < max_delay) {
+ max_delay = sc->diff_clk;
+ }
+ if (sc->diff_clk > max_advance) {
+ max_advance = sc->diff_clk;
+ }
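+
+ /* Note the sense of the globals updated above: max_delay tracks the
+ * most negative diff_clk seen (guest furthest behind the host),
+ * max_advance the most positive (guest furthest ahead).
+ */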
+
+ /* Print every 2s max if the guest is late. We limit the number
+ of printed messages to MAX_NB_PRINTS (currently 100). */
+ print_delay(sc);
+}
+#else
+static void align_clocks(SyncClocks *sc, const CPUState *cpu)
+{
+}
+
+static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
+{
+}
+#endif /* CONFIG_USER_ONLY */
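+
+/* For CONFIG_USER_ONLY builds the two stubs above reduce the icount
+ * alignment calls to no-ops, so cpu_exec() below can call
+ * init_delay_params() and align_clocks() unconditionally.
+ */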
+
+void cpu_loop_exit(CPUState *cpu)
+{
cpu->current_tb = NULL;
- longjmp(env->jmp_env, 1);
+ siglongjmp(cpu->jmp_env, 1);
}
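+
+/* This pairs with the sigsetjmp(cpu->jmp_env, 0) in cpu_exec(); the 0
+ * savemask means the signal mask is deliberately not saved and restored
+ * across the jump, avoiding a sigprocmask call per loop iteration.
+ */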
/* exit the current TB from a signal handler. The host registers are
restored in a state compatible with the CPU emulator
*/
#if defined(CONFIG_SOFTMMU)
-void cpu_resume_from_signal(CPUArchState *env, void *puc)
+void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
/* XXX: restore cpu registers saved in host registers */
- env->exception_index = -1;
- longjmp(env->jmp_env, 1);
+ cpu->exception_index = -1;
+ siglongjmp(cpu->jmp_env, 1);
}
+
+void cpu_reload_memory_map(CPUState *cpu)
+{
+ AddressSpaceDispatch *d;
+
+ if (qemu_in_vcpu_thread()) {
+ /* Do not let the guest prolong the critical section as much
+ * as it desires.
+ *
+ * Currently, this is prevented by the I/O thread's periodic kicking
+ * of the VCPU thread (iothread_requesting_mutex, qemu_cpu_kick_thread)
+ * but this will go away once TCG's execution moves out of the global
+ * mutex.
+ *
+ * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
+ * only protects cpu->as->dispatch. Since we reload it below, we can
+ * split the critical section.
+ */
+ rcu_read_unlock();
+ rcu_read_lock();
+ }
+
+ /* The CPU and TLB are protected by the iothread lock. */
+ d = atomic_rcu_read(&cpu->as->dispatch);
+ cpu->memory_dispatch = d;
+ tlb_flush(cpu, 1);
+}
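+
+/* Dropping and immediately re-taking the RCU read lock above marks a
+ * quiescent point: a writer blocked in synchronize_rcu() after a memory
+ * map change may then free the old AddressSpaceDispatch, which is why
+ * cpu->as->dispatch must be re-read afterwards rather than cached.
+ */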
+#endif
+
+/* Execute a TB, and fix up the CPU state afterwards if necessary */
+static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
+{
+ CPUArchState *env = cpu->env_ptr;
+ uintptr_t next_tb;
+
+#if defined(DEBUG_DISAS)
+ if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
+#if defined(TARGET_I386)
+ log_cpu_state(cpu, CPU_DUMP_CCOP);
+#elif defined(TARGET_M68K)
+ /* ??? Should not modify env state for dumping. */
+ cpu_m68k_flush_flags(env, env->cc_op);
+ env->cc_op = CC_OP_FLAGS;
+ env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
+ log_cpu_state(cpu, 0);
+#else
+ log_cpu_state(cpu, 0);
#endif
+ }
+#endif /* DEBUG_DISAS */
+
+ cpu->can_do_io = 0;
+ next_tb = tcg_qemu_tb_exec(env, tb_ptr);
+ cpu->can_do_io = 1;
+ trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
+ next_tb & TB_EXIT_MASK);
+
+ if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
+ /* We didn't start executing this TB (e.g. because the instruction
+ * counter hit zero); we must restore the guest PC to the address
+ * of the start of the TB.
+ */
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
+ if (cc->synchronize_from_tb) {
+ cc->synchronize_from_tb(cpu, tb);
+ } else {
+ assert(cc->set_pc);
+ cc->set_pc(cpu, tb->pc);
+ }
+ }
+ if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
+ /* We were asked to stop executing TBs (probably a pending
+ * interrupt). We've now stopped, so clear the flag.
+ */
+ cpu->tcg_exit_req = 0;
+ }
+ return next_tb;
+}
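+
+/* The low bits of next_tb encode why the TB stopped (TB_EXIT_MASK is 3
+ * in tcg.h at the time of writing): values 0/1 name the goto_tb jump
+ * slot taken, TB_EXIT_ICOUNT_EXPIRED (2) and TB_EXIT_REQUESTED (3) are
+ * the forced exits handled above; the remaining bits carry the
+ * TranslationBlock pointer itself.
+ */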
/* Execute the code without caching the generated code. An interpreter
could be used if available. */
TranslationBlock *orig_tb)
{
CPUState *cpu = ENV_GET_CPU(env);
- tcg_target_ulong next_tb;
TranslationBlock *tb;
+ target_ulong pc = orig_tb->pc;
+ target_ulong cs_base = orig_tb->cs_base;
+ uint64_t flags = orig_tb->flags;
/* Should never happen.
We only end up here when an existing TB is too long. */
if (max_cycles > CF_COUNT_MASK)
max_cycles = CF_COUNT_MASK;
- tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
- max_cycles);
+ /* tb_gen_code can flush our orig_tb, invalidate it now */
+ tb_phys_invalidate(orig_tb, -1);
+ tb = tb_gen_code(cpu, pc, cs_base, flags,
+ max_cycles | CF_NOCACHE);
cpu->current_tb = tb;
/* execute the generated code */
- next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
+ trace_exec_tb_nocache(tb, tb->pc);
+ cpu_tb_exec(cpu, tb->tc_ptr);
cpu->current_tb = NULL;
-
- if ((next_tb & 3) == 2) {
- /* Restore PC. This may happen if async event occurs before
- the TB starts executing. */
- cpu_pc_from_tb(env, tb);
- }
tb_phys_invalidate(tb, -1);
tb_free(tb);
}
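+
+/* The nocache TB is a one-shot: max_cycles is packed into the low
+ * CF_COUNT_MASK bits of the cflags argument so the icount budget is
+ * known at translation time, CF_NOCACHE marks the block as uncacheable,
+ * and the TB is invalidated and freed right after its single run.
+ */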
target_ulong cs_base,
uint64_t flags)
{
+ CPUState *cpu = ENV_GET_CPU(env);
TranslationBlock *tb, **ptb1;
unsigned int h;
tb_page_addr_t phys_pc, phys_page1;
}
not_found:
/* if no translated code available, then translate it now */
- tb = tb_gen_code(env, pc, cs_base, flags, 0);
+ tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
found:
/* Move the last found TB to the head of the list */
tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
}
/* we add the TB in the virtual pc hash table */
- env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
+ cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
return tb;
}
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
+ CPUState *cpu = ENV_GET_CPU(env);
TranslationBlock *tb;
target_ulong cs_base, pc;
int flags;
always be the same before a given translated block
is executed. */
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
+ tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
tb->flags != flags)) {
tb = tb_find_slow(env, pc, cs_base, flags);
return tb;
}
-static CPUDebugExcpHandler *debug_excp_handler;
-
-void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
-{
- debug_excp_handler = handler;
-}
-
static void cpu_handle_debug_exception(CPUArchState *env)
{
+ CPUState *cpu = ENV_GET_CPU(env);
+ CPUClass *cc = CPU_GET_CLASS(cpu);
CPUWatchpoint *wp;
- if (!env->watchpoint_hit) {
- QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ if (!cpu->watchpoint_hit) {
+ QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
- if (debug_excp_handler) {
- debug_excp_handler(env);
- }
+
+ cc->debug_excp_handler(cpu);
}
/* main execution loop */
int cpu_exec(CPUArchState *env)
{
CPUState *cpu = ENV_GET_CPU(env);
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+#ifdef TARGET_I386
+ X86CPU *x86_cpu = X86_CPU(cpu);
+#endif
int ret, interrupt_request;
TranslationBlock *tb;
uint8_t *tc_ptr;
- tcg_target_ulong next_tb;
+ uintptr_t next_tb;
+ SyncClocks sc;
+
+ /* This must be volatile so it is not trashed by longjmp() */
+ volatile bool have_tb_lock = false;
- if (env->halted) {
+ if (cpu->halted) {
if (!cpu_has_work(cpu)) {
return EXCP_HALTED;
}
- env->halted = 0;
+ cpu->halted = 0;
}
- cpu_single_env = env;
+ current_cpu = cpu;
+
+ /* Until the assignment to current_cpu just above, requests by other
+ * threads to exit the execution loop are expected to be issued using
+ * the exit_request global. We must make sure that our evaluation of
+ * the global value is performed past the current_cpu value transition
+ * point, which requires a memory barrier as well as an instruction
+ * scheduling constraint on modern architectures. */
+ smp_mb();
+
+ rcu_read_lock();
if (unlikely(exit_request)) {
cpu->exit_request = 1;
}
-#if defined(TARGET_I386)
- /* put eflags in CPU temporary format */
- CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
- DF = 1 - (2 * ((env->eflags >> 10) & 1));
- CC_OP = CC_OP_EFLAGS;
- env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
-#elif defined(TARGET_SPARC)
-#elif defined(TARGET_M68K)
- env->cc_op = CC_OP_FLAGS;
- env->cc_dest = env->sr & 0xf;
- env->cc_x = (env->sr >> 4) & 1;
-#elif defined(TARGET_ALPHA)
-#elif defined(TARGET_ARM)
-#elif defined(TARGET_UNICORE32)
-#elif defined(TARGET_PPC)
- env->reserve_addr = -1;
-#elif defined(TARGET_LM32)
-#elif defined(TARGET_MICROBLAZE)
-#elif defined(TARGET_MIPS)
-#elif defined(TARGET_OPENRISC)
-#elif defined(TARGET_SH4)
-#elif defined(TARGET_CRIS)
-#elif defined(TARGET_S390X)
-#elif defined(TARGET_XTENSA)
- /* XXXXX */
-#else
-#error unsupported target CPU
-#endif
- env->exception_index = -1;
+ cc->cpu_exec_enter(cpu);
+
+ /* Calculate difference between guest clock and host clock.
+ * This delay includes the delay of the last cycle, so
+ * what we have to do is sleep until it is 0. As for the
+ * advance/delay we gain here, we try to fix it next time.
+ */
+ init_delay_params(&sc, cpu);
/* prepare setjmp context for exception handling */
for(;;) {
- if (setjmp(env->jmp_env) == 0) {
+ if (sigsetjmp(cpu->jmp_env, 0) == 0) {
/* if an exception is pending, we execute it here */
- if (env->exception_index >= 0) {
- if (env->exception_index >= EXCP_INTERRUPT) {
+ if (cpu->exception_index >= 0) {
+ if (cpu->exception_index >= EXCP_INTERRUPT) {
/* exit request from the cpu execution loop */
- ret = env->exception_index;
+ ret = cpu->exception_index;
if (ret == EXCP_DEBUG) {
cpu_handle_debug_exception(env);
}
+ cpu->exception_index = -1;
break;
} else {
#if defined(CONFIG_USER_ONLY)
which will be handled outside the cpu execution
loop */
#if defined(TARGET_I386)
- do_interrupt(env);
+ cc->do_interrupt(cpu);
#endif
- ret = env->exception_index;
+ ret = cpu->exception_index;
+ cpu->exception_index = -1;
break;
#else
- do_interrupt(env);
- env->exception_index = -1;
+ cc->do_interrupt(cpu);
+ cpu->exception_index = -1;
#endif
}
}
next_tb = 0; /* force lookup of first TB */
for(;;) {
- interrupt_request = env->interrupt_request;
+ interrupt_request = cpu->interrupt_request;
if (unlikely(interrupt_request)) {
- if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
+ if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
/* Mask out external interrupts for this step. */
interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
}
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
- env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
- env->exception_index = EXCP_DEBUG;
- cpu_loop_exit(env);
+ cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
+ cpu->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cpu);
}
-#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
- defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
- defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
if (interrupt_request & CPU_INTERRUPT_HALT) {
- env->interrupt_request &= ~CPU_INTERRUPT_HALT;
- env->halted = 1;
- env->exception_index = EXCP_HLT;
- cpu_loop_exit(env);
+ cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
+ cpu->halted = 1;
+ cpu->exception_index = EXCP_HLT;
+ cpu_loop_exit(cpu);
}
-#endif
#if defined(TARGET_I386)
-#if !defined(CONFIG_USER_ONLY)
- if (interrupt_request & CPU_INTERRUPT_POLL) {
- env->interrupt_request &= ~CPU_INTERRUPT_POLL;
- apic_poll_irq(env->apic_state);
- }
-#endif
if (interrupt_request & CPU_INTERRUPT_INIT) {
- cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
- 0);
- do_cpu_init(x86_env_get_cpu(env));
- env->exception_index = EXCP_HALTED;
- cpu_loop_exit(env);
- } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
- do_cpu_sipi(x86_env_get_cpu(env));
- } else if (env->hflags2 & HF2_GIF_MASK) {
- if ((interrupt_request & CPU_INTERRUPT_SMI) &&
- !(env->hflags & HF_SMM_MASK)) {
- cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
- 0);
- env->interrupt_request &= ~CPU_INTERRUPT_SMI;
- do_smm_enter(env);
- next_tb = 0;
- } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
- !(env->hflags2 & HF2_NMI_MASK)) {
- env->interrupt_request &= ~CPU_INTERRUPT_NMI;
- env->hflags2 |= HF2_NMI_MASK;
- do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
- next_tb = 0;
- } else if (interrupt_request & CPU_INTERRUPT_MCE) {
- env->interrupt_request &= ~CPU_INTERRUPT_MCE;
- do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
- next_tb = 0;
- } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
- (((env->hflags2 & HF2_VINTR_MASK) &&
- (env->hflags2 & HF2_HIF_MASK)) ||
- (!(env->hflags2 & HF2_VINTR_MASK) &&
- (env->eflags & IF_MASK &&
- !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
- int intno;
- cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
- 0);
- env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
- intno = cpu_get_pic_interrupt(env);
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
- do_interrupt_x86_hardirq(env, intno, 1);
- /* ensure that no TB jump will be modified as
- the program flow was changed */
- next_tb = 0;
-#if !defined(CONFIG_USER_ONLY)
- } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
- (env->eflags & IF_MASK) &&
- !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
- int intno;
- /* FIXME: this should respect TPR */
- cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
- 0);
- intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
- do_interrupt_x86_hardirq(env, intno, 1);
- env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
- next_tb = 0;
-#endif
- }
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
+ do_cpu_init(x86_cpu);
+ cpu->exception_index = EXCP_HALTED;
+ cpu_loop_exit(cpu);
}
-#elif defined(TARGET_PPC)
- if ((interrupt_request & CPU_INTERRUPT_RESET)) {
+#else
+ if (interrupt_request & CPU_INTERRUPT_RESET) {
cpu_reset(cpu);
}
- if (interrupt_request & CPU_INTERRUPT_HARD) {
- ppc_hw_interrupt(env);
- if (env->pending_interrupts == 0)
- env->interrupt_request &= ~CPU_INTERRUPT_HARD;
- next_tb = 0;
- }
-#elif defined(TARGET_LM32)
- if ((interrupt_request & CPU_INTERRUPT_HARD)
- && (env->ie & IE_IE)) {
- env->exception_index = EXCP_IRQ;
- do_interrupt(env);
- next_tb = 0;
- }
-#elif defined(TARGET_MICROBLAZE)
- if ((interrupt_request & CPU_INTERRUPT_HARD)
- && (env->sregs[SR_MSR] & MSR_IE)
- && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
- && !(env->iflags & (D_FLAG | IMM_FLAG))) {
- env->exception_index = EXCP_IRQ;
- do_interrupt(env);
- next_tb = 0;
- }
-#elif defined(TARGET_MIPS)
- if ((interrupt_request & CPU_INTERRUPT_HARD) &&
- cpu_mips_hw_interrupts_pending(env)) {
- /* Raise it */
- env->exception_index = EXCP_EXT_INTERRUPT;
- env->error_code = 0;
- do_interrupt(env);
- next_tb = 0;
- }
-#elif defined(TARGET_OPENRISC)
- {
- int idx = -1;
- if ((interrupt_request & CPU_INTERRUPT_HARD)
- && (env->sr & SR_IEE)) {
- idx = EXCP_INT;
- }
- if ((interrupt_request & CPU_INTERRUPT_TIMER)
- && (env->sr & SR_TEE)) {
- idx = EXCP_TICK;
- }
- if (idx >= 0) {
- env->exception_index = idx;
- do_interrupt(env);
- next_tb = 0;
- }
- }
-#elif defined(TARGET_SPARC)
- if (interrupt_request & CPU_INTERRUPT_HARD) {
- if (cpu_interrupts_enabled(env) &&
- env->interrupt_index > 0) {
- int pil = env->interrupt_index & 0xf;
- int type = env->interrupt_index & 0xf0;
-
- if (((type == TT_EXTINT) &&
- cpu_pil_allowed(env, pil)) ||
- type != TT_EXTINT) {
- env->exception_index = env->interrupt_index;
- do_interrupt(env);
- next_tb = 0;
- }
- }
- }
-#elif defined(TARGET_ARM)
- if (interrupt_request & CPU_INTERRUPT_FIQ
- && !(env->uncached_cpsr & CPSR_F)) {
- env->exception_index = EXCP_FIQ;
- do_interrupt(env);
- next_tb = 0;
- }
- /* ARMv7-M interrupt return works by loading a magic value
- into the PC. On real hardware the load causes the
- return to occur. The qemu implementation performs the
- jump normally, then does the exception return when the
- CPU tries to execute code at the magic address.
- This will cause the magic PC value to be pushed to
- the stack if an interrupt occurred at the wrong time.
- We avoid this by disabling interrupts when
- pc contains a magic address. */
- if (interrupt_request & CPU_INTERRUPT_HARD
- && ((IS_M(env) && env->regs[15] < 0xfffffff0)
- || !(env->uncached_cpsr & CPSR_I))) {
- env->exception_index = EXCP_IRQ;
- do_interrupt(env);
- next_tb = 0;
- }
-#elif defined(TARGET_UNICORE32)
- if (interrupt_request & CPU_INTERRUPT_HARD
- && !(env->uncached_asr & ASR_I)) {
- env->exception_index = UC32_EXCP_INTR;
- do_interrupt(env);
- next_tb = 0;
- }
-#elif defined(TARGET_SH4)
- if (interrupt_request & CPU_INTERRUPT_HARD) {
- do_interrupt(env);
- next_tb = 0;
- }
-#elif defined(TARGET_ALPHA)
- {
- int idx = -1;
- /* ??? This hard-codes the OSF/1 interrupt levels. */
- switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
- case 0 ... 3:
- if (interrupt_request & CPU_INTERRUPT_HARD) {
- idx = EXCP_DEV_INTERRUPT;
- }
- /* FALLTHRU */
- case 4:
- if (interrupt_request & CPU_INTERRUPT_TIMER) {
- idx = EXCP_CLK_INTERRUPT;
- }
- /* FALLTHRU */
- case 5:
- if (interrupt_request & CPU_INTERRUPT_SMP) {
- idx = EXCP_SMP_INTERRUPT;
- }
- /* FALLTHRU */
- case 6:
- if (interrupt_request & CPU_INTERRUPT_MCHK) {
- idx = EXCP_MCHK;
- }
- }
- if (idx >= 0) {
- env->exception_index = idx;
- env->error_code = 0;
- do_interrupt(env);
- next_tb = 0;
- }
- }
-#elif defined(TARGET_CRIS)
- if (interrupt_request & CPU_INTERRUPT_HARD
- && (env->pregs[PR_CCS] & I_FLAG)
- && !env->locked_irq) {
- env->exception_index = EXCP_IRQ;
- do_interrupt(env);
- next_tb = 0;
- }
- if (interrupt_request & CPU_INTERRUPT_NMI) {
- unsigned int m_flag_archval;
- if (env->pregs[PR_VR] < 32) {
- m_flag_archval = M_FLAG_V10;
- } else {
- m_flag_archval = M_FLAG_V32;
- }
- if ((env->pregs[PR_CCS] & m_flag_archval)) {
- env->exception_index = EXCP_NMI;
- do_interrupt(env);
- next_tb = 0;
- }
- }
-#elif defined(TARGET_M68K)
- if (interrupt_request & CPU_INTERRUPT_HARD
- && ((env->sr & SR_I) >> SR_I_SHIFT)
- < env->pending_level) {
- /* Real hardware gets the interrupt vector via an
- IACK cycle at this point. Current emulated
- hardware doesn't rely on this, so we
- provide/save the vector when the interrupt is
- first signalled. */
- env->exception_index = env->pending_vector;
- do_interrupt_m68k_hardirq(env);
- next_tb = 0;
- }
-#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
- if ((interrupt_request & CPU_INTERRUPT_HARD) &&
- (env->psw.mask & PSW_MASK_EXT)) {
- do_interrupt(env);
- next_tb = 0;
- }
-#elif defined(TARGET_XTENSA)
- if (interrupt_request & CPU_INTERRUPT_HARD) {
- env->exception_index = EXC_IRQ;
- do_interrupt(env);
+#endif
+ /* The target hook has three exit conditions:
+ false, when the interrupt isn't processed;
+ true, when it is, and we should restart on a new TB;
+ and a longjmp out through cpu_loop_exit. */
+ if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
next_tb = 0;
}
-#endif
- /* Don't use the cached interrupt_request value,
- do_interrupt may have updated the EXITTB flag. */
- if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
- env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
+ /* Don't use the cached interrupt_request value,
+ do_interrupt may have updated the EXITTB flag. */
+ if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
/* ensure that no TB jump will be modified as
the program flow was changed */
next_tb = 0;
}
if (unlikely(cpu->exit_request)) {
cpu->exit_request = 0;
- env->exception_index = EXCP_INTERRUPT;
- cpu_loop_exit(env);
+ cpu->exception_index = EXCP_INTERRUPT;
+ cpu_loop_exit(cpu);
}
-#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
- if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
- /* restore flags in standard format */
-#if defined(TARGET_I386)
- env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
- | (DF & DF_MASK);
- log_cpu_state(env, CPU_DUMP_CCOP);
- env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
-#elif defined(TARGET_M68K)
- cpu_m68k_flush_flags(env, env->cc_op);
- env->cc_op = CC_OP_FLAGS;
- env->sr = (env->sr & 0xffe0)
- | env->cc_dest | (env->cc_x << 4);
- log_cpu_state(env, 0);
-#else
- log_cpu_state(env, 0);
-#endif
- }
-#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
spin_lock(&tcg_ctx.tb_ctx.tb_lock);
+ have_tb_lock = true;
tb = tb_find_fast(env);
/* Note: we do it here to avoid a gcc bug on Mac OS X when
doing it in tb_find_slow */
next_tb = 0;
tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
}
-#ifdef CONFIG_DEBUG_EXEC
- qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
- tb->tc_ptr, tb->pc,
- lookup_symbol(tb->pc));
-#endif
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+ qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
+ tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
+ }
/* see if we can patch the calling TB. When the TB
spans two pages, we cannot safely do a direct
jump. */
if (next_tb != 0 && tb->page_addr[1] == -1) {
- tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
+ tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
+ next_tb & TB_EXIT_MASK, tb);
}
+ have_tb_lock = false;
spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
/* cpu_interrupt might be called while translating the
cpu->current_tb = tb;
barrier();
if (likely(!cpu->exit_request)) {
+ trace_exec_tb(tb, tb->pc);
tc_ptr = tb->tc_ptr;
/* execute the generated code */
- next_tb = tcg_qemu_tb_exec(env, tc_ptr);
- if ((next_tb & 3) == 2) {
+ next_tb = cpu_tb_exec(cpu, tc_ptr);
+ switch (next_tb & TB_EXIT_MASK) {
+ case TB_EXIT_REQUESTED:
+ /* Something asked us to stop executing
+ * chained TBs; just continue round the main
+ * loop. Whatever requested the exit will also
+ * have set something else (e.g. exit_request or
+ * interrupt_request) which we will handle
+ * next time around the loop.
+ */
+ next_tb = 0;
+ break;
+ case TB_EXIT_ICOUNT_EXPIRED:
+ {
/* Instruction counter expired. */
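+ /* (icount_decr.u16.low is only 16 bits wide, so a budget larger
+ * than 0xffff stays in icount_extra and is fed to the decrementer
+ * in chunks of at most 0xffff instructions.) */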
- int insns_left;
- tb = (TranslationBlock *)(next_tb & ~3);
- /* Restore PC. */
- cpu_pc_from_tb(env, tb);
- insns_left = env->icount_decr.u32;
- if (env->icount_extra && insns_left >= 0) {
+ int insns_left = cpu->icount_decr.u32;
+ if (cpu->icount_extra && insns_left >= 0) {
/* Refill decrementer and continue execution. */
- env->icount_extra += insns_left;
- if (env->icount_extra > 0xffff) {
- insns_left = 0xffff;
- } else {
- insns_left = env->icount_extra;
- }
- env->icount_extra -= insns_left;
- env->icount_decr.u16.low = insns_left;
+ cpu->icount_extra += insns_left;
+ insns_left = MIN(0xffff, cpu->icount_extra);
+ cpu->icount_extra -= insns_left;
+ cpu->icount_decr.u16.low = insns_left;
} else {
if (insns_left > 0) {
/* Execute remaining instructions. */
+ tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
cpu_exec_nocache(env, insns_left, tb);
+ align_clocks(&sc, cpu);
}
- env->exception_index = EXCP_INTERRUPT;
+ cpu->exception_index = EXCP_INTERRUPT;
next_tb = 0;
- cpu_loop_exit(env);
+ cpu_loop_exit(cpu);
}
+ break;
+ }
+ default:
+ break;
}
}
cpu->current_tb = NULL;
+ /* Try to align the host and virtual clocks
+ if the guest is ahead */
+ align_clocks(&sc, cpu);
/* reset soft MMU for next block (it can currently
only be set by a memory fault) */
} /* for(;;) */
} else {
/* Reload env after longjmp - the compiler may have smashed all
* local variables as longjmp is marked 'noreturn'. */
- env = cpu_single_env;
+ cpu = current_cpu;
+ env = cpu->env_ptr;
+ cc = CPU_GET_CLASS(cpu);
+ cpu->can_do_io = 1;
+#ifdef TARGET_I386
+ x86_cpu = X86_CPU(cpu);
+#endif
+ if (have_tb_lock) {
+ spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
+ have_tb_lock = false;
+ }
}
} /* for(;;) */
+ cc->cpu_exec_exit(cpu);
+ rcu_read_unlock();
-#if defined(TARGET_I386)
- /* restore flags in standard format */
- env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
- | (DF & DF_MASK);
-#elif defined(TARGET_ARM)
- /* XXX: Save/restore host fpu exception state?. */
-#elif defined(TARGET_UNICORE32)
-#elif defined(TARGET_SPARC)
-#elif defined(TARGET_PPC)
-#elif defined(TARGET_LM32)
-#elif defined(TARGET_M68K)
- cpu_m68k_flush_flags(env, env->cc_op);
- env->cc_op = CC_OP_FLAGS;
- env->sr = (env->sr & 0xffe0)
- | env->cc_dest | (env->cc_x << 4);
-#elif defined(TARGET_MICROBLAZE)
-#elif defined(TARGET_MIPS)
-#elif defined(TARGET_OPENRISC)
-#elif defined(TARGET_SH4)
-#elif defined(TARGET_ALPHA)
-#elif defined(TARGET_CRIS)
-#elif defined(TARGET_S390X)
-#elif defined(TARGET_XTENSA)
- /* XXXXX */
-#else
-#error unsupported target CPU
-#endif
-
- /* fail safe : never use cpu_single_env outside cpu_exec() */
- cpu_single_env = NULL;
+ /* fail-safe: never use current_cpu outside cpu_exec() */
+ current_cpu = NULL;
return ret;
}