*/
#include "qemu/osdep.h"
#include "cpu.h"
-#include "trace.h"
+#include "trace-root.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
uint8_t *tb_ptr = itb->tc_ptr;
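+    /* The trace line below now carries cpu->cpu_index, so interleaved
+     * log output from multiple vCPUs can be told apart. */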
qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
- "Trace %p [" TARGET_FMT_lx "] %s\n",
- itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));
+ "Trace %p [%d: " TARGET_FMT_lx "] %s\n",
+ itb->tc_ptr, cpu->cpu_index, itb->pc,
+ lookup_symbol(itb->pc));
#if defined(DEBUG_DISAS)
- if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
+ if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
+ && qemu_log_in_addr_range(itb->pc)) {
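+        /* Take the log file lock so the register dump below is not
+         * interleaved with log output from other threads. */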
+ qemu_log_lock();
#if defined(TARGET_I386)
log_cpu_state(cpu, CPU_DUMP_CCOP);
-#elif defined(TARGET_M68K)
- /* ??? Should not modify env state for dumping. */
- cpu_m68k_flush_flags(env, env->cc_op);
- env->cc_op = CC_OP_FLAGS;
- env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
- log_cpu_state(cpu, 0);
#else
log_cpu_state(cpu, 0);
#endif
+ qemu_log_unlock();
}
#endif /* DEBUG_DISAS */
/* We were asked to stop executing TBs (probably a pending
* interrupt). We've now stopped, so clear the flag.
*/
- cpu->tcg_exit_req = 0;
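+        /* tcg_exit_req may be set concurrently by another thread via
+         * cpu_exit(), so clear it with an atomic store. */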
+ atomic_set(&cpu->tcg_exit_req, 0);
}
return ret;
}
TranslationBlock *orig_tb, bool ignore_icount)
{
TranslationBlock *tb;
- bool old_tb_flushed;
/* Should never happen.
We only end up here when an existing TB is too long. */
if (max_cycles > CF_COUNT_MASK)
max_cycles = CF_COUNT_MASK;
- old_tb_flushed = cpu->tb_flushed;
- cpu->tb_flushed = false;
+ tb_lock();
tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
max_cycles | CF_NOCACHE
| (ignore_icount ? CF_IGNORE_ICOUNT : 0));
- tb->orig_tb = cpu->tb_flushed ? NULL : orig_tb;
- cpu->tb_flushed |= old_tb_flushed;
+ tb->orig_tb = orig_tb;
+ tb_unlock();
+
/* execute the generated code */
trace_exec_tb_nocache(tb, tb->pc);
cpu_tb_exec(cpu, tb);
+
+ tb_lock();
tb_phys_invalidate(tb, -1);
tb_free(tb);
+ tb_unlock();
}
#endif
-static TranslationBlock *tb_find_physical(CPUState *cpu,
- target_ulong pc,
- target_ulong cs_base,
- uint32_t flags)
+static void cpu_exec_step(CPUState *cpu)
{
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
- TranslationBlock *tb, **tb_hash_head, **ptb1;
- uint32_t h;
- tb_page_addr_t phys_pc, phys_page1;
+ TranslationBlock *tb;
+ target_ulong cs_base, pc;
+ uint32_t flags;
- /* find translated block using physical mappings */
- phys_pc = get_page_addr_code(env, pc);
- phys_page1 = phys_pc & TARGET_PAGE_MASK;
- h = tb_hash_func(phys_pc, pc, flags);
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
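+    /* Translate a single instruction into a throw-away TB: an instruction
+     * count of 1, CF_NOCACHE so it never enters the hash table, and
+     * CF_IGNORE_ICOUNT so the step works regardless of icount mode. */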
+ tb = tb_gen_code(cpu, pc, cs_base, flags,
+ 1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
+ tb->orig_tb = NULL;
+ /* execute the generated code */
+ trace_exec_tb_nocache(tb, pc);
+ cpu_tb_exec(cpu, tb);
+ tb_phys_invalidate(tb, -1);
+ tb_free(tb);
+}
- /* Start at head of the hash entry */
- ptb1 = tb_hash_head = &tcg_ctx.tb_ctx.tb_phys_hash[h];
- tb = *ptb1;
-
- while (tb) {
- if (tb->pc == pc &&
- tb->page_addr[0] == phys_page1 &&
- tb->cs_base == cs_base &&
- tb->flags == flags) {
-
- if (tb->page_addr[1] == -1) {
- /* done, we have a match */
- break;
- } else {
- /* check next page if needed */
- target_ulong virt_page2 = (pc & TARGET_PAGE_MASK) +
- TARGET_PAGE_SIZE;
- tb_page_addr_t phys_page2 = get_page_addr_code(env, virt_page2);
-
- if (tb->page_addr[1] == phys_page2) {
- break;
- }
- }
- }
+void cpu_exec_step_atomic(CPUState *cpu)
+{
+ start_exclusive();
- ptb1 = &tb->phys_hash_next;
- tb = *ptb1;
- }
+ /* Since we got here, we know that parallel_cpus must be true. */
+ parallel_cpus = false;
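+    /* With parallel_cpus clear, the step is translated as if it ran
+     * single-threaded; the exclusive section keeps the other vCPUs out
+     * while it executes. */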
+ cpu_exec_step(cpu);
+ parallel_cpus = true;
- if (tb) {
- /* Move the TB to the head of the list */
- *ptb1 = tb->phys_hash_next;
- tb->phys_hash_next = *tb_hash_head;
- *tb_hash_head = tb;
- }
- return tb;
+ end_exclusive();
}
-static TranslationBlock *tb_find_slow(CPUState *cpu,
- target_ulong pc,
- target_ulong cs_base,
- uint32_t flags)
-{
- TranslationBlock *tb;
+struct tb_desc {
+ target_ulong pc;
+ target_ulong cs_base;
+ CPUArchState *env;
+ tb_page_addr_t phys_page1;
+ uint32_t flags;
+};
- tb = tb_find_physical(cpu, pc, cs_base, flags);
- if (tb) {
- goto found;
- }
+static bool tb_cmp(const void *p, const void *d)
+{
+ const TranslationBlock *tb = p;
+ const struct tb_desc *desc = d;
+
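+    /* A candidate matches only if pc, first physical page, cs_base and
+     * flags all agree and the TB has not been invalidated concurrently. */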
+ if (tb->pc == desc->pc &&
+ tb->page_addr[0] == desc->phys_page1 &&
+ tb->cs_base == desc->cs_base &&
+ tb->flags == desc->flags &&
+ !atomic_read(&tb->invalid)) {
+ /* check next page if needed */
+ if (tb->page_addr[1] == -1) {
+ return true;
+ } else {
+ tb_page_addr_t phys_page2;
+ target_ulong virt_page2;
-#ifdef CONFIG_USER_ONLY
- /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
- * taken outside tb_lock. Since we're momentarily dropping
- * tb_lock, there's a chance that our desired tb has been
- * translated.
- */
- tb_unlock();
- mmap_lock();
- tb_lock();
- tb = tb_find_physical(cpu, pc, cs_base, flags);
- if (tb) {
- mmap_unlock();
- goto found;
+ virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ phys_page2 = get_page_addr_code(desc->env, virt_page2);
+ if (tb->page_addr[1] == phys_page2) {
+ return true;
+ }
+ }
}
-#endif
-
- /* if no translated code available, then translate it now */
- tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
+ return false;
+}
-#ifdef CONFIG_USER_ONLY
- mmap_unlock();
-#endif
+static TranslationBlock *tb_htable_lookup(CPUState *cpu,
+ target_ulong pc,
+ target_ulong cs_base,
+ uint32_t flags)
+{
+ tb_page_addr_t phys_pc;
+ struct tb_desc desc;
+ uint32_t h;
-found:
- /* we add the TB in the virtual pc hash table */
- cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
- return tb;
+ desc.env = (CPUArchState *)cpu->env_ptr;
+ desc.cs_base = cs_base;
+ desc.flags = flags;
+ desc.pc = pc;
+ phys_pc = get_page_addr_code(desc.env, pc);
+ desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
+ h = tb_hash_func(phys_pc, pc, flags);
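+    /* qht_lookup() reads the hash table without taking any lock;
+     * tb_cmp() weeds out hash collisions. */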
+ return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp, &desc, h);
}
-static inline TranslationBlock *tb_find_fast(CPUState *cpu,
- TranslationBlock **last_tb,
- int tb_exit)
+static inline TranslationBlock *tb_find(CPUState *cpu,
+ TranslationBlock *last_tb,
+ int tb_exit)
{
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
+ bool have_tb_lock = false;
/* we record a subset of the CPU state. It will
always be the same before a given translated block
is executed. */
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- tb_lock();
- tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
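+    /* Fast path: probe the per-vCPU jump cache without taking tb_lock. */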
+ tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
tb->flags != flags)) {
- tb = tb_find_slow(cpu, pc, cs_base, flags);
- }
- if (cpu->tb_flushed) {
- /* Ensure that no TB jump will be modified as the
- * translation buffer has been flushed.
- */
- *last_tb = NULL;
- cpu->tb_flushed = false;
+ tb = tb_htable_lookup(cpu, pc, cs_base, flags);
+ if (!tb) {
+ /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
+ * taken outside tb_lock. As system emulation is currently
+ * single threaded the locks are NOPs.
+ */
+ mmap_lock();
+ tb_lock();
+ have_tb_lock = true;
+
+ /* There's a chance that our desired tb has been translated while
+ * taking the locks so we check again inside the lock.
+ */
+ tb = tb_htable_lookup(cpu, pc, cs_base, flags);
+ if (!tb) {
+ /* if no translated code available, then translate it now */
+ tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
+ }
+
+ mmap_unlock();
+ }
+
+ /* We add the TB in the virtual pc hash table for the fast lookup */
+ atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
}
#ifndef CONFIG_USER_ONLY
/* We don't take care of direct jumps into TBs
* spanning two pages because the mapping for the second page can change.
*/
if (tb->page_addr[1] != -1) {
- *last_tb = NULL;
+ last_tb = NULL;
}
#endif
/* See if we can patch the calling TB. */
- if (*last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
- tb_add_jump(*last_tb, tb_exit, tb);
+ if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
+ if (!have_tb_lock) {
+ tb_lock();
+ have_tb_lock = true;
+ }
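+        /* Don't chain to a TB that was invalidated while we looked it up. */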
+ if (!tb->invalid) {
+ tb_add_jump(last_tb, tb_exit, tb);
+ }
+ }
+ if (have_tb_lock) {
+ tb_unlock();
}
- tb_unlock();
return tb;
}
} else if (replay_has_exception()
&& cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
/* try to cause an exception pending in the log */
- TranslationBlock *last_tb = NULL; /* Avoid chaining TBs */
- cpu_exec_nocache(cpu, 1, tb_find_fast(cpu, &last_tb, 0), true);
+ cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0), true);
*ret = -1;
return true;
#endif
return false;
}
-static inline void cpu_handle_interrupt(CPUState *cpu,
+static inline bool cpu_handle_interrupt(CPUState *cpu,
TranslationBlock **last_tb)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
cpu->exception_index = EXCP_DEBUG;
- cpu_loop_exit(cpu);
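+        /* Returning true signals a pending exception; the caller now
+         * handles it without longjmp-ing out via cpu_loop_exit(). */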
+ return true;
}
if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
/* Do nothing */
cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
cpu->halted = 1;
cpu->exception_index = EXCP_HLT;
- cpu_loop_exit(cpu);
+ return true;
}
#if defined(TARGET_I386)
else if (interrupt_request & CPU_INTERRUPT_INIT) {
X86CPU *x86_cpu = X86_CPU(cpu);
CPUArchState *env = &x86_cpu->env;
replay_interrupt();
- cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
do_cpu_init(x86_cpu);
cpu->exception_index = EXCP_HALTED;
- cpu_loop_exit(cpu);
+ return true;
}
#else
else if (interrupt_request & CPU_INTERRUPT_RESET) {
replay_interrupt();
cpu_reset(cpu);
- cpu_loop_exit(cpu);
+ return true;
}
#endif
/* The target hook has 3 exit conditions:
False when the interrupt isn't processed,
True when it is, and we should restart on a new TB,
and via longjmp through cpu_loop_exit. */
else {
- replay_interrupt();
if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
+ replay_interrupt();
*last_tb = NULL;
}
/* The target hook may have updated the 'cpu->interrupt_request';
*last_tb = NULL;
}
}
- if (unlikely(cpu->exit_request || replay_has_interrupt())) {
- cpu->exit_request = 0;
+ if (unlikely(atomic_read(&cpu->exit_request) || replay_has_interrupt())) {
+ atomic_set(&cpu->exit_request, 0);
cpu->exception_index = EXCP_INTERRUPT;
- cpu_loop_exit(cpu);
+ return true;
}
+
+ return false;
}
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
{
uintptr_t ret;
- if (unlikely(cpu->exit_request)) {
+ if (unlikely(atomic_read(&cpu->exit_request))) {
return;
}
trace_exec_tb(tb, tb->pc);
ret = cpu_tb_exec(cpu, tb);
- *last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
+ tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
*tb_exit = ret & TB_EXIT_MASK;
switch (*tb_exit) {
case TB_EXIT_REQUESTED:
* have set something else (eg exit_request or
* interrupt_request) which we will handle
* next time around the loop. But we need to
- * ensure the tcg_exit_req read in generated code
+ * ensure the zeroing of tcg_exit_req (see cpu_tb_exec)
* comes before the next read of cpu->exit_request
* or cpu->interrupt_request.
*/
- smp_rmb();
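+            /* A read barrier is not enough: ordering the earlier store to
+             * tcg_exit_req before the loads that follow needs a full
+             * barrier. */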
+ smp_mb();
*last_tb = NULL;
break;
case TB_EXIT_ICOUNT_EXPIRED:
abort();
#else
int insns_left = cpu->icount_decr.u32;
+ *last_tb = NULL;
if (cpu->icount_extra && insns_left >= 0) {
/* Refill decrementer and continue execution. */
cpu->icount_extra += insns_left;
} else {
if (insns_left > 0) {
/* Execute remaining instructions. */
- cpu_exec_nocache(cpu, insns_left, *last_tb, false);
+ cpu_exec_nocache(cpu, insns_left, tb, false);
align_clocks(sc, cpu);
}
cpu->exception_index = EXCP_INTERRUPT;
- *last_tb = NULL;
cpu_loop_exit(cpu);
}
break;
#endif
}
default:
+ *last_tb = tb;
break;
}
}
*/
init_delay_params(&sc, cpu);
- for(;;) {
- TranslationBlock *tb, *last_tb;
- int tb_exit = 0;
-
- /* prepare setjmp context for exception handling */
- if (sigsetjmp(cpu->jmp_env, 0) == 0) {
- /* if an exception is pending, we execute it here */
- if (cpu_handle_exception(cpu, &ret)) {
- break;
- }
-
- last_tb = NULL; /* forget the last executed TB after exception */
- cpu->tb_flushed = false; /* reset before first TB lookup */
- for(;;) {
- cpu_handle_interrupt(cpu, &last_tb);
- tb = tb_find_fast(cpu, &last_tb, tb_exit);
- cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit, &sc);
- /* Try to align the host and virtual clocks
- if the guest is in advance */
- align_clocks(&sc, cpu);
- } /* for(;;) */
- } else {
+ /* prepare setjmp context for exception handling */
+ if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
- /* Some compilers wrongly smash all local variables after
- * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
- * Reload essential local variables here for those compilers.
- * Newer versions of gcc would complain about this code (-Wclobbered). */
- cpu = current_cpu;
- cc = CPU_GET_CLASS(cpu);
+ /* Some compilers wrongly smash all local variables after
+ * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
+ * Reload essential local variables here for those compilers.
+ * Newer versions of gcc would complain about this code (-Wclobbered). */
+ cpu = current_cpu;
+ cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
- /* Assert that the compiler does not smash local variables. */
- g_assert(cpu == current_cpu);
- g_assert(cc == CPU_GET_CLASS(cpu));
+ /* Assert that the compiler does not smash local variables. */
+ g_assert(cpu == current_cpu);
+ g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
- cpu->can_do_io = 1;
- tb_lock_reset();
+ cpu->can_do_io = 1;
+ tb_lock_reset();
+ }
+
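+    /* cpu_loop_exit() longjmps back to the sigsetjmp above and falls
+     * through into these loops, so the exception and interrupt checks
+     * double as the re-entry points. */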
+ /* if an exception is pending, we execute it here */
+ while (!cpu_handle_exception(cpu, &ret)) {
+ TranslationBlock *last_tb = NULL;
+ int tb_exit = 0;
+
+ while (!cpu_handle_interrupt(cpu, &last_tb)) {
+ TranslationBlock *tb = tb_find(cpu, last_tb, tb_exit);
+ cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit, &sc);
+ /* Try to align the host and virtual clocks
+ if the guest is in advance */
+ align_clocks(&sc, cpu);
}
- } /* for(;;) */
+ }
cc->cpu_exec_exit(cpu);
rcu_read_unlock();