#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
+#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
uint8_t *tb_ptr = itb->tc_ptr;
qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
- "Trace %p [" TARGET_FMT_lx "] %s\n",
- itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));
+ "Trace %p [%d: " TARGET_FMT_lx "] %s\n",
+ itb->tc_ptr, cpu->cpu_index, itb->pc,
+ lookup_symbol(itb->pc));
#if defined(DEBUG_DISAS)
- if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
+ if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
+ && qemu_log_in_addr_range(itb->pc)) {
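+ /* Hold the log lock so this multi-line register dump is not
+ * interleaved with log output from other threads. */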
+ qemu_log_lock();
#if defined(TARGET_I386)
log_cpu_state(cpu, CPU_DUMP_CCOP);
-#elif defined(TARGET_M68K)
- /* ??? Should not modify env state for dumping. */
- cpu_m68k_flush_flags(env, env->cc_op);
- env->cc_op = CC_OP_FLAGS;
- env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
- log_cpu_state(cpu, 0);
#else
log_cpu_state(cpu, 0);
#endif
+ qemu_log_unlock();
}
#endif /* DEBUG_DISAS */
/* We were asked to stop executing TBs (probably a pending
* interrupt). We've now stopped, so clear the flag.
*/
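+ /* tcg_exit_req may be set concurrently by cpu_exit() from another
+ * thread, so access it atomically. */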
- cpu->tcg_exit_req = 0;
+ atomic_set(&cpu->tcg_exit_req, 0);
}
return ret;
}
TranslationBlock *orig_tb, bool ignore_icount)
{
TranslationBlock *tb;
- bool old_tb_flushed;
/* Should never happen.
We only end up here when an existing TB is too long. */
if (max_cycles > CF_COUNT_MASK)
max_cycles = CF_COUNT_MASK;
- old_tb_flushed = cpu->tb_flushed;
- cpu->tb_flushed = false;
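+ /* tb_gen_code() modifies the translation buffer and hash table,
+ * so it must run under tb_lock. */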
+ tb_lock();
tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
max_cycles | CF_NOCACHE
| (ignore_icount ? CF_IGNORE_ICOUNT : 0));
- tb->orig_tb = cpu->tb_flushed ? NULL : orig_tb;
- cpu->tb_flushed |= old_tb_flushed;
+ tb->orig_tb = orig_tb;
+ tb_unlock();
+
/* execute the generated code */
trace_exec_tb_nocache(tb, tb->pc);
cpu_tb_exec(cpu, tb);
+
+ tb_lock();
tb_phys_invalidate(tb, -1);
tb_free(tb);
+ tb_unlock();
}
#endif
-static TranslationBlock *tb_find_physical(CPUState *cpu,
- target_ulong pc,
- target_ulong cs_base,
- uint32_t flags)
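+/* Translate and execute a single guest instruction as a throwaway
+ * (CF_NOCACHE) TB; used by the exclusive-execution path below. */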
+static void cpu_exec_step(CPUState *cpu)
{
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
- TranslationBlock *tb, **tb_hash_head, **ptb1;
- unsigned int h;
- tb_page_addr_t phys_pc, phys_page1;
-
- /* find translated block using physical mappings */
- phys_pc = get_page_addr_code(env, pc);
- phys_page1 = phys_pc & TARGET_PAGE_MASK;
- h = tb_phys_hash_func(phys_pc);
-
- /* Start at head of the hash entry */
- ptb1 = tb_hash_head = &tcg_ctx.tb_ctx.tb_phys_hash[h];
- tb = *ptb1;
-
- while (tb) {
- if (tb->pc == pc &&
- tb->page_addr[0] == phys_page1 &&
- tb->cs_base == cs_base &&
- tb->flags == flags) {
-
- if (tb->page_addr[1] == -1) {
- /* done, we have a match */
- break;
- } else {
- /* check next page if needed */
- target_ulong virt_page2 = (pc & TARGET_PAGE_MASK) +
- TARGET_PAGE_SIZE;
- tb_page_addr_t phys_page2 = get_page_addr_code(env, virt_page2);
-
- if (tb->page_addr[1] == phys_page2) {
- break;
- }
- }
- }
-
- ptb1 = &tb->phys_hash_next;
- tb = *ptb1;
- }
+ TranslationBlock *tb;
+ target_ulong cs_base, pc;
+ uint32_t flags;
- if (tb) {
- /* Move the TB to the head of the list */
- *ptb1 = tb->phys_hash_next;
- tb->phys_hash_next = *tb_hash_head;
- *tb_hash_head = tb;
- }
- return tb;
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
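+ /* max_cycles = 1: translate exactly one instruction, uncached and
+ * without icount accounting. */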
+ tb = tb_gen_code(cpu, pc, cs_base, flags,
+ 1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
+ tb->orig_tb = NULL;
+ /* execute the generated code */
+ trace_exec_tb_nocache(tb, pc);
+ cpu_tb_exec(cpu, tb);
+ tb_phys_invalidate(tb, -1);
+ tb_free(tb);
}
-static TranslationBlock *tb_find_slow(CPUState *cpu,
- target_ulong pc,
- target_ulong cs_base,
- uint32_t flags)
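+/* Step one instruction inside the exclusive section: no other vCPU
+ * executes concurrently, so the step need not use atomic code. */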
+void cpu_exec_step_atomic(CPUState *cpu)
{
- TranslationBlock *tb;
+ start_exclusive();
- tb = tb_find_physical(cpu, pc, cs_base, flags);
- if (tb) {
- goto found;
- }
+ /* Since we got here, we know that parallel_cpus must be true. */
+ parallel_cpus = false;
+ cpu_exec_step(cpu);
+ parallel_cpus = true;
-#ifdef CONFIG_USER_ONLY
- /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
- * taken outside tb_lock. Since we're momentarily dropping
- * tb_lock, there's a chance that our desired tb has been
- * translated.
- */
- tb_unlock();
- mmap_lock();
- tb_lock();
- tb = tb_find_physical(cpu, pc, cs_base, flags);
- if (tb) {
- mmap_unlock();
- goto found;
- }
-#endif
+ end_exclusive();
+}
- /* if no translated code available, then translate it now */
- tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
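+/* Lookup key handed to qht_lookup(); tb_cmp() matches candidate TBs
+ * against it. */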
+struct tb_desc {
+ target_ulong pc;
+ target_ulong cs_base;
+ CPUArchState *env;
+ tb_page_addr_t phys_page1;
+ uint32_t flags;
+};
-#ifdef CONFIG_USER_ONLY
- mmap_unlock();
-#endif
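+/* QHT comparison function: return true if the TB @p matches the
+ * lookup description @d. */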
+static bool tb_cmp(const void *p, const void *d)
+{
+ const TranslationBlock *tb = p;
+ const struct tb_desc *desc = d;
+
+ if (tb->pc == desc->pc &&
+ tb->page_addr[0] == desc->phys_page1 &&
+ tb->cs_base == desc->cs_base &&
+ tb->flags == desc->flags &&
+ !atomic_read(&tb->invalid)) {
+ /* check next page if needed */
+ if (tb->page_addr[1] == -1) {
+ return true;
+ } else {
+ tb_page_addr_t phys_page2;
+ target_ulong virt_page2;
-found:
- /* we add the TB in the virtual pc hash table */
- cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
- return tb;
+ virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ phys_page2 = get_page_addr_code(desc->env, virt_page2);
+ if (tb->page_addr[1] == phys_page2) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
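+/* Look the TB up in the global QHT, hashing on physical PC, virtual PC
+ * and flags; returns NULL if no matching TB has been translated yet. */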
+static TranslationBlock *tb_htable_lookup(CPUState *cpu,
+ target_ulong pc,
+ target_ulong cs_base,
+ uint32_t flags)
+{
+ tb_page_addr_t phys_pc;
+ struct tb_desc desc;
+ uint32_t h;
+
+ desc.env = (CPUArchState *)cpu->env_ptr;
+ desc.cs_base = cs_base;
+ desc.flags = flags;
+ desc.pc = pc;
+ phys_pc = get_page_addr_code(desc.env, pc);
+ desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
+ h = tb_hash_func(phys_pc, pc, flags);
+ return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp, &desc, h);
}
-static inline TranslationBlock *tb_find_fast(CPUState *cpu,
- TranslationBlock **last_tb,
- int tb_exit)
+static inline TranslationBlock *tb_find(CPUState *cpu,
+ TranslationBlock *last_tb,
+ int tb_exit)
{
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
+ bool have_tb_lock = false;
/* we record a subset of the CPU state. It will
always be the same before a given translated block
is executed. */
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- tb_lock();
- tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
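+ /* Fast path: lock-free lookup in the per-vCPU virtual-PC cache. */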
+ tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
tb->flags != flags)) {
- tb = tb_find_slow(cpu, pc, cs_base, flags);
+ tb = tb_htable_lookup(cpu, pc, cs_base, flags);
+ if (!tb) {
+ /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
+ * taken outside tb_lock. As system emulation is currently
+ * single-threaded, the locks are NOPs.
+ */
+ mmap_lock();
+ tb_lock();
+ have_tb_lock = true;
+
+ /* There's a chance that our desired tb has been translated while
+ * taking the locks so we check again inside the lock.
+ */
+ tb = tb_htable_lookup(cpu, pc, cs_base, flags);
+ if (!tb) {
+ /* if no translated code available, then translate it now */
+ tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
+ }
+
+ mmap_unlock();
+ }
+
+ /* Add the TB to the virtual-PC hash table for fast lookup */
+ atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
}
- if (cpu->tb_flushed) {
- /* Ensure that no TB jump will be modified as the
- * translation buffer has been flushed.
- */
- *last_tb = NULL;
- cpu->tb_flushed = false;
+#ifndef CONFIG_USER_ONLY
+ /* We don't handle direct jumps when the address mapping changes in
+ * system emulation, so it's not safe to make a direct jump to a TB
+ * spanning two pages because the mapping for the second page can change.
+ */
+ if (tb->page_addr[1] != -1) {
+ last_tb = NULL;
}
+#endif
/* See if we can patch the calling TB. */
- if (*last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
- tb_add_jump(*last_tb, tb_exit, tb);
+ if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
+ if (!have_tb_lock) {
+ tb_lock();
+ have_tb_lock = true;
+ }
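+ /* Don't chain to a TB that is being invalidated; tb->invalid is
+ * set under tb_lock, which we hold here. */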
+ if (!tb->invalid) {
+ tb_add_jump(last_tb, tb_exit, tb);
+ }
+ }
+ if (have_tb_lock) {
+ tb_unlock();
}
- tb_unlock();
return tb;
}
} else if (replay_has_exception()
&& cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
/* try to cause an exception pending in the log */
- TranslationBlock *last_tb = NULL; /* Avoid chaining TBs */
- cpu_exec_nocache(cpu, 1, tb_find_fast(cpu, &last_tb, 0), true);
+ cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0), true);
*ret = -1;
return true;
#endif
if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
*last_tb = NULL;
}
+ /* The target hook may have updated cpu->interrupt_request;
+ * reload the cached value. */
+ interrupt_request = cpu->interrupt_request;
}
- /* Don't use the cached interrupt_request value,
- do_interrupt may have updated the EXITTB flag. */
- if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
+ if (interrupt_request & CPU_INTERRUPT_EXITTB) {
cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
/* ensure that no TB jump will be modified as
the program flow was changed */
*last_tb = NULL;
}
}
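+ /* exit_request can be set from another thread, so read and clear
+ * it atomically. */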
- if (unlikely(cpu->exit_request || replay_has_interrupt())) {
- cpu->exit_request = 0;
+ if (unlikely(atomic_read(&cpu->exit_request) || replay_has_interrupt())) {
+ atomic_set(&cpu->exit_request, 0);
cpu->exception_index = EXCP_INTERRUPT;
cpu_loop_exit(cpu);
}
{
uintptr_t ret;
- if (unlikely(cpu->exit_request)) {
+ if (unlikely(atomic_read(&cpu->exit_request))) {
return;
}
init_delay_params(&sc, cpu);
for(;;) {
- TranslationBlock *tb, *last_tb;
- int tb_exit = 0;
-
/* prepare setjmp context for exception handling */
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
+ TranslationBlock *tb, *last_tb = NULL;
+ int tb_exit = 0;
+
/* if an exception is pending, we execute it here */
if (cpu_handle_exception(cpu, &ret)) {
break;
}
- last_tb = NULL; /* forget the last executed TB after exception */
- cpu->tb_flushed = false; /* reset before first TB lookup */
for(;;) {
cpu_handle_interrupt(cpu, &last_tb);
- tb = tb_find_fast(cpu, &last_tb, tb_exit);
+ tb = tb_find(cpu, last_tb, tb_exit);
cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit, &sc);
/* Try to align the host and virtual clocks
if the guest is in advance */