uint8_t *tb_ptr = itb->tc_ptr;
qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
- "Trace %p [" TARGET_FMT_lx "] %s\n",
- itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));
+ "Trace %p [%d: " TARGET_FMT_lx "] %s\n",
+ itb->tc_ptr, cpu->cpu_index, itb->pc,
+ lookup_symbol(itb->pc));
#if defined(DEBUG_DISAS)
- if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
+ if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
+ && qemu_log_in_addr_range(itb->pc)) {
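+ /* Hold the log lock so the multi-line register dump below is not
+ * interleaved with log output from other threads. */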
+ qemu_log_lock();
#if defined(TARGET_I386)
log_cpu_state(cpu, CPU_DUMP_CCOP);
-#elif defined(TARGET_M68K)
- /* ??? Should not modify env state for dumping. */
- cpu_m68k_flush_flags(env, env->cc_op);
- env->cc_op = CC_OP_FLAGS;
- env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
- log_cpu_state(cpu, 0);
#else
log_cpu_state(cpu, 0);
#endif
+ qemu_log_unlock();
}
#endif /* DEBUG_DISAS */
/* We were asked to stop executing TBs (probably a pending
* interrupt). We've now stopped, so clear the flag.
*/
- cpu->tcg_exit_req = 0;
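+ /* tcg_exit_req can also be set from another thread via cpu_exit(),
+ * so clear it with an atomic access. */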
+ atomic_set(&cpu->tcg_exit_req, 0);
}
return ret;
}
TranslationBlock *orig_tb, bool ignore_icount)
{
TranslationBlock *tb;
- bool old_tb_flushed;
/* Should never happen.
We only end up here when an existing TB is too long. */
if (max_cycles > CF_COUNT_MASK)
max_cycles = CF_COUNT_MASK;
- old_tb_flushed = cpu->tb_flushed;
- cpu->tb_flushed = false;
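+ /* tb_gen_code() modifies the TB hash table and the code buffer,
+ * so it must run under tb_lock. */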
+ tb_lock();
tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
max_cycles | CF_NOCACHE
| (ignore_icount ? CF_IGNORE_ICOUNT : 0));
- tb->orig_tb = cpu->tb_flushed ? NULL : orig_tb;
- cpu->tb_flushed |= old_tb_flushed;
+ tb->orig_tb = orig_tb;
+ tb_unlock();
+
/* execute the generated code */
trace_exec_tb_nocache(tb, tb->pc);
cpu_tb_exec(cpu, tb);
+
+ tb_lock();
tb_phys_invalidate(tb, -1);
tb_free(tb);
+ tb_unlock();
}
#endif
+static void cpu_exec_step(CPUState *cpu)
+{
+ CPUArchState *env = (CPUArchState *)cpu->env_ptr;
+ TranslationBlock *tb;
+ target_ulong cs_base, pc;
+ uint32_t flags;
+
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
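+ /* Translate a single instruction: a count of 1 plus CF_NOCACHE
+ * yields an uncached one-insn TB that is freed again below. */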
+ tb = tb_gen_code(cpu, pc, cs_base, flags,
+ 1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
+ tb->orig_tb = NULL;
+ /* execute the generated code */
+ trace_exec_tb_nocache(tb, pc);
+ cpu_tb_exec(cpu, tb);
+ tb_phys_invalidate(tb, -1);
+ tb_free(tb);
+}
+
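+/* Execute one instruction inside an exclusive section; this services
+ * EXCP_ATOMIC when an atomic operation cannot be emulated safely in a
+ * parallel context. */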
+void cpu_exec_step_atomic(CPUState *cpu)
+{
+ start_exclusive();
+
+ /* Since we got here, we know that parallel_cpus must be true. */
+ parallel_cpus = false;
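+ /* With parallel_cpus false the TB is translated for single-threaded
+ * execution, so atomic operations can take the simpler serial code
+ * paths while we hold the exclusive section. */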
+ cpu_exec_step(cpu);
+ parallel_cpus = true;
+
+ end_exclusive();
+}
+
struct tb_desc {
target_ulong pc;
target_ulong cs_base;
if (tb->pc == desc->pc &&
tb->page_addr[0] == desc->phys_page1 &&
tb->cs_base == desc->cs_base &&
- tb->flags == desc->flags) {
+ tb->flags == desc->flags &&
+ !atomic_read(&tb->invalid)) {
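+ /* The tb->invalid check above rejects TBs that tb_phys_invalidate()
+ * has marked while they are still reachable in the hash table. */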
/* check next page if needed */
if (tb->page_addr[1] == -1) {
return true;
return false;
}
-static TranslationBlock *tb_find_physical(CPUState *cpu,
+static TranslationBlock *tb_htable_lookup(CPUState *cpu,
target_ulong pc,
target_ulong cs_base,
uint32_t flags)
return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp, &desc, h);
}
-static TranslationBlock *tb_find_slow(CPUState *cpu,
- target_ulong pc,
- target_ulong cs_base,
- uint32_t flags)
-{
- TranslationBlock *tb;
-
- tb = tb_find_physical(cpu, pc, cs_base, flags);
- if (tb) {
- goto found;
- }
-
-#ifdef CONFIG_USER_ONLY
- /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
- * taken outside tb_lock. Since we're momentarily dropping
- * tb_lock, there's a chance that our desired tb has been
- * translated.
- */
- tb_unlock();
- mmap_lock();
- tb_lock();
- tb = tb_find_physical(cpu, pc, cs_base, flags);
- if (tb) {
- mmap_unlock();
- goto found;
- }
-#endif
-
- /* if no translated code available, then translate it now */
- tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
-
-#ifdef CONFIG_USER_ONLY
- mmap_unlock();
-#endif
-
-found:
- /* we add the TB in the virtual pc hash table */
- cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
- return tb;
-}
-
-static inline TranslationBlock *tb_find_fast(CPUState *cpu,
- TranslationBlock *last_tb,
- int tb_exit)
+static inline TranslationBlock *tb_find(CPUState *cpu,
+ TranslationBlock *last_tb,
+ int tb_exit)
{
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
+ bool have_tb_lock = false;
/* we record a subset of the CPU state. It will
always be the same before a given translated block
is executed. */
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- tb_lock();
- tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
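+ /* The jmp_cache is now read outside tb_lock; this atomic_rcu_read()
+ * pairs with the atomic_set() below that installs new entries. */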
+ tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
tb->flags != flags)) {
- tb = tb_find_slow(cpu, pc, cs_base, flags);
- }
- if (cpu->tb_flushed) {
- /* Ensure that no TB jump will be modified as the
- * translation buffer has been flushed.
- */
- last_tb = NULL;
- cpu->tb_flushed = false;
+ tb = tb_htable_lookup(cpu, pc, cs_base, flags);
+ if (!tb) {
+
+ /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
+ * taken outside tb_lock. As system emulation is currently
+ * single-threaded, the locks are NOPs.
+ */
+ mmap_lock();
+ tb_lock();
+ have_tb_lock = true;
+
+ /* There's a chance that our desired tb has been translated while
+ * taking the locks, so we check again inside the lock.
+ */
+ tb = tb_htable_lookup(cpu, pc, cs_base, flags);
+ if (!tb) {
+ /* if no translated code available, then translate it now */
+ tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
+ }
+
+ mmap_unlock();
+ }
+
+ /* We add the TB to the virtual-PC hash table for fast lookup */
+ atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
}
#ifndef CONFIG_USER_ONLY
/* We don't take care of direct jumps when address mapping changes in
#endif
/* See if we can patch the calling TB. */
if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
- tb_add_jump(last_tb, tb_exit, tb);
+ if (!have_tb_lock) {
+ tb_lock();
+ have_tb_lock = true;
+ }
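+ /* With tb_lock held, recheck tb->invalid so we never chain to a TB
+ * that was invalidated after the lookup above. */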
+ if (!tb->invalid) {
+ tb_add_jump(last_tb, tb_exit, tb);
+ }
+ }
+ if (have_tb_lock) {
+ tb_unlock();
}
- tb_unlock();
return tb;
}
} else if (replay_has_exception()
&& cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
/* try to cause an exception pending in the log */
- cpu_exec_nocache(cpu, 1, tb_find_fast(cpu, NULL, 0), true);
+ cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0), true);
*ret = -1;
return true;
#endif
*last_tb = NULL;
}
}
- if (unlikely(cpu->exit_request || replay_has_interrupt())) {
- cpu->exit_request = 0;
+ if (unlikely(atomic_read(&cpu->exit_request) || replay_has_interrupt())) {
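+ /* exit_request can be set asynchronously by cpu_exit(), hence the
+ * atomic accesses. */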
+ atomic_set(&cpu->exit_request, 0);
cpu->exception_index = EXCP_INTERRUPT;
cpu_loop_exit(cpu);
}
{
uintptr_t ret;
- if (unlikely(cpu->exit_request)) {
+ if (unlikely(atomic_read(&cpu->exit_request))) {
return;
}
break;
}
- cpu->tb_flushed = false; /* reset before first TB lookup */
for(;;) {
cpu_handle_interrupt(cpu, &last_tb);
- tb = tb_find_fast(cpu, last_tb, tb_exit);
+ tb = tb_find(cpu, last_tb, tb_exit);
cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit, &sc);
/* Try to align the host and virtual clocks
if the guest is in advance */