error_setg(errp, "No MTTCG when icount is enabled");
} else {
#ifndef TARGET_SUPPORTS_MTTCG
- error_report("Guest not yet converted to MTTCG - "
- "you may get unexpected results");
+ warn_report("Guest not yet converted to MTTCG - "
+ "you may get unexpected results");
#endif
if (!check_tcg_memory_orders_compatible()) {
- error_report("Guest expects a stronger memory ordering "
- "than the host provides");
+ warn_report("Guest expects a stronger memory ordering "
+ "than the host provides");
error_printf("This may cause strange/hard to debug errors\n");
}
mttcg_enabled = true;
* account executed instructions. This is done by the TCG vCPU
* thread so the main-loop can see time has moved forward.
*/
-void cpu_update_icount(CPUState *cpu)
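+/*
+ * Charge the instructions executed so far to the vCPU's budget and
+ * to the global timers_state.qemu_icount counter.
+ */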
+static void cpu_update_icount_locked(CPUState *cpu)
{
int64_t executed = cpu_get_icount_executed(cpu);
cpu->icount_budget -= executed;
-#ifndef CONFIG_ATOMIC64
+ atomic_set_i64(&timers_state.qemu_icount,
+ timers_state.qemu_icount + executed);
+}
+
+/*
+ * Update the global shared timer_state.qemu_icount to take into
+ * account executed instructions. This is done by the TCG vCPU
+ * thread so the main-loop can see time has moved forward.
+ */
+void cpu_update_icount(CPUState *cpu)
+{
seqlock_write_lock(&timers_state.vm_clock_seqlock,
&timers_state.vm_clock_lock);
-#endif
- atomic_set__nocheck(&timers_state.qemu_icount,
- timers_state.qemu_icount + executed);
-#ifndef CONFIG_ATOMIC64
+ cpu_update_icount_locked(cpu);
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
&timers_state.vm_clock_lock);
-#endif
}
static int64_t cpu_get_icount_raw_locked(void)
exit(1);
}
/* Take into account what has run */
- cpu_update_icount(cpu);
+ cpu_update_icount_locked(cpu);
}
- /* The read is protected by the seqlock, so __nocheck is okay. */
- return atomic_read__nocheck(&timers_state.qemu_icount);
+ /* The read is protected by the seqlock, but needs atomic64 to avoid UB */
+ return atomic_read_i64(&timers_state.qemu_icount);
}
static int64_t cpu_get_icount_locked(void)
{
int64_t icount = cpu_get_icount_raw_locked();
- return atomic_read__nocheck(&timers_state.qemu_icount_bias) + cpu_icount_to_ns(icount);
+ return atomic_read_i64(&timers_state.qemu_icount_bias) +
+ cpu_icount_to_ns(icount);
}
int64_t cpu_get_icount_raw(void)
timers_state.icount_time_shift + 1);
}
last_delta = delta;
- atomic_set__nocheck(&timers_state.qemu_icount_bias,
- cur_icount - (timers_state.qemu_icount
- << timers_state.icount_time_shift));
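+ /* Recompute the bias so that the virtual clock value stays
+ continuous across the change of icount_time_shift. */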
+ atomic_set_i64(&timers_state.qemu_icount_bias,
+ cur_icount - (timers_state.qemu_icount
+ << timers_state.icount_time_shift));
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
&timers_state.vm_clock_lock);
}
seqlock_write_lock(&timers_state.vm_clock_seqlock,
&timers_state.vm_clock_lock);
if (runstate_is_running()) {
- int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
- cpu_get_clock_locked());
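+ /* The vm_clock seqlock is already write-held here, hence the
+ _LOCKED variant of REPLAY_CLOCK. */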
+ int64_t clock = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT,
+ cpu_get_clock_locked());
int64_t warp_delta;
warp_delta = clock - timers_state.vm_clock_warp_start;
int64_t delta = clock - cur_icount;
warp_delta = MIN(warp_delta, delta);
}
- atomic_set__nocheck(&timers_state.qemu_icount_bias,
- timers_state.qemu_icount_bias + warp_delta);
+ atomic_set_i64(&timers_state.qemu_icount_bias,
+ timers_state.qemu_icount_bias + warp_delta);
}
timers_state.vm_clock_warp_start = -1;
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
seqlock_write_lock(&timers_state.vm_clock_seqlock,
&timers_state.vm_clock_lock);
- atomic_set__nocheck(&timers_state.qemu_icount_bias,
- timers_state.qemu_icount_bias + warp);
+ atomic_set_i64(&timers_state.qemu_icount_bias,
+ timers_state.qemu_icount_bias + warp);
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
&timers_state.vm_clock_lock);
return;
}
- /* warp clock deterministically in record/replay mode */
- if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
- return;
- }
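+ /*
+ * Outside of replay, only record the CHECKPOINT_CLOCK_WARP_START
+ * checkpoint once it is known that the warp will actually start.
+ */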
+ if (replay_mode != REPLAY_MODE_PLAY) {
+ if (!all_cpu_threads_idle()) {
+ return;
+ }
- if (!all_cpu_threads_idle()) {
- return;
- }
+ if (qtest_enabled()) {
+ /* When testing, qtest commands advance icount. */
+ return;
+ }
- if (qtest_enabled()) {
- /* When testing, qtest commands advance icount. */
- return;
+ replay_checkpoint(CHECKPOINT_CLOCK_WARP_START);
+ } else {
+ /* warp clock deterministically in record/replay mode */
+ if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
+ /* vCPU is sleeping and the warp can't be started.
+ This is probably a race condition: the notification sent
+ to the vCPU was processed in advance and the vCPU went to
+ sleep. Therefore we have to wake it up to do something. */
+ if (replay_has_checkpoint()) {
+ qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
+ }
+ return;
+ }
}
/* We want to use the earliest deadline from ALL vm_clocks */
*/
seqlock_write_lock(&timers_state.vm_clock_seqlock,
&timers_state.vm_clock_lock);
- atomic_set__nocheck(&timers_state.qemu_icount_bias,
- timers_state.qemu_icount_bias + deadline);
+ atomic_set_i64(&timers_state.qemu_icount_bias,
+ timers_state.qemu_icount_bias + deadline);
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
&timers_state.vm_clock_lock);
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
void cpu_ticks_init(void)
{
seqlock_init(&timers_state.vm_clock_seqlock);
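+ /* The spinlock serializes concurrent writers of the fields
+ protected by the seqlock. */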
+ qemu_spin_init(&timers_state.vm_clock_lock);
vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
cpu_throttle_timer_tick, NULL);
if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
kick_tcg_thread, NULL);
+ }
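+ /* (Re-)arm the timer only if it is not already pending. */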
+ if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) {
timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
}
}
static void stop_tcg_kick_timer(void)
{
assert(!mttcg_enabled);
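+ /* Disarm the timer but keep it allocated for the next start. */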
- if (tcg_kick_vcpu_timer) {
+ if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) {
timer_del(tcg_kick_vcpu_timer);
- tcg_kick_vcpu_timer = NULL;
}
}
process_queued_cpu_work(cpu);
}
-static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
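+/*
+ * In single-threaded (round-robin) TCG mode all vCPUs are serviced
+ * by one thread, so waiting on first_cpu's halt condition is enough;
+ * once woken, pending work is flushed for every vCPU.
+ */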
+static void qemu_tcg_rr_wait_io_event(void)
{
+ CPUState *cpu;
+
while (all_cpu_threads_idle()) {
stop_tcg_kick_timer();
- qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+ qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
}
start_tcg_kick_timer();
- qemu_wait_io_event_common(cpu);
+ CPU_FOREACH(cpu) {
+ qemu_wait_io_event_common(cpu);
+ }
}
static void qemu_wait_io_event(CPUState *cpu)
ret = cpu_exec(cpu);
cpu_exec_end(cpu);
#ifdef CONFIG_PROFILER
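+ /* Account the elapsed time to this context's profile counters;
+ atomic_set keeps concurrent readers of the profile safe. */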
- tcg_time += profile_getclock() - ti;
+ atomic_set(&tcg_ctx->prof.cpu_exec_time,
+ tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
#endif
return ret;
}
atomic_mb_set(&cpu->exit_request, 0);
}
- qemu_tcg_rr_wait_io_event(cpu ? cpu : first_cpu);
+ if (use_icount && all_cpu_threads_idle()) {
+ /*
+ * When all CPUs are sleeping (e.g. in WFI), wake up the main
+ * loop to start the warp timer and avoid a deadlock there.
+ */
+ qemu_notify_event();
+ }
+
+ qemu_tcg_rr_wait_io_event();
deal_with_unplugged_cpus();
}
}
cpu->thread_kicked = true;
err = pthread_kill(cpu->thread->thread, SIG_IPI);
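+ /* A thread that has already exited yields ESRCH; ignore it
+ rather than aborting. */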
- if (err) {
+ if (err && err != ESRCH) {
fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
exit(1);
}
void cpu_stop_current(void)
{
if (current_cpu) {
- qemu_cpu_stop(current_cpu, true);
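+ /* Flag the vCPU and kick it out of the execution loop; the
+ stop completes in the vCPU thread's event-wait path. */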
+ current_cpu->stop = true;
+ cpu_exit(current_cpu);
}
}
return CPU_INFO_ARCH_X86;
case SYS_EMU_TARGET_PPC:
- case SYS_EMU_TARGET_PPCEMB:
case SYS_EMU_TARGET_PPC64:
return CPU_INFO_ARCH_PPC;