*/
/* Needed early for CONFIG_BSD etc. */
-#include "config-host.h"
+#include "qemu/osdep.h"
#include "monitor/monitor.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
+#include "sysemu/block-backend.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qemu/seqlock.h"
#include "qapi-event.h"
#include "hw/nmi.h"
+#include "sysemu/replay.h"
#ifndef _WIN32
#include "qemu/compatfd.h"
ticks = timers_state.cpu_ticks_offset;
if (timers_state.cpu_ticks_enabled) {
- ticks += cpu_get_real_ticks();
+ ticks += cpu_get_host_ticks();
}
if (timers_state.cpu_ticks_prev > ticks) {
/* Here, what the seqlock really protects is cpu_clock_offset. */
seqlock_write_lock(&timers_state.vm_clock_seqlock);
if (!timers_state.cpu_ticks_enabled) {
- timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
+ timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
timers_state.cpu_clock_offset -= get_clock();
timers_state.cpu_ticks_enabled = 1;
}
/* Here, what the seqlock really protects is cpu_clock_offset. */
seqlock_write_lock(&timers_state.vm_clock_seqlock);
if (timers_state.cpu_ticks_enabled) {
- timers_state.cpu_ticks_offset += cpu_get_real_ticks();
+ timers_state.cpu_ticks_offset += cpu_get_host_ticks();
timers_state.cpu_clock_offset = cpu_get_clock_locked();
timers_state.cpu_ticks_enabled = 0;
}
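
For readers following the cpu_get_real_ticks() to cpu_get_host_ticks() rename: the enable/disable pair above is the usual offset-based clock pattern, which is what lets the tick counter freeze while the VM is stopped. A minimal sketch of the same idea, with a hypothetical host_ticks() standing in for the renamed helper:

/* Offset pattern sketch (host_ticks() is a hypothetical stand-in).
 * While enabled, reported = offset + host_ticks(); on disable the
 * current reading is folded into offset so the value stays frozen. */
static int64_t offset;
static bool enabled;

static void ticks_enable(void)
{
    offset -= host_ticks();   /* future reads add host_ticks() back */
    enabled = true;
}

static void ticks_disable(void)
{
    offset += host_ticks();   /* freeze at the current value */
    enabled = false;
}

static int64_t ticks_get(void)
{
    return enabled ? offset + host_ticks() : offset;
}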
return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
-static void icount_warp_rt(void *opaque)
+static void icount_warp_rt(void)
{
/* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
* changes from -1 to another value, so the race here is okay.
*/
seqlock_write_lock(&timers_state.vm_clock_seqlock);
if (runstate_is_running()) {
- int64_t clock = cpu_get_clock_locked();
+ int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
+ cpu_get_clock_locked());
int64_t warp_delta;
warp_delta = clock - vm_clock_warp_start;
}
}
+static void icount_timer_cb(void *opaque)
+{
+ /* No need for a checkpoint because the timer already synchronizes
+ * with CHECKPOINT_CLOCK_VIRTUAL_RT.
+ */
+ icount_warp_rt();
+}
+
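The REPLAY_CLOCK change is what makes the warp deterministic: in record mode the macro reads the host clock and logs the value, while in replay mode it returns the logged value instead. A rough sketch of the intended semantics, not the actual macro body (replay_save_clock() and replay_read_clock() are the names declared in sysemu/replay.h; the control flow here is an assumption):

/* Rough sketch of REPLAY_CLOCK semantics, not the real macro body. */
static int64_t replay_clock_sketch(ReplayClockKind kind, int64_t host_value)
{
    switch (replay_mode) {
    case REPLAY_MODE_RECORD:
        replay_save_clock(kind, host_value);  /* log what we observed */
        return host_value;
    case REPLAY_MODE_PLAY:
        return replay_read_clock(kind);       /* reuse the logged value */
    default:
        return host_value;                    /* normal execution */
    }
}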
void qtest_clock_warp(int64_t dest)
{
int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
-void qemu_clock_warp(QEMUClockType type)
+void qemu_start_warp_timer(void)
{
int64_t clock;
int64_t deadline;
- /*
- * There are too many global variables to make the "warp" behavior
- * applicable to other clocks. But a clock argument removes the
- * need for if statements all over the place.
- */
- if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
+ if (!use_icount) {
+ return;
+ }
+
+ /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
+ * do not fire, so computing the deadline does not make sense.
+ */
+ if (!runstate_is_running()) {
return;
}
- if (icount_sleep) {
- /*
- * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
- * This ensures that the deadline for the timer is computed correctly
- * below.
- * This also makes sure that the insn counter is synchronized before
- * the CPU starts running, in case the CPU is woken by an event other
- * than the earliest QEMU_CLOCK_VIRTUAL timer.
- */
- icount_warp_rt(NULL);
- timer_del(icount_warp_timer);
+ /* warp clock deterministically in record/replay mode */
+ if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
+ return;
}
+
if (!all_cpu_threads_idle()) {
return;
}
if (qtest_enabled()) {
/* When testing, qtest commands advance icount. */
- return;
+ return;
}
/* We want to use the earliest deadline from ALL vm_clocks */
}
}
+static void qemu_account_warp_timer(void)
+{
+ if (!use_icount || !icount_sleep) {
+ return;
+ }
+
+ /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
+ * do not fire, so computing the deadline does not make sense.
+ */
+ if (!runstate_is_running()) {
+ return;
+ }
+
+ /* warp clock deterministically in record/replay mode */
+ if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
+ return;
+ }
+
+ timer_del(icount_warp_timer);
+ icount_warp_rt();
+}
+
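Together with qemu_start_warp_timer() above, this completes the split of the old qemu_clock_warp(): the start variant arms the warp timer when the CPUs go idle, while the account variant is called from the CPU loop (see the call further down) to settle any warped time before execution resumes. A sketch of the intended calling pattern, with the loop shape assumed:

/* Shape of the main-loop interaction; run_vcpus_for_a_while() and
 * wait_for_io_event() are hypothetical stand-ins. */
for (;;) {
    qemu_account_warp_timer();      /* settle pending warp, delete timer */
    run_vcpus_for_a_while();        /* execute guest code */
    if (all_cpu_threads_idle()) {
        qemu_start_warp_timer();    /* arm the warp before sleeping */
        wait_for_io_event();
    }
}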
static bool icount_state_needed(void *opaque)
{
return use_icount;
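icount_state_needed() is a vmstate subsection predicate: the subsection is only written to the migration stream when icount is actually in use. For context, a .needed hook is wired up roughly like this (the field layout below is illustrative, not part of this patch):

/* Illustrative subsection using the predicate above. */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icount_state_needed,   /* skipped unless -icount is in use */
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    }
};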
icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
if (icount_sleep) {
icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
- icount_warp_rt, NULL);
+ icount_timer_cb, NULL);
}
icount_align_option = qemu_opt_get_bool(opts, "align", false);
if (icount_align_option && !icount_sleep) {
- error_setg(errp, "align=on and sleep=no are incompatible");
+ error_setg(errp, "align=on and sleep=off are incompatible");
}
if (strcmp(option, "auto") != 0) {
errno = 0;
} else if (icount_align_option) {
error_setg(errp, "shift=auto and align=on are incompatible");
} else if (!icount_sleep) {
- error_setg(errp, "shift=auto and sleep=no are incompatible");
+ error_setg(errp, "shift=auto and sleep=off are incompatible");
}
use_icount = 2;
}
}
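With the messages corrected to match the actual option spelling (sleep=off, not sleep=no), the rejected combinations read naturally against the command line. Illustrative invocations, per the checks above:

qemu-system-x86_64 -icount shift=7,sleep=off            (accepted)
qemu-system-x86_64 -icount shift=auto,sleep=off         (rejected: shift=auto needs sleep)
qemu-system-x86_64 -icount shift=7,sleep=off,align=on   (rejected: align needs sleep)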
-void cpu_clean_all_dirty(void)
-{
- CPUState *cpu;
-
- CPU_FOREACH(cpu) {
- cpu_clean_state(cpu);
- }
-}
-
static int do_vm_stop(RunState state)
{
int ret = 0;
}
bdrv_drain_all();
- ret = bdrv_flush_all();
+ ret = blk_flush_all();
return ret;
}
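The switch from bdrv_flush_all() to blk_flush_all() makes the stop path flush through the BlockBackend layer rather than iterating bare BlockDriverState nodes. A hedged sketch of what such an iteration looks like, using the public blk_next()/blk_flush() helpers; the function name here is hypothetical and the real implementation lives with the other blk_* helpers:

/* Hedged sketch of a flush-all over BlockBackends. */
static int flush_all_backends(void)
{
    BlockBackend *blk = NULL;
    int result = 0;

    while ((blk = blk_next(blk)) != NULL) {
        int ret = blk_flush(blk);
        if (ret < 0 && result == 0) {
            result = ret;    /* report the first error, keep flushing */
        }
    }
    return result;
}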
if (cpu->stop) {
cpu->stop = false;
cpu->stopped = true;
- qemu_cond_signal(&qemu_pause_cond);
+ qemu_cond_broadcast(&qemu_pause_cond);
}
flush_queued_work(cpu);
cpu->thread_kicked = false;
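The qemu_cond_signal() to qemu_cond_broadcast() changes in this patch matter because more than one thread can block on qemu_pause_cond at once; a single signal could wake the wrong waiter and lose the wakeup another one needed. The waiter side looks roughly like this (condition and mutex names are from the patch; the predicate is assumed):

/* Waiter sketch: each waiter re-checks its own predicate in a loop,
 * so waking all of them with broadcast is safe. */
qemu_mutex_lock(&qemu_global_mutex);
while (!all_vcpus_paused()) {             /* assumed predicate */
    qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
}
qemu_mutex_unlock(&qemu_global_mutex);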
static void qemu_tcg_wait_io_event(CPUState *cpu)
{
while (all_cpu_threads_idle()) {
- /* Start accounting real time to the virtual clock if the CPUs
- are idle. */
- qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
}
static QemuCond *tcg_halt_cond;
static QemuThread *tcg_cpu_thread;
- tcg_cpu_address_space_init(cpu, cpu->as);
-
/* share a single thread for all cpus with TCG */
if (!tcg_cpu_thread) {
cpu->thread = g_malloc0(sizeof(QemuThread));
cpu->nr_cores = smp_cores;
cpu->nr_threads = smp_threads;
cpu->stopped = true;
+
+ if (!cpu->as) {
+ /* If the target cpu hasn't set up any address spaces itself,
+ * give it the default one.
+ */
+ AddressSpace *as = address_space_init_shareable(cpu->memory,
+ "cpu-memory");
+ cpu->num_ases = 1;
+ cpu_address_space_init(cpu, as, 0);
+ }
+
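The new block gives every CPU exactly one address space unless the target already configured its own. A target that needs several spaces would instead set cpu->num_ases and call cpu_address_space_init() itself before this point, along the lines of the hypothetical sketch below (secure_mr and normal_mr are assumed MemoryRegion pointers):

/* Hypothetical target with separate secure/non-secure spaces. */
cpu->num_ases = 2;
cpu_address_space_init(cpu,
                       address_space_init_shareable(secure_mr, "cpu-secure"),
                       0);
cpu_address_space_init(cpu,
                       address_space_init_shareable(normal_mr, "cpu-memory"),
                       1);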
if (kvm_enabled()) {
qemu_kvm_start_vcpu(cpu);
} else if (tcg_enabled()) {
current_cpu->stop = false;
current_cpu->stopped = true;
cpu_exit(current_cpu);
- qemu_cond_signal(&qemu_pause_cond);
+ qemu_cond_broadcast(&qemu_pause_cond);
}
}
return vm_stop(state);
} else {
runstate_set(state);
+
+ bdrv_drain_all();
/* Make sure to return an error if the flush in a previous vm_stop()
* failed. */
- return bdrv_flush_all();
+ return blk_flush_all();
}
}
+
+static int64_t tcg_get_icount_limit(void)
+{
+ int64_t deadline;
+
+ if (replay_mode != REPLAY_MODE_PLAY) {
+ deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
+
+ /* Maintain prior (possibly buggy) behaviour where if no deadline
+ * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
+ * INT32_MAX nanoseconds ahead, we still use INT32_MAX
+ * nanoseconds.
+ */
+ if ((deadline < 0) || (deadline > INT32_MAX)) {
+ deadline = INT32_MAX;
+ }
+
+ return qemu_icount_round(deadline);
+ } else {
+ return replay_get_instructions();
+ }
+}
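Combined with qemu_icount_round() shown earlier, (count + (1 << icount_time_shift) - 1) >> icount_time_shift, the limit is the number of instructions the CPU may execute before the next QEMU_CLOCK_VIRTUAL deadline. A worked example, assuming icount_time_shift is 10 (one instruction per 1024 ns):

/* Worked example, assuming icount_time_shift == 10. */
int64_t deadline = 2500;                 /* ns until the next timer */
int64_t insns = (2500 + 1023) >> 10;     /* rounds up to 3 instructions */

/* No timer pending: deadline is -1, clamped to INT32_MAX above, so the
 * CPU gets the maximum budget rather than running unbounded. */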
#endif
if (use_icount) {
int64_t count;
- int64_t deadline;
int decr;
timers_state.qemu_icount -= (cpu->icount_decr.u16.low
    + cpu->icount_extra);
cpu->icount_decr.u16.low = 0;
cpu->icount_extra = 0;
- deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
-
- /* Maintain prior (possibly buggy) behaviour where if no deadline
- * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
- * INT32_MAX nanoseconds ahead, we still use INT32_MAX
- * nanoseconds.
- */
- if ((deadline < 0) || (deadline > INT32_MAX)) {
- deadline = INT32_MAX;
- }
-
- count = qemu_icount_round(deadline);
+ count = tcg_get_icount_limit();
timers_state.qemu_icount += count;
decr = (count > 0xffff) ? 0xffff : count;
count -= decr;
    + cpu->icount_extra);
cpu->icount_decr.u32 = 0;
cpu->icount_extra = 0;
+ replay_account_executed_instructions();
}
return ret;
}
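The decr/icount_extra split exists because the per-CPU countdown (icount_decr.u16.low) is only 16 bits wide: at most 0xffff instructions go into the low counter, and the remainder waits in icount_extra. A worked example for a budget larger than 16 bits:

/* Worked example: budget of 70000 instructions. */
int64_t count = 70000;
int decr = (count > 0xffff) ? 0xffff : count; /* 65535 into u16.low */
count -= decr;                                /* 4465 into icount_extra */
/* When u16.low reaches zero the CPU exits its loop, the leftover is
 * folded back into qemu_icount (the subtraction in the hunk above),
 * and a fresh slice is handed out. */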
int r;
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
- qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
+ qemu_account_warp_timer();
if (next_cpu == NULL) {
next_cpu = first_cpu;
info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
- info->value->has_pc = true;
- info->value->pc = env->eip + env->segs[R_CS].base;
+ info->value->arch = CPU_INFO_ARCH_X86;
+ info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
- info->value->has_nip = true;
- info->value->nip = env->nip;
+ info->value->arch = CPU_INFO_ARCH_PPC;
+ info->value->u.ppc.nip = env->nip;
#elif defined(TARGET_SPARC)
- info->value->has_pc = true;
- info->value->pc = env->pc;
- info->value->has_npc = true;
- info->value->npc = env->npc;
+ info->value->arch = CPU_INFO_ARCH_SPARC;
+ info->value->u.q_sparc.pc = env->pc;
+ info->value->u.q_sparc.npc = env->npc;
#elif defined(TARGET_MIPS)
- info->value->has_PC = true;
- info->value->PC = env->active_tc.PC;
+ info->value->arch = CPU_INFO_ARCH_MIPS;
+ info->value->u.q_mips.PC = env->active_tc.PC;
#elif defined(TARGET_TRICORE)
- info->value->has_PC = true;
- info->value->PC = env->PC;
+ info->value->arch = CPU_INFO_ARCH_TRICORE;
+ info->value->u.tricore.PC = env->PC;
+#else
+ info->value->arch = CPU_INFO_ARCH_OTHER;
#endif
/* XXX: waiting for the qapi to support GSList */
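Moving from optional has_pc/pc style fields to a flat union keyed by CpuInfoArch means consumers dispatch on the discriminator instead of probing has_* flags. A hedged consumer sketch in the monitor-printing style (the exact call sites are not part of this patch; the q_ prefixes in q_sparc and q_mips come from QAPI escaping identifiers that collide with predefined macros on those hosts):

/* Consumer sketch: dispatch on the discriminator, not has_* flags. */
switch (info->value->arch) {
case CPU_INFO_ARCH_X86:
    monitor_printf(mon, " pc=0x%" PRIx64, info->value->u.x86.pc);
    break;
case CPU_INFO_ARCH_PPC:
    monitor_printf(mon, " nip=0x%" PRIx64, info->value->u.ppc.nip);
    break;
case CPU_INFO_ARCH_SPARC:
    monitor_printf(mon, " pc=0x%" PRIx64 " npc=0x%" PRIx64,
                   info->value->u.q_sparc.pc, info->value->u.q_sparc.npc);
    break;
default:
    break;
}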