/* Needed early for CONFIG_BSD etc. */
#include "qemu/osdep.h"
-
+#include "qemu-common.h"
+#include "cpu.h"
#include "monitor/monitor.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
+#include "sysemu/block-backend.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"
+#include "exec/exec-all.h"
#include "qemu/thread.h"
#include "sysemu/cpus.h"
return icount << icount_time_shift;
}
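For context, the shift above is the instruction-to-time conversion: under icount, each executed guest instruction accounts for 2^icount_time_shift virtual nanoseconds (the auto-shift code later in this patch starts from shift == 3, a 125 MIPS guess). A minimal sketch of the conversion, with icount_to_ns_sketch as a hypothetical name:

    /* Hypothetical helper mirroring the return statement above: icount
     * maps executed instructions to virtual nanoseconds, one instruction
     * per 2^shift ns.  With shift == 3, 1000 instructions advance
     * QEMU_CLOCK_VIRTUAL by 8000 ns.
     */
    static inline int64_t icount_to_ns_sketch(int64_t icount, int shift)
    {
        return icount << shift;
    }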
-/* return the host CPU cycle counter and handle stop/restart */
-/* Caller must hold the BQL */
+/* Return the time elapsed in the VM between vm_start and vm_stop.
+ * Unless icount is active, cpu_get_ticks() uses units of the host CPU
+ * cycle counter.
+ *
+ * Caller must hold the BQL.
+ */
int64_t cpu_get_ticks(void)
{
int64_t ticks;
static int64_t cpu_get_clock_locked(void)
{
- int64_t ticks;
+ int64_t time;
- ticks = timers_state.cpu_clock_offset;
+ time = timers_state.cpu_clock_offset;
if (timers_state.cpu_ticks_enabled) {
- ticks += get_clock();
+ time += get_clock();
}
- return ticks;
+ return time;
}
-/* return the host CPU monotonic timer and handle stop/restart */
+/* Return the monotonic time elapsed in the VM, i.e.,
+ * the time between vm_start and vm_stop.
+ */
int64_t cpu_get_clock(void)
{
int64_t ti;
}
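The hunk elides the body of cpu_get_clock(); it presumably follows the same lock-free seqlock read pattern this patch introduces in icount_warp_rt(), along these lines:

    /* Sketch of the reader side, assuming the seqlock API from
     * include/qemu/seqlock.h: retry until no write raced with the read.
     */
    int64_t cpu_get_clock(void)
    {
        int64_t ti;
        unsigned start;

        do {
            start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
            ti = cpu_get_clock_locked();
        } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

        return ti;
    }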
/* enable cpu_get_ticks()
- * Caller must hold BQL which server as mutex for vm_clock_seqlock.
+ * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
*/
void cpu_enable_ticks(void)
{
-    /* Here, the really thing protected by seqlock is cpu_clock_offset. */
+    /* What is really protected by the seqlock here is cpu_clock_offset. */
- seqlock_write_lock(&timers_state.vm_clock_seqlock);
+ seqlock_write_begin(&timers_state.vm_clock_seqlock);
if (!timers_state.cpu_ticks_enabled) {
timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
timers_state.cpu_clock_offset -= get_clock();
timers_state.cpu_ticks_enabled = 1;
}
- seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+ seqlock_write_end(&timers_state.vm_clock_seqlock);
}
/* disable cpu_get_ticks() : the clock is stopped. You must not call
* cpu_get_ticks() after that.
- * Caller must hold BQL which server as mutex for vm_clock_seqlock.
+ * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
*/
void cpu_disable_ticks(void)
{
-    /* Here, the really thing protected by seqlock is cpu_clock_offset. */
+    /* What is really protected by the seqlock here is cpu_clock_offset. */
- seqlock_write_lock(&timers_state.vm_clock_seqlock);
+ seqlock_write_begin(&timers_state.vm_clock_seqlock);
if (timers_state.cpu_ticks_enabled) {
timers_state.cpu_ticks_offset += cpu_get_host_ticks();
timers_state.cpu_clock_offset = cpu_get_clock_locked();
timers_state.cpu_ticks_enabled = 0;
}
- seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+ seqlock_write_end(&timers_state.vm_clock_seqlock);
}
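The enable/disable pair implements a pausable clock: while running, the value is offset plus the live host clock; on stop, the elapsed value is frozen into the offset. A self-contained illustration of the same bookkeeping (stand-alone code, not QEMU API; locking omitted):

    #include <stdint.h>
    #include <time.h>

    typedef struct {
        int64_t offset;   /* frozen value while stopped; -start while running */
        int enabled;
    } PausableClock;

    static int64_t host_now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    static int64_t pclock_get(PausableClock *c)
    {
        return c->enabled ? c->offset + host_now_ns() : c->offset;
    }

    static void pclock_start(PausableClock *c)
    {
        if (!c->enabled) {
            c->offset -= host_now_ns();  /* like cpu_clock_offset -= get_clock() */
            c->enabled = 1;
        }
    }

    static void pclock_stop(PausableClock *c)
    {
        if (c->enabled) {
            c->offset = pclock_get(c);   /* freeze the elapsed time */
            c->enabled = 0;
        }
    }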
/* Correlation between real and virtual time is always going to be
fairly approximate, so ignore small variation.
When the guest is idle real and virtual time will be aligned in
the IO wait loop. */
-#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)
+#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)
static void icount_adjust(void)
{
return;
}
- seqlock_write_lock(&timers_state.vm_clock_seqlock);
+ seqlock_write_begin(&timers_state.vm_clock_seqlock);
cur_time = cpu_get_clock_locked();
cur_icount = cpu_get_icount_locked();
last_delta = delta;
timers_state.qemu_icount_bias = cur_icount
- (timers_state.qemu_icount << icount_time_shift);
- seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+ seqlock_write_end(&timers_state.vm_clock_seqlock);
}
static void icount_adjust_rt(void *opaque)
{
timer_mod(icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
- get_ticks_per_sec() / 10);
+ NANOSECONDS_PER_SECOND / 10);
icount_adjust();
}
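Both adjusters use a self-rearming timer: the callback pushes its own expiry 100 ms (NANOSECONDS_PER_SECOND / 10) ahead before recomputing the shift. The pattern, sketched with a hypothetical callback name:

    /* Periodic-timer pattern used by the icount adjusters: re-arm first,
     * then do the periodic work.
     */
    static void periodic_adjust_sketch(void *opaque)
    {
        QEMUTimer *t = opaque;

        timer_mod(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                     NANOSECONDS_PER_SECOND / 10);
        icount_adjust();
    }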
static void icount_warp_rt(void)
{
+ unsigned seq;
+ int64_t warp_start;
+
/* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
* changes from -1 to another value, so the race here is okay.
*/
- if (atomic_read(&vm_clock_warp_start) == -1) {
+ do {
+ seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
+ warp_start = vm_clock_warp_start;
+ } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
+
+ if (warp_start == -1) {
return;
}
- seqlock_write_lock(&timers_state.vm_clock_seqlock);
+ seqlock_write_begin(&timers_state.vm_clock_seqlock);
if (runstate_is_running()) {
int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
cpu_get_clock_locked());
timers_state.qemu_icount_bias += warp_delta;
}
vm_clock_warp_start = -1;
- seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+ seqlock_write_end(&timers_state.vm_clock_seqlock);
if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
}
-static void icount_dummy_timer(void *opaque)
+static void icount_timer_cb(void *opaque)
{
- (void)opaque;
+ /* No need for a checkpoint because the timer already synchronizes
+ * with CHECKPOINT_CLOCK_VIRTUAL_RT.
+ */
+ icount_warp_rt();
}
void qtest_clock_warp(int64_t dest)
int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
- seqlock_write_lock(&timers_state.vm_clock_seqlock);
+ seqlock_write_begin(&timers_state.vm_clock_seqlock);
timers_state.qemu_icount_bias += warp;
- seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+ seqlock_write_end(&timers_state.vm_clock_seqlock);
qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
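Warping works because, under icount, the virtual clock is a pure function of the instruction count plus a bias, as the icount_adjust() hunk above also relies on; bumping qemu_icount_bias by warp nanoseconds therefore advances QEMU_CLOCK_VIRTUAL by exactly that amount without executing a single guest instruction. In the terms used by this file:

    /* Sketch of how the virtual clock is derived under icount: adding
     * 'warp' to qemu_icount_bias moves this value forward by 'warp' ns.
     */
    static inline int64_t virtual_clock_sketch(void)
    {
        return timers_state.qemu_icount_bias
               + (timers_state.qemu_icount << icount_time_shift);
    }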
-void qemu_clock_warp(QEMUClockType type)
+void qemu_start_warp_timer(void)
{
int64_t clock;
int64_t deadline;
- /*
- * There are too many global variables to make the "warp" behavior
- * applicable to other clocks. But a clock argument removes the
- * need for if statements all over the place.
- */
- if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
+ if (!use_icount) {
return;
}
}
/* warp clock deterministically in record/replay mode */
- if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP)) {
+ if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
return;
}
- if (icount_sleep) {
- /*
- * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
- * This ensures that the deadline for the timer is computed correctly
- * below.
- * This also makes sure that the insn counter is synchronized before
- * the CPU starts running, in case the CPU is woken by an event other
- * than the earliest QEMU_CLOCK_VIRTUAL timer.
- */
- icount_warp_rt();
- timer_del(icount_warp_timer);
- }
if (!all_cpu_threads_idle()) {
return;
}
if (qtest_enabled()) {
/* When testing, qtest commands advance icount. */
- return;
+ return;
}
/* We want to use the earliest deadline from ALL vm_clocks */
* It is useful when we want a deterministic execution time,
* isolated from host latencies.
*/
- seqlock_write_lock(&timers_state.vm_clock_seqlock);
+ seqlock_write_begin(&timers_state.vm_clock_seqlock);
timers_state.qemu_icount_bias += deadline;
- seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+ seqlock_write_end(&timers_state.vm_clock_seqlock);
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
} else {
/*
* you will not be sending network packets continuously instead of
* every 100ms.
*/
- seqlock_write_lock(&timers_state.vm_clock_seqlock);
+ seqlock_write_begin(&timers_state.vm_clock_seqlock);
if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
vm_clock_warp_start = clock;
}
- seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+ seqlock_write_end(&timers_state.vm_clock_seqlock);
timer_mod_anticipate(icount_warp_timer, clock + deadline);
}
} else if (deadline == 0) {
}
}
+static void qemu_account_warp_timer(void)
+{
+ if (!use_icount || !icount_sleep) {
+ return;
+ }
+
+ /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
+ * do not fire, so computing the deadline does not make sense.
+ */
+ if (!runstate_is_running()) {
+ return;
+ }
+
+ /* warp clock deterministically in record/replay mode */
+ if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
+ return;
+ }
+
+ timer_del(icount_warp_timer);
+ icount_warp_rt();
+}
+
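The split separates arming from accounting: qemu_start_warp_timer() now checks all_cpu_threads_idle() itself and can be invoked whenever a QEMU_CLOCK_VIRTUAL timer is (re)armed, while qemu_account_warp_timer() runs in the CPU loop before guest code (see the tcg_exec_all hunk below). A rough sketch of the pairing, a hedged reading of the new control flow rather than an exact call graph:

    static void cpu_loop_iteration_sketch(void)
    {
        /* settle any pending warp before running guest code */
        qemu_account_warp_timer();

        /* ... execute vCPUs ... */

        /* once everything is idle, arming a virtual timer leads to
         * qemu_start_warp_timer(), which keeps virtual time moving */
        qemu_start_warp_timer();
    }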
static bool icount_state_needed(void *opaque)
{
return use_icount;
void cpu_ticks_init(void)
{
- seqlock_init(&timers_state.vm_clock_seqlock, NULL);
+ seqlock_init(&timers_state.vm_clock_seqlock);
vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
cpu_throttle_timer_tick, NULL);
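The seqlock_init() signature change drops the mutex argument: writers now serialize externally, here via the BQL, and begin/end merely bump the sequence counter around the update. The write-side pattern used throughout this file, sketched with a hypothetical update:

    /* Writer side after the API change; caller must hold the BQL. */
    static void clock_offset_update_sketch(int64_t delta)
    {
        seqlock_write_begin(&timers_state.vm_clock_seqlock);
        timers_state.cpu_clock_offset += delta;   /* protected update */
        seqlock_write_end(&timers_state.vm_clock_seqlock);
    }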
icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
if (icount_sleep) {
icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
- icount_dummy_timer, NULL);
+ icount_timer_cb, NULL);
}
icount_align_option = qemu_opt_get_bool(opts, "align", false);
if (icount_align_option && !icount_sleep) {
- error_setg(errp, "align=on and sleep=no are incompatible");
+ error_setg(errp, "align=on and sleep=off are incompatible");
}
if (strcmp(option, "auto") != 0) {
errno = 0;
} else if (icount_align_option) {
error_setg(errp, "shift=auto and align=on are incompatible");
} else if (!icount_sleep) {
- error_setg(errp, "shift=auto and sleep=no are incompatible");
+ error_setg(errp, "shift=auto and sleep=off are incompatible");
}
use_icount = 2;
icount_adjust_vm, NULL);
timer_mod(icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
- get_ticks_per_sec() / 10);
+ NANOSECONDS_PER_SECOND / 10);
}
/***********************************************************/
}
bdrv_drain_all();
- ret = bdrv_flush_all();
+ ret = blk_flush_all();
return ret;
}
raise(SIGBUS);
sigemptyset(&set);
sigaddset(&set, SIGBUS);
- sigprocmask(SIG_UNBLOCK, &set, NULL);
+ pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}
perror("Failed to re-raise SIGBUS!\n");
abort();
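POSIX leaves sigprocmask() unspecified in multi-threaded processes; pthread_sigmask() is the per-thread equivalent, which matters here because the SIGBUS re-raise runs on a vCPU thread. The corrected pattern in isolation:

    #include <pthread.h>
    #include <signal.h>

    /* Unblock SIGBUS for the calling thread only. */
    static void unblock_sigbus_sketch(void)
    {
        sigset_t set;

        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    }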
qemu_cpu_kick(cpu);
}
+static void qemu_kvm_destroy_vcpu(CPUState *cpu)
+{
+ if (kvm_destroy_vcpu(cpu) < 0) {
+ error_report("kvm_destroy_vcpu failed");
+ exit(EXIT_FAILURE);
+ }
+}
+
+static void qemu_tcg_destroy_vcpu(CPUState *cpu)
+{
+}
+
static void flush_queued_work(CPUState *cpu)
{
struct qemu_work_item *wi;
static void qemu_tcg_wait_io_event(CPUState *cpu)
{
while (all_cpu_threads_idle()) {
- /* Start accounting real time to the virtual clock if the CPUs
- are idle. */
- qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
}
cpu->created = true;
qemu_cond_signal(&qemu_cpu_cond);
- while (1) {
+ do {
if (cpu_can_run(cpu)) {
r = kvm_cpu_exec(cpu);
if (r == EXCP_DEBUG) {
}
}
qemu_kvm_wait_io_event(cpu);
- }
+ } while (!cpu->unplug || cpu_can_run(cpu));
+ qemu_kvm_destroy_vcpu(cpu);
+ cpu->created = false;
+ qemu_cond_signal(&qemu_cpu_cond);
+ qemu_mutex_unlock_iothread();
return NULL;
}
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
CPUState *cpu = arg;
+ CPUState *remove_cpu = NULL;
rcu_register_thread();
}
}
qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
+ CPU_FOREACH(cpu) {
+ if (cpu->unplug && !cpu_can_run(cpu)) {
+ remove_cpu = cpu;
+ break;
+ }
+ }
+ if (remove_cpu) {
+ qemu_tcg_destroy_vcpu(remove_cpu);
+ remove_cpu->created = false;
+ qemu_cond_signal(&qemu_cpu_cond);
+ remove_cpu = NULL;
+ }
}
return NULL;
}
}
+void cpu_remove(CPUState *cpu)
+{
+ cpu->stop = true;
+ cpu->unplug = true;
+ qemu_cpu_kick(cpu);
+}
+
+void cpu_remove_sync(CPUState *cpu)
+{
+ cpu_remove(cpu);
+ while (cpu->created) {
+ qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+ }
+}
+
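cpu_remove() is the asynchronous request; cpu_remove_sync() additionally waits on qemu_cpu_cond until the vCPU thread clears cpu->created on its way out (see the thread functions above). A hypothetical unplug handler would drive it like this:

    /* Hypothetical device-unplug path built on the new API. */
    static void cpu_unplug_sketch(CPUState *cs)
    {
        /* kicks the vCPU thread and blocks until it has torn down */
        cpu_remove_sync(cs);
        /* from here on the thread is gone and the CPUState can be
         * unrealized by the hotplug machinery */
    }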
/* For temporary buffers for forming a name */
#define VCPU_THREAD_NAME_SIZE 16
bdrv_drain_all();
/* Make sure to return an error if the flush in a previous vm_stop()
* failed. */
- return bdrv_flush_all();
+ return blk_flush_all();
}
}
int r;
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
- qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
+ qemu_account_warp_timer();
if (next_cpu == NULL) {
next_cpu = first_cpu;
break;
}
} else if (cpu->stop || cpu->stopped) {
+ if (cpu->unplug) {
+ next_cpu = CPU_NEXT(cpu);
+ }
break;
}
}
info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
info->value->arch = CPU_INFO_ARCH_X86;
- info->value->u.x86 = g_new0(CpuInfoX86, 1);
- info->value->u.x86->pc = env->eip + env->segs[R_CS].base;
+ info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
info->value->arch = CPU_INFO_ARCH_PPC;
- info->value->u.ppc = g_new0(CpuInfoPPC, 1);
- info->value->u.ppc->nip = env->nip;
+ info->value->u.ppc.nip = env->nip;
#elif defined(TARGET_SPARC)
info->value->arch = CPU_INFO_ARCH_SPARC;
- info->value->u.q_sparc = g_new0(CpuInfoSPARC, 1);
- info->value->u.q_sparc->pc = env->pc;
- info->value->u.q_sparc->npc = env->npc;
+ info->value->u.q_sparc.pc = env->pc;
+ info->value->u.q_sparc.npc = env->npc;
#elif defined(TARGET_MIPS)
info->value->arch = CPU_INFO_ARCH_MIPS;
- info->value->u.q_mips = g_new0(CpuInfoMIPS, 1);
- info->value->u.q_mips->PC = env->active_tc.PC;
+ info->value->u.q_mips.PC = env->active_tc.PC;
#elif defined(TARGET_TRICORE)
info->value->arch = CPU_INFO_ARCH_TRICORE;
- info->value->u.tricore = g_new0(CpuInfoTricore, 1);
- info->value->u.tricore->PC = env->PC;
+ info->value->u.tricore.PC = env->PC;
#else
info->value->arch = CPU_INFO_ARCH_OTHER;
- info->value->u.other = g_new0(CpuInfoOther, 1);
#endif
/* XXX: waiting for the qapi to support GSList */
void qmp_inject_nmi(Error **errp)
{
-#if defined(TARGET_I386)
- CPUState *cs;
-
- CPU_FOREACH(cs) {
- X86CPU *cpu = X86_CPU(cs);
-
- if (!cpu->apic_state) {
- cpu_interrupt(cs, CPU_INTERRUPT_NMI);
- } else {
- apic_deliver_nmi(cpu->apic_state);
- }
- }
-#else
nmi_monitor_handle(monitor_get_cpu_index(), errp);
-#endif
}
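With the TARGET_I386 special case gone, injection always goes through the generic NMI interface: nmi_monitor_handle() searches the machine for TYPE_NMI implementations and invokes their handler. A board opts in roughly like this (sketch, assuming the NMIClass layout in hw/nmi.h; names my_machine_* are hypothetical):

    #include "hw/nmi.h"

    /* A machine supporting the "nmi" command implements the
     * nmi_monitor_handler hook of the NMI interface.
     */
    static void my_machine_nmi(NMIState *n, int cpu_index, Error **errp)
    {
        /* deliver the NMI to the right CPU/APIC for this board */
    }

    static void my_machine_class_init(ObjectClass *oc, void *data)
    {
        NMIClass *nc = NMI_CLASS(oc);

        nc->nmi_monitor_handler = my_machine_nmi;
    }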
void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)