/* Protected by TimersState seqlock */
static bool icount_sleep = true;
-/* Conversion factor from emulated instructions to virtual clock ticks. */
-static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
int64_t cpu_ticks_prev;
int64_t cpu_ticks_offset;
- /* cpu_clock_offset can be read out of BQL, so protect it with
- * this lock.
+    /* Protect fields that can be read outside the BQL and
+     * written from multiple threads.
*/
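+    /* Writers take vm_clock_lock and publish their updates through
+     * the seqlock; readers either take the spinlock directly or
+     * retry with seqlock_read_begin()/seqlock_read_retry().
+     */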
QemuSeqLock vm_clock_seqlock;
- int64_t cpu_clock_offset;
- int32_t cpu_ticks_enabled;
- int64_t dummy;
+ QemuSpin vm_clock_lock;
+
+ int16_t cpu_ticks_enabled;
+
+ /* Conversion factor from emulated instructions to virtual clock ticks. */
+ int16_t icount_time_shift;
/* Compensate for varying guest execution speed. */
int64_t qemu_icount_bias;
+
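+    /* -1 means the warp timer is not active; see icount_warp_rt(). */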
+ int64_t vm_clock_warp_start;
+ int64_t cpu_clock_offset;
+
/* Only written by TCG thread */
int64_t qemu_icount;
+
/* for adjusting icount */
- int64_t vm_clock_warp_start;
QEMUTimer *icount_rt_timer;
QEMUTimer *icount_vm_timer;
QEMUTimer *icount_warp_timer;
* account executed instructions. This is done by the TCG vCPU
* thread so the main-loop can see time has moved forward.
*/
-void cpu_update_icount(CPUState *cpu)
+static void cpu_update_icount_locked(CPUState *cpu)
{
int64_t executed = cpu_get_icount_executed(cpu);
cpu->icount_budget -= executed;
-#ifdef CONFIG_ATOMIC64
- atomic_set__nocheck(&timers_state.qemu_icount,
- atomic_read__nocheck(&timers_state.qemu_icount) +
- executed);
-#else /* FIXME: we need 64bit atomics to do this safely */
- timers_state.qemu_icount += executed;
-#endif
+ atomic_set_i64(&timers_state.qemu_icount,
+ timers_state.qemu_icount + executed);
}
-int64_t cpu_get_icount_raw(void)
+/*
+ * Update the global shared timers_state.qemu_icount to take into
+ * account executed instructions. This is done by the TCG vCPU
+ * thread so the main-loop can see time has moved forward.
+ */
+void cpu_update_icount(CPUState *cpu)
+{
+ seqlock_write_lock(&timers_state.vm_clock_seqlock,
+ &timers_state.vm_clock_lock);
+ cpu_update_icount_locked(cpu);
+ seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+ &timers_state.vm_clock_lock);
+}
+
+static int64_t cpu_get_icount_raw_locked(void)
{
CPUState *cpu = current_cpu;
exit(1);
}
/* Take into account what has run */
- cpu_update_icount(cpu);
+ cpu_update_icount_locked(cpu);
}
-#ifdef CONFIG_ATOMIC64
- return atomic_read__nocheck(&timers_state.qemu_icount);
-#else /* FIXME: we need 64bit atomics to do this safely */
- return timers_state.qemu_icount;
-#endif
+ /* The read is protected by the seqlock, but needs atomic64 to avoid UB */
+ return atomic_read_i64(&timers_state.qemu_icount);
}
-/* Return the virtual CPU time, based on the instruction counter. */
static int64_t cpu_get_icount_locked(void)
{
- int64_t icount = cpu_get_icount_raw();
- return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
+ int64_t icount = cpu_get_icount_raw_locked();
+ return atomic_read_i64(&timers_state.qemu_icount_bias) +
+ cpu_icount_to_ns(icount);
+}
+
+int64_t cpu_get_icount_raw(void)
+{
+ int64_t icount;
+ unsigned start;
+
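+    /* Loop until we read a consistent snapshot, i.e. no writer
+     * raced with us in between. */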
+ do {
+ start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
+ icount = cpu_get_icount_raw_locked();
+ } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
+
+ return icount;
}
+/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
int64_t icount;
int64_t cpu_icount_to_ns(int64_t icount)
{
- return icount << icount_time_shift;
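+    /* E.g. with icount_time_shift == 3, one executed instruction
+     * accounts for 8 ns of virtual time, i.e. a 125 MIPS guest. */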
+ return icount << atomic_read(&timers_state.icount_time_shift);
+}
+
+static int64_t cpu_get_ticks_locked(void)
+{
+ int64_t ticks = timers_state.cpu_ticks_offset;
+ if (timers_state.cpu_ticks_enabled) {
+ ticks += cpu_get_host_ticks();
+ }
+
+ if (timers_state.cpu_ticks_prev > ticks) {
+        /* Non-increasing ticks may happen if the host uses software suspend. */
+ timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
+ ticks = timers_state.cpu_ticks_prev;
+ }
+
+ timers_state.cpu_ticks_prev = ticks;
+ return ticks;
}
/* return the time elapsed in VM between vm_start and vm_stop. Unless
* icount is active, cpu_get_ticks() uses units of the host CPU cycle
* counter.
- *
- * Caller must hold the BQL
*/
int64_t cpu_get_ticks(void)
{
return cpu_get_icount();
}
- ticks = timers_state.cpu_ticks_offset;
- if (timers_state.cpu_ticks_enabled) {
- ticks += cpu_get_host_ticks();
- }
-
- if (timers_state.cpu_ticks_prev > ticks) {
- /* Note: non increasing ticks may happen if the host uses
- software suspend */
- timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
- ticks = timers_state.cpu_ticks_prev;
- }
-
- timers_state.cpu_ticks_prev = ticks;
+ qemu_spin_lock(&timers_state.vm_clock_lock);
+ ticks = cpu_get_ticks_locked();
+ qemu_spin_unlock(&timers_state.vm_clock_lock);
return ticks;
}
*/
void cpu_enable_ticks(void)
{
- /* Here, the really thing protected by seqlock is cpu_clock_offset. */
- seqlock_write_begin(&timers_state.vm_clock_seqlock);
+ seqlock_write_lock(&timers_state.vm_clock_seqlock,
+ &timers_state.vm_clock_lock);
if (!timers_state.cpu_ticks_enabled) {
timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
timers_state.cpu_clock_offset -= get_clock();
timers_state.cpu_ticks_enabled = 1;
}
- seqlock_write_end(&timers_state.vm_clock_seqlock);
+ seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+ &timers_state.vm_clock_lock);
}
/* disable cpu_get_ticks() : the clock is stopped. You must not call
*/
void cpu_disable_ticks(void)
{
- /* Here, the really thing protected by seqlock is cpu_clock_offset. */
- seqlock_write_begin(&timers_state.vm_clock_seqlock);
+ seqlock_write_lock(&timers_state.vm_clock_seqlock,
+ &timers_state.vm_clock_lock);
if (timers_state.cpu_ticks_enabled) {
timers_state.cpu_ticks_offset += cpu_get_host_ticks();
timers_state.cpu_clock_offset = cpu_get_clock_locked();
timers_state.cpu_ticks_enabled = 0;
}
- seqlock_write_end(&timers_state.vm_clock_seqlock);
+ seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+ &timers_state.vm_clock_lock);
}
/* Correlation between real and virtual time is always going to be
return;
}
- seqlock_write_begin(&timers_state.vm_clock_seqlock);
+ seqlock_write_lock(&timers_state.vm_clock_seqlock,
+ &timers_state.vm_clock_lock);
cur_time = cpu_get_clock_locked();
cur_icount = cpu_get_icount_locked();
/* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
if (delta > 0
&& last_delta + ICOUNT_WOBBLE < delta * 2
- && icount_time_shift > 0) {
+ && timers_state.icount_time_shift > 0) {
/* The guest is getting too far ahead. Slow time down. */
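+        /* Decrementing the shift halves the ns accounted per
+         * instruction, so the virtual clock advances more slowly. */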
- icount_time_shift--;
+ atomic_set(&timers_state.icount_time_shift,
+ timers_state.icount_time_shift - 1);
}
if (delta < 0
&& last_delta - ICOUNT_WOBBLE > delta * 2
- && icount_time_shift < MAX_ICOUNT_SHIFT) {
+ && timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) {
/* The guest is getting too far behind. Speed time up. */
- icount_time_shift++;
+ atomic_set(&timers_state.icount_time_shift,
+ timers_state.icount_time_shift + 1);
}
last_delta = delta;
- timers_state.qemu_icount_bias = cur_icount
- - (timers_state.qemu_icount << icount_time_shift);
- seqlock_write_end(&timers_state.vm_clock_seqlock);
+ atomic_set_i64(&timers_state.qemu_icount_bias,
+ cur_icount - (timers_state.qemu_icount
+ << timers_state.icount_time_shift));
+ seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+ &timers_state.vm_clock_lock);
}
static void icount_adjust_rt(void *opaque)
static int64_t qemu_icount_round(int64_t count)
{
- return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
+ int shift = atomic_read(&timers_state.icount_time_shift);
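+    /* Round up when converting ns to instructions: e.g. with
+     * shift == 3, a 10 ns budget becomes 2 instructions. */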
+ return (count + (1 << shift) - 1) >> shift;
}
static void icount_warp_rt(void)
return;
}
- seqlock_write_begin(&timers_state.vm_clock_seqlock);
+ seqlock_write_lock(&timers_state.vm_clock_seqlock,
+ &timers_state.vm_clock_lock);
if (runstate_is_running()) {
int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
cpu_get_clock_locked());
int64_t delta = clock - cur_icount;
warp_delta = MIN(warp_delta, delta);
}
- timers_state.qemu_icount_bias += warp_delta;
+ atomic_set_i64(&timers_state.qemu_icount_bias,
+ timers_state.qemu_icount_bias + warp_delta);
}
timers_state.vm_clock_warp_start = -1;
- seqlock_write_end(&timers_state.vm_clock_seqlock);
+ seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+ &timers_state.vm_clock_lock);
if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
- seqlock_write_begin(&timers_state.vm_clock_seqlock);
- timers_state.qemu_icount_bias += warp;
- seqlock_write_end(&timers_state.vm_clock_seqlock);
+ seqlock_write_lock(&timers_state.vm_clock_seqlock,
+ &timers_state.vm_clock_lock);
+ atomic_set_i64(&timers_state.qemu_icount_bias,
+ timers_state.qemu_icount_bias + warp);
+ seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+ &timers_state.vm_clock_lock);
qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
return;
}
- /* warp clock deterministically in record/replay mode */
- if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
- return;
- }
+ if (replay_mode != REPLAY_MODE_PLAY) {
+ if (!all_cpu_threads_idle()) {
+ return;
+ }
- if (!all_cpu_threads_idle()) {
- return;
- }
+ if (qtest_enabled()) {
+ /* When testing, qtest commands advance icount. */
+ return;
+ }
- if (qtest_enabled()) {
- /* When testing, qtest commands advance icount. */
- return;
+ replay_checkpoint(CHECKPOINT_CLOCK_WARP_START);
+ } else {
+ /* warp clock deterministically in record/replay mode */
+ if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
+            /* The vCPU is sleeping and the warp can't be started.
+               This is probably a race condition: the notification
+               sent to the vCPU was processed early and the vCPU
+               went back to sleep. Therefore we have to wake it up
+               so it can do something. */
+ if (replay_has_checkpoint()) {
+ qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
+ }
+ return;
+ }
}
/* We want to use the earliest deadline from ALL vm_clocks */
* It is useful when we want a deterministic execution time,
* isolated from host latencies.
*/
- seqlock_write_begin(&timers_state.vm_clock_seqlock);
- timers_state.qemu_icount_bias += deadline;
- seqlock_write_end(&timers_state.vm_clock_seqlock);
+ seqlock_write_lock(&timers_state.vm_clock_seqlock,
+ &timers_state.vm_clock_lock);
+ atomic_set_i64(&timers_state.qemu_icount_bias,
+ timers_state.qemu_icount_bias + deadline);
+ seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+ &timers_state.vm_clock_lock);
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
} else {
/*
* you will not be sending network packets continuously instead of
* every 100ms.
*/
- seqlock_write_begin(&timers_state.vm_clock_seqlock);
+ seqlock_write_lock(&timers_state.vm_clock_seqlock,
+ &timers_state.vm_clock_lock);
if (timers_state.vm_clock_warp_start == -1
|| timers_state.vm_clock_warp_start > clock) {
timers_state.vm_clock_warp_start = clock;
}
- seqlock_write_end(&timers_state.vm_clock_seqlock);
+ seqlock_write_unlock(&timers_state.vm_clock_seqlock,
+ &timers_state.vm_clock_lock);
timer_mod_anticipate(timers_state.icount_warp_timer,
clock + deadline);
}
.minimum_version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_INT64(cpu_ticks_offset, TimersState),
- VMSTATE_INT64(dummy, TimersState),
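+        /* Keep the stream layout of the removed int64_t "dummy"
+         * field for migration compatibility. */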
+ VMSTATE_UNUSED(8),
VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
VMSTATE_END_OF_LIST()
},
void cpu_ticks_init(void)
{
seqlock_init(&timers_state.vm_clock_seqlock);
+ qemu_spin_init(&timers_state.vm_clock_lock);
vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
cpu_throttle_timer_tick, NULL);
}
if (strcmp(option, "auto") != 0) {
errno = 0;
- icount_time_shift = strtol(option, &rem_str, 0);
+ timers_state.icount_time_shift = strtol(option, &rem_str, 0);
if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
error_setg(errp, "icount: Invalid shift value");
}
/* 125MIPS seems a reasonable initial guess at the guest speed.
It will be corrected fairly quickly anyway. */
- icount_time_shift = 3;
+ timers_state.icount_time_shift = 3;
/* Have both realtime and virtual time triggers for speed adjustment.
The realtime trigger catches emulated time passing too slowly,
return;
}
- if (!qemu_in_vcpu_thread() && first_cpu) {
+ if (qemu_in_vcpu_thread()) {
+ /* A CPU is currently running; kick it back out to the
+ * tcg_cpu_exec() loop so it will recalculate its
+ * icount deadline immediately.
+ */
+ qemu_cpu_kick(current_cpu);
+ } else if (first_cpu) {
/* qemu_cpu_kick is not enough to kick a halted CPU out of
* qemu_tcg_wait_io_event. async_run_on_cpu, instead,
* causes cpu_thread_is_idle to return false. This way,
* handle_icount_deadline can run.
+ * If we have no CPUs at all for some reason, we don't
+ * need to do anything.
*/
async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
}
if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
kick_tcg_thread, NULL);
+ }
+ if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) {
timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
}
}
static void stop_tcg_kick_timer(void)
{
assert(!mttcg_enabled);
- if (tcg_kick_vcpu_timer) {
+ if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) {
timer_del(tcg_kick_vcpu_timer);
- tcg_kick_vcpu_timer = NULL;
}
}
}
}
-static int do_vm_stop(RunState state)
+static int do_vm_stop(RunState state, bool send_stop)
{
int ret = 0;
pause_all_vcpus();
runstate_set(state);
vm_state_notify(0, state);
- qapi_event_send_stop(&error_abort);
+ if (send_stop) {
+ qapi_event_send_stop();
+ }
}
bdrv_drain_all();
return ret;
}
+/* Special vm_stop() variant for terminating the process. Historically clients
+ * did not expect a QMP STOP event and so we need to retain compatibility.
+ */
+int vm_shutdown(void)
+{
+ return do_vm_stop(RUN_STATE_SHUTDOWN, false);
+}
+
static bool cpu_can_run(CPUState *cpu)
{
if (cpu->stop) {
insns_left = MIN(0xffff, cpu->icount_budget);
cpu->icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left;
+
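+        /* Hold the replay mutex across the run; it is released in
+         * process_icount_data() once the executed instructions have
+         * been accounted. */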
+ replay_mutex_lock();
}
}
cpu->icount_budget = 0;
replay_account_executed_instructions();
+
+ replay_mutex_unlock();
}
}
int64_t ti;
#endif
+ assert(tcg_enabled());
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
- qemu_mutex_unlock_iothread();
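+    /* The BQL is already dropped by our callers around tcg_cpu_exec(). */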
cpu_exec_start(cpu);
ret = cpu_exec(cpu);
cpu_exec_end(cpu);
- qemu_mutex_lock_iothread();
#ifdef CONFIG_PROFILER
tcg_time += profile_getclock() - ti;
#endif
{
CPUState *cpu = arg;
+ assert(tcg_enabled());
rcu_register_thread();
tcg_register_thread();
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
- CPU_FOREACH(cpu) {
- cpu->thread_id = qemu_get_thread_id();
- cpu->created = true;
- cpu->can_do_io = 1;
- }
+ cpu->thread_id = qemu_get_thread_id();
+ cpu->created = true;
+ cpu->can_do_io = 1;
qemu_cond_signal(&qemu_cpu_cond);
/* wait for initial kick-off after machine start */
cpu->exit_request = 1;
while (1) {
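+        /* The replay mutex must be acquired outside the BQL, so
+         * drop the BQL first and retake it afterwards. */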
+ qemu_mutex_unlock_iothread();
+ replay_mutex_lock();
+ qemu_mutex_lock_iothread();
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
qemu_account_warp_timer();
*/
handle_icount_deadline();
+ replay_mutex_unlock();
+
if (!cpu) {
cpu = first_cpu;
}
if (cpu_can_run(cpu)) {
int r;
+ qemu_mutex_unlock_iothread();
prepare_icount_for_run(cpu);
r = tcg_cpu_exec(cpu);
process_icount_data(cpu);
+ qemu_mutex_lock_iothread();
if (r == EXCP_DEBUG) {
cpu_handle_guest_debug(cpu);
atomic_mb_set(&cpu->exit_request, 0);
}
- qemu_tcg_rr_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
+ qemu_tcg_rr_wait_io_event(cpu ? cpu : first_cpu);
deal_with_unplugged_cpus();
}
{
CPUState *cpu = arg;
+ assert(tcg_enabled());
g_assert(!use_icount);
rcu_register_thread();
/* process any pending work */
cpu->exit_request = 1;
- while (1) {
+ do {
if (cpu_can_run(cpu)) {
int r;
+ qemu_mutex_unlock_iothread();
r = tcg_cpu_exec(cpu);
+ qemu_mutex_lock_iothread();
switch (r) {
case EXCP_DEBUG:
cpu_handle_guest_debug(cpu);
return iothread_locked;
}
-void qemu_mutex_lock_iothread(void)
+/*
+ * The BQL is taken from so many places that it is worth profiling the
+ * callers directly, instead of funneling them all through a single function.
+ */
+void qemu_mutex_lock_iothread_impl(const char *file, int line)
{
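+    /* Fetch the lock function once: it may be replaced at runtime,
+     * e.g. by a profiling wrapper. */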
+ QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func);
+
g_assert(!qemu_mutex_iothread_locked());
- qemu_mutex_lock(&qemu_global_mutex);
+ bql_lock(&qemu_global_mutex, file, line);
iothread_locked = true;
}
}
}
+ /* We need to drop the replay_lock so any vCPU threads woken up
+     * can finish their replay tasks.
+ */
+ replay_mutex_unlock();
+
while (!all_vcpus_paused()) {
qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
CPU_FOREACH(cpu) {
qemu_cpu_kick(cpu);
}
}
+
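+    /* Retake the replay mutex, keeping the order: replay mutex
+     * outside the BQL. */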
+ qemu_mutex_unlock_iothread();
+ replay_mutex_lock();
+ qemu_mutex_lock_iothread();
}
void cpu_resume(CPUState *cpu)
static QemuThread *single_tcg_cpu_thread;
static int tcg_region_inited;
+ assert(tcg_enabled());
/*
* Initialize TCG regions--once. Now is a good time, because:
* (1) TCG's init context, prologue and target globals have been set up.
#ifdef _WIN32
cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
- while (!cpu->created) {
- qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
- }
} else {
/* For non-MTTCG cases we share the thread */
cpu->thread = single_tcg_cpu_thread;
cpu->halt_cond = single_tcg_halt_cond;
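+        /* The shared round-robin thread is already running, so the
+         * vCPU can be marked as created immediately. */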
+ cpu->thread_id = first_cpu->thread_id;
+ cpu->can_do_io = 1;
+ cpu->created = true;
}
}
#ifdef _WIN32
cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
- while (!cpu->created) {
- qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
- }
}
static void qemu_kvm_start_vcpu(CPUState *cpu)
cpu->cpu_index);
qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
cpu, QEMU_THREAD_JOINABLE);
- while (!cpu->created) {
- qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
- }
}
static void qemu_hvf_start_vcpu(CPUState *cpu)
cpu->cpu_index);
qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
cpu, QEMU_THREAD_JOINABLE);
- while (!cpu->created) {
- qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
- }
}
static void qemu_whpx_start_vcpu(CPUState *cpu)
#ifdef _WIN32
cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
- while (!cpu->created) {
- qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
- }
}
static void qemu_dummy_start_vcpu(CPUState *cpu)
cpu->cpu_index);
qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
QEMU_THREAD_JOINABLE);
- while (!cpu->created) {
- qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
- }
}
void qemu_init_vcpu(CPUState *cpu)
} else {
qemu_dummy_start_vcpu(cpu);
}
+
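+    /* All accelerators now wait here for the vCPU thread to set
+     * cpu->created. */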
+ while (!cpu->created) {
+ qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+ }
}
void cpu_stop_current(void)
return 0;
}
- return do_vm_stop(state);
+ return do_vm_stop(state, true);
}
/**
int vm_prepare_start(void)
{
RunState requested;
- int res = 0;
qemu_vmstop_requested(&requested);
if (runstate_is_running() && requested == RUN_STATE__MAX) {
* the STOP event.
*/
if (runstate_is_running()) {
- qapi_event_send_stop(&error_abort);
- res = -1;
- } else {
- replay_enable_events();
- cpu_enable_ticks();
- runstate_set(RUN_STATE_RUNNING);
- vm_state_notify(1, RUN_STATE_RUNNING);
+ qapi_event_send_stop();
+ qapi_event_send_resume();
+ return -1;
}
    /* We are sending this now, but the CPUs will be resumed shortly afterwards */
- qapi_event_send_resume(&error_abort);
- return res;
+ qapi_event_send_resume();
+
+ replay_enable_events();
+ cpu_enable_ticks();
+ runstate_set(RUN_STATE_RUNNING);
+ vm_state_notify(1, RUN_STATE_RUNNING);
+ return 0;
}
void vm_start(void)
#elif defined(TARGET_SPARC)
SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
CPUSPARCState *env = &sparc_cpu->env;
+#elif defined(TARGET_RISCV)
+ RISCVCPU *riscv_cpu = RISCV_CPU(cpu);
+ CPURISCVState *env = &riscv_cpu->env;
#elif defined(TARGET_MIPS)
MIPSCPU *mips_cpu = MIPS_CPU(cpu);
CPUMIPSState *env = &mips_cpu->env;
#elif defined(TARGET_S390X)
info->value->arch = CPU_INFO_ARCH_S390;
info->value->u.s390.cpu_state = env->cpu_state;
+#elif defined(TARGET_RISCV)
+ info->value->arch = CPU_INFO_ARCH_RISCV;
+ info->value->u.riscv.pc = env->pc;
#else
info->value->arch = CPU_INFO_ARCH_OTHER;
#endif
return head;
}
+static CpuInfoArch sysemu_target_to_cpuinfo_arch(SysEmuTarget target)
+{
+ /*
+ * The @SysEmuTarget -> @CpuInfoArch mapping below is based on the
+ * TARGET_ARCH -> TARGET_BASE_ARCH mapping in the "configure" script.
+ */
+ switch (target) {
+ case SYS_EMU_TARGET_I386:
+ case SYS_EMU_TARGET_X86_64:
+ return CPU_INFO_ARCH_X86;
+
+ case SYS_EMU_TARGET_PPC:
+ case SYS_EMU_TARGET_PPC64:
+ return CPU_INFO_ARCH_PPC;
+
+ case SYS_EMU_TARGET_SPARC:
+ case SYS_EMU_TARGET_SPARC64:
+ return CPU_INFO_ARCH_SPARC;
+
+ case SYS_EMU_TARGET_MIPS:
+ case SYS_EMU_TARGET_MIPSEL:
+ case SYS_EMU_TARGET_MIPS64:
+ case SYS_EMU_TARGET_MIPS64EL:
+ return CPU_INFO_ARCH_MIPS;
+
+ case SYS_EMU_TARGET_TRICORE:
+ return CPU_INFO_ARCH_TRICORE;
+
+ case SYS_EMU_TARGET_S390X:
+ return CPU_INFO_ARCH_S390;
+
+ case SYS_EMU_TARGET_RISCV32:
+ case SYS_EMU_TARGET_RISCV64:
+ return CPU_INFO_ARCH_RISCV;
+
+ default:
+ return CPU_INFO_ARCH_OTHER;
+ }
+}
+
+static void cpustate_to_cpuinfo_s390(CpuInfoS390 *info, const CPUState *cpu)
+{
+#ifdef TARGET_S390X
+ S390CPU *s390_cpu = S390_CPU(cpu);
+ CPUS390XState *env = &s390_cpu->env;
+
+ info->cpu_state = env->cpu_state;
+#else
+ abort();
+#endif
+}
+
/*
* fast means: we NEVER interrupt vCPU threads to retrieve
* information from KVM.
MachineState *ms = MACHINE(qdev_get_machine());
MachineClass *mc = MACHINE_GET_CLASS(ms);
CpuInfoFastList *head = NULL, *cur_item = NULL;
+ SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME,
+ -1, &error_abort);
CPUState *cpu;
-#if defined(TARGET_S390X)
- S390CPU *s390_cpu;
- CPUS390XState *env;
-#endif
CPU_FOREACH(cpu) {
CpuInfoFastList *info = g_malloc0(sizeof(*info));
info->value->props = props;
}
-#if defined(TARGET_S390X)
- s390_cpu = S390_CPU(cpu);
- env = &s390_cpu->env;
- info->value->arch = CPU_INFO_ARCH_S390;
- info->value->u.s390.cpu_state = env->cpu_state;
-#endif
+ info->value->arch = sysemu_target_to_cpuinfo_arch(target);
+ info->value->target = target;
+ if (target == SYS_EMU_TARGET_S390X) {
+ cpustate_to_cpuinfo_s390(&info->value->u.s390x, cpu);
+ }
+
if (!cur_item) {
head = cur_item = info;
} else {