return;
}
- if (!qemu_in_vcpu_thread() && first_cpu) {
+ if (qemu_in_vcpu_thread()) {
+ /* A CPU is currently running; kick it back out to the
+ * tcg_cpu_exec() loop so it will recalculate its
+ * icount deadline immediately.
+ */
+ qemu_cpu_kick(current_cpu);
+ } else if (first_cpu) {
/* qemu_cpu_kick is not enough to kick a halted CPU out of
* qemu_tcg_wait_io_event. async_run_on_cpu, instead,
* causes cpu_thread_is_idle to return false. This way,
* handle_icount_deadline can run.
+ * If we have no CPUs at all for some reason, we don't
+ * need to do anything.
*/
async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
}
}
}
-static int do_vm_stop(RunState state)
+static int do_vm_stop(RunState state, bool send_stop)
{
int ret = 0;
pause_all_vcpus();
runstate_set(state);
vm_state_notify(0, state);
- qapi_event_send_stop(&error_abort);
+ if (send_stop) {
+ qapi_event_send_stop(&error_abort);
+ }
}
bdrv_drain_all();
return ret;
}
+/* Special vm_stop() variant for terminating the process. Historically clients
+ * did not expect a QMP STOP event and so we need to retain compatibility.
+ */
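+/* A usage sketch (an assumption, not part of this hunk): process-exit
+ * paths call
+ *     vm_shutdown();
+ * rather than vm_stop(RUN_STATE_SHUTDOWN), so QMP clients never see a
+ * STOP event while the process is terminating.
+ */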
+int vm_shutdown(void)
+{
+ return do_vm_stop(RUN_STATE_SHUTDOWN, false);
+}
+
static bool cpu_can_run(CPUState *cpu)
{
if (cpu->stop) {
insns_left = MIN(0xffff, cpu->icount_budget);
cpu->icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left;
+
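+ /* Hold the replay mutex across the whole vCPU run; it is released
+ * again in process_icount_data() once the executed instructions
+ * have been accounted for.
+ */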
+ replay_mutex_lock();
}
}
cpu->icount_budget = 0;
replay_account_executed_instructions();
+
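+ /* Pairs with the replay_mutex_lock() taken in
+ * prepare_icount_for_run() before the vCPU ran.
+ */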
+ replay_mutex_unlock();
}
}
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
- qemu_mutex_unlock_iothread();
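+ /* The BQL is no longer dropped and retaken here: callers of
+ * tcg_cpu_exec() now release it before calling us and reacquire it
+ * when we return.
+ */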
cpu_exec_start(cpu);
ret = cpu_exec(cpu);
cpu_exec_end(cpu);
- qemu_mutex_lock_iothread();
#ifdef CONFIG_PROFILER
tcg_time += profile_getclock() - ti;
#endif
cpu->exit_request = 1;
while (1) {
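+ /* Keep the lock order fixed: the replay mutex is always taken with
+ * the BQL released, and the BQL is then retaken inside it.
+ */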
+ qemu_mutex_unlock_iothread();
+ replay_mutex_lock();
+ qemu_mutex_lock_iothread();
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
qemu_account_warp_timer();
*/
handle_icount_deadline();
+ replay_mutex_unlock();
+
if (!cpu) {
cpu = first_cpu;
}
if (cpu_can_run(cpu)) {
int r;
+ qemu_mutex_unlock_iothread();
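+ /* Run the guest without the BQL held; prepare_icount_for_run()
+ * takes the replay mutex for the duration of the run (when icount
+ * is in use).
+ */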
prepare_icount_for_run(cpu);
r = tcg_cpu_exec(cpu);
process_icount_data(cpu);
+ qemu_mutex_lock_iothread();
if (r == EXCP_DEBUG) {
cpu_handle_guest_debug(cpu);
/* process any pending work */
cpu->exit_request = 1;
- while (1) {
+ do {
if (cpu_can_run(cpu)) {
int r;
+ qemu_mutex_unlock_iothread();
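+ /* As in the round-robin loop, the guest runs without the BQL held. */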
r = tcg_cpu_exec(cpu);
+ qemu_mutex_lock_iothread();
switch (r) {
case EXCP_DEBUG:
cpu_handle_guest_debug(cpu);
}
}
+ /* We need to drop the replay_lock so that any vCPU threads we wake
+ * up can finish their replay tasks.
+ */
+ replay_mutex_unlock();
+
while (!all_vcpus_paused()) {
qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
CPU_FOREACH(cpu) {
qemu_cpu_kick(cpu);
}
}
+
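+ /* All vCPUs are paused now; retake the locks in the usual order
+ * (replay mutex first, BQL inside it) before returning.
+ */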
+ qemu_mutex_unlock_iothread();
+ replay_mutex_lock();
+ qemu_mutex_lock_iothread();
}
void cpu_resume(CPUState *cpu)
return 0;
}
- return do_vm_stop(state);
+ return do_vm_stop(state, true);
}
/**
int vm_prepare_start(void)
{
RunState requested;
- int res = 0;
qemu_vmstop_requested(&requested);
if (runstate_is_running() && requested == RUN_STATE__MAX) {
*/
if (runstate_is_running()) {
qapi_event_send_stop(&error_abort);
- res = -1;
- } else {
- replay_enable_events();
- cpu_enable_ticks();
- runstate_set(RUN_STATE_RUNNING);
- vm_state_notify(1, RUN_STATE_RUNNING);
+ qapi_event_send_resume(&error_abort);
+ return -1;
}
/* We are sending this now, but the CPUs will be resumed shortly afterwards */
qapi_event_send_resume(&error_abort);
- return res;
+
+ replay_enable_events();
+ cpu_enable_ticks();
+ runstate_set(RUN_STATE_RUNNING);
+ vm_state_notify(1, RUN_STATE_RUNNING);
+ return 0;
}
void vm_start(void)
return head;
}
+static CpuInfoArch sysemu_target_to_cpuinfo_arch(SysEmuTarget target)
+{
+ /*
+ * The @SysEmuTarget -> @CpuInfoArch mapping below is based on the
+ * TARGET_ARCH -> TARGET_BASE_ARCH mapping in the "configure" script.
+ */
+ switch (target) {
+ case SYS_EMU_TARGET_I386:
+ case SYS_EMU_TARGET_X86_64:
+ return CPU_INFO_ARCH_X86;
+
+ case SYS_EMU_TARGET_PPC:
+ case SYS_EMU_TARGET_PPCEMB:
+ case SYS_EMU_TARGET_PPC64:
+ return CPU_INFO_ARCH_PPC;
+
+ case SYS_EMU_TARGET_SPARC:
+ case SYS_EMU_TARGET_SPARC64:
+ return CPU_INFO_ARCH_SPARC;
+
+ case SYS_EMU_TARGET_MIPS:
+ case SYS_EMU_TARGET_MIPSEL:
+ case SYS_EMU_TARGET_MIPS64:
+ case SYS_EMU_TARGET_MIPS64EL:
+ return CPU_INFO_ARCH_MIPS;
+
+ case SYS_EMU_TARGET_TRICORE:
+ return CPU_INFO_ARCH_TRICORE;
+
+ case SYS_EMU_TARGET_S390X:
+ return CPU_INFO_ARCH_S390;
+
+ case SYS_EMU_TARGET_RISCV32:
+ case SYS_EMU_TARGET_RISCV64:
+ return CPU_INFO_ARCH_RISCV;
+
+ default:
+ return CPU_INFO_ARCH_OTHER;
+ }
+}
+
+static void cpustate_to_cpuinfo_s390(CpuInfoS390 *info, const CPUState *cpu)
+{
+#ifdef TARGET_S390X
+ S390CPU *s390_cpu = S390_CPU(cpu);
+ CPUS390XState *env = &s390_cpu->env;
+
+ info->cpu_state = env->cpu_state;
+#else
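+ /* The caller only invokes this for SYS_EMU_TARGET_S390X, so reaching
+ * this on a non-s390x build is a programming error.
+ */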
+ abort();
+#endif
+}
+
/*
* fast means: we NEVER interrupt vCPU threads to retrieve
* information from KVM.
MachineState *ms = MACHINE(qdev_get_machine());
MachineClass *mc = MACHINE_GET_CLASS(ms);
CpuInfoFastList *head = NULL, *cur_item = NULL;
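+ /* TARGET_NAME is a build-time constant that is expected to always
+ * parse as a valid @SysEmuTarget, hence &error_abort.
+ */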
+ SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME,
+ -1, &error_abort);
CPUState *cpu;
-#if defined(TARGET_S390X)
- S390CPU *s390_cpu;
- CPUS390XState *env;
-#endif
CPU_FOREACH(cpu) {
CpuInfoFastList *info = g_malloc0(sizeof(*info));
info->value->props = props;
}
-#if defined(TARGET_S390X)
- s390_cpu = S390_CPU(cpu);
- env = &s390_cpu->env;
- info->value->arch = CPU_INFO_ARCH_S390;
- info->value->u.s390.cpu_state = env->cpu_state;
-#endif
+ info->value->arch = sysemu_target_to_cpuinfo_arch(target);
+ info->value->target = target;
+ if (target == SYS_EMU_TARGET_S390X) {
+ cpustate_to_cpuinfo_s390(&info->value->u.s390x, cpu);
+ } else {
+ /* do nothing for @CpuInfoOther */
+ }
+
if (!cur_item) {
head = cur_item = info;
} else {