* THE SOFTWARE.
*/
-/* Needed early for CONFIG_BSD etc. */
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu/config-file.h"
#include "cpu.h"
#include "monitor/monitor.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-misc.h"
+#include "qapi/qapi-events-run-state.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm.h"
#include "sysemu/hax.h"
-#include "qmp-commands.h"
+#include "sysemu/hvf.h"
+#include "sysemu/whpx.h"
#include "exec/exec-all.h"
#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
+#include "qemu/option.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "tcg.h"
-#include "qapi-event.h"
#include "hw/nmi.h"
#include "sysemu/replay.h"
#include "hw/boards.h"
/* Protected by TimersState seqlock */
static bool icount_sleep = true;
-static int64_t vm_clock_warp_start = -1;
/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
-static QEMUTimer *icount_rt_timer;
-static QEMUTimer *icount_vm_timer;
-static QEMUTimer *icount_warp_timer;
-
typedef struct TimersState {
/* Protected by BQL. */
int64_t cpu_ticks_prev;
int64_t qemu_icount_bias;
/* Only written by TCG thread */
int64_t qemu_icount;
+ /* for adjusting icount */
+ int64_t vm_clock_warp_start;
+ QEMUTimer *icount_rt_timer;
+ QEMUTimer *icount_vm_timer;
+ QEMUTimer *icount_warp_timer;
} TimersState;
static TimersState timers_state;
if (cpu && cpu->running) {
if (!cpu->can_do_io) {
- fprintf(stderr, "Bad icount read\n");
+ error_report("Bad icount read");
exit(1);
}
/* Take into account what has run */
static void icount_adjust_rt(void *opaque)
{
- timer_mod(icount_rt_timer,
+ timer_mod(timers_state.icount_rt_timer,
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
icount_adjust();
}
static void icount_adjust_vm(void *opaque)
{
- timer_mod(icount_vm_timer,
+ timer_mod(timers_state.icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
NANOSECONDS_PER_SECOND / 10);
icount_adjust();
*/
do {
seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
- warp_start = vm_clock_warp_start;
+ warp_start = timers_state.vm_clock_warp_start;
} while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
if (warp_start == -1) {
cpu_get_clock_locked());
int64_t warp_delta;
- warp_delta = clock - vm_clock_warp_start;
+ warp_delta = clock - timers_state.vm_clock_warp_start;
if (use_icount == 2) {
/*
* In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
}
timers_state.qemu_icount_bias += warp_delta;
}
- vm_clock_warp_start = -1;
+ timers_state.vm_clock_warp_start = -1;
seqlock_write_end(&timers_state.vm_clock_seqlock);
if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
* every 100ms.
*/
seqlock_write_begin(&timers_state.vm_clock_seqlock);
- if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
- vm_clock_warp_start = clock;
+ if (timers_state.vm_clock_warp_start == -1
+ || timers_state.vm_clock_warp_start > clock) {
+ timers_state.vm_clock_warp_start = clock;
}
seqlock_write_end(&timers_state.vm_clock_seqlock);
- timer_mod_anticipate(icount_warp_timer, clock + deadline);
+ timer_mod_anticipate(timers_state.icount_warp_timer,
+ clock + deadline);
}
} else if (deadline == 0) {
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
return;
}
- timer_del(icount_warp_timer);
+ timer_del(timers_state.icount_warp_timer);
icount_warp_rt();
}
return use_icount;
}
+static bool warp_timer_state_needed(void *opaque)
+{
+ TimersState *s = opaque;
+ return s->icount_warp_timer != NULL;
+}
+
+static bool adjust_timers_state_needed(void *opaque)
+{
+ TimersState *s = opaque;
+ return s->icount_rt_timer != NULL;
+}
+
+/*
+ * Subsection for warp timer migration is optional, because it may not be created
+ */
+static const VMStateDescription icount_vmstate_warp_timer = {
+ .name = "timer/icount/warp_timer",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = warp_timer_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT64(vm_clock_warp_start, TimersState),
+ VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription icount_vmstate_adjust_timers = {
+ .name = "timer/icount/timers",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = adjust_timers_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
+ VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
/*
* This is a subsection for icount migration.
*/
VMSTATE_INT64(qemu_icount_bias, TimersState),
VMSTATE_INT64(qemu_icount, TimersState),
VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &icount_vmstate_warp_timer,
+ &icount_vmstate_adjust_timers,
+ NULL
}
};
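/*
 * Illustrative note (sketch, not part of this change): these descriptions hang
 * off the vmstate_timers instance that cpu_ticks_init() already registers for
 * the global timers_state, roughly:
 *
 *     vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
 *
 * On the migration source the .needed callbacks above are consulted, so the
 * optional "timer/icount/warp_timer" and "timer/icount/timers" subsections are
 * only transmitted when the corresponding timers were actually created.
 */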
icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
if (icount_sleep) {
- icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
+ timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
icount_timer_cb, NULL);
}
the virtual time trigger catches emulated time passing too fast.
Realtime triggers occur even when idle, so use them less frequently
than VM triggers. */
- icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
+ timers_state.vm_clock_warp_start = -1;
+ timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
icount_adjust_rt, NULL);
- timer_mod(icount_rt_timer,
+ timer_mod(timers_state.icount_rt_timer,
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
- icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
icount_adjust_vm, NULL);
- timer_mod(icount_vm_timer,
+ timer_mod(timers_state.icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
NANOSECONDS_PER_SECOND / 10);
}
return;
}
- if (!qemu_in_vcpu_thread() && first_cpu) {
+ if (qemu_in_vcpu_thread()) {
+ /* A CPU is currently running; kick it back out to the
+ * tcg_cpu_exec() loop so it will recalculate its
+ * icount deadline immediately.
+ */
+ qemu_cpu_kick(current_cpu);
+ } else if (first_cpu) {
/* qemu_cpu_kick is not enough to kick a halted CPU out of
* qemu_tcg_wait_io_event. async_run_on_cpu, instead,
* causes cpu_thread_is_idle to return false. This way,
* handle_icount_deadline can run.
+ * If we have no CPUs at all for some reason, we don't
+ * need to do anything.
*/
async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
}
static void start_tcg_kick_timer(void)
{
- if (!mttcg_enabled && !tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
+ assert(!mttcg_enabled);
+ if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
kick_tcg_thread, NULL);
timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
static void stop_tcg_kick_timer(void)
{
+ assert(!mttcg_enabled);
if (tcg_kick_vcpu_timer) {
timer_del(tcg_kick_vcpu_timer);
tcg_kick_vcpu_timer = NULL;
CPU_FOREACH(cpu) {
cpu_synchronize_state(cpu);
+ /* TODO: move to cpu_synchronize_state() */
+ if (hvf_enabled()) {
+ hvf_cpu_synchronize_state(cpu);
+ }
}
}
CPU_FOREACH(cpu) {
cpu_synchronize_post_reset(cpu);
+ /* TODO: move to cpu_synchronize_post_reset() */
+ if (hvf_enabled()) {
+ hvf_cpu_synchronize_post_reset(cpu);
+ }
}
}
CPU_FOREACH(cpu) {
cpu_synchronize_post_init(cpu);
+ /* TODO: move to cpu_synchronize_post_init() */
+ if (hvf_enabled()) {
+ hvf_cpu_synchronize_post_init(cpu);
+ }
}
}
}
}
-static int do_vm_stop(RunState state)
+static int do_vm_stop(RunState state, bool send_stop)
{
int ret = 0;
pause_all_vcpus();
runstate_set(state);
vm_state_notify(0, state);
- qapi_event_send_stop(&error_abort);
+ if (send_stop) {
+ qapi_event_send_stop(&error_abort);
+ }
}
bdrv_drain_all();
return ret;
}
+/* Special vm_stop() variant for terminating the process. Historically clients
+ * did not expect a QMP STOP event and so we need to retain compatibility.
+ */
+int vm_shutdown(void)
+{
+ return do_vm_stop(RUN_STATE_SHUTDOWN, false);
+}
+
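/* Usage sketch (assumed caller, not shown in this hunk): a process-exit path
 * such as the end of main() in vl.c can stop guest execution without emitting
 * the QMP STOP event that a management-initiated pause would produce:
 *
 *     main_loop();
 *     vm_shutdown();      // pause vCPUs, drain and flush block I/O; no STOP event
 *     bdrv_close_all();
 *
 * Interactive pauses keep going through vm_stop(), which passes send_stop=true.
 */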
static bool cpu_can_run(CPUState *cpu)
{
if (cpu->stop) {
{
}
-static void qemu_wait_io_event_common(CPUState *cpu)
+static void qemu_cpu_stop(CPUState *cpu, bool exit)
{
- atomic_mb_set(&cpu->thread_kicked, false);
- if (cpu->stop) {
- cpu->stop = false;
- cpu->stopped = true;
- qemu_cond_broadcast(&qemu_pause_cond);
+ g_assert(qemu_cpu_is_self(cpu));
+ cpu->stop = false;
+ cpu->stopped = true;
+ if (exit) {
+ cpu_exit(cpu);
}
- process_queued_cpu_work(cpu);
+ qemu_cond_broadcast(&qemu_pause_cond);
}
-static bool qemu_tcg_should_sleep(CPUState *cpu)
+static void qemu_wait_io_event_common(CPUState *cpu)
{
- if (mttcg_enabled) {
- return cpu_thread_is_idle(cpu);
- } else {
- return all_cpu_threads_idle();
+ atomic_mb_set(&cpu->thread_kicked, false);
+ if (cpu->stop) {
+ qemu_cpu_stop(cpu, false);
}
+ process_queued_cpu_work(cpu);
}
-static void qemu_tcg_wait_io_event(CPUState *cpu)
+static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
{
- while (qemu_tcg_should_sleep(cpu)) {
+ while (all_cpu_threads_idle()) {
stop_tcg_kick_timer();
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
}
qemu_wait_io_event_common(cpu);
}
-static void qemu_kvm_wait_io_event(CPUState *cpu)
+static void qemu_wait_io_event(CPUState *cpu)
{
while (cpu_thread_is_idle(cpu)) {
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
}
+#ifdef _WIN32
+ /* Eat dummy APC queued by qemu_cpu_kick_thread. */
+ if (!tcg_enabled()) {
+ SleepEx(0, TRUE);
+ }
+#endif
qemu_wait_io_event_common(cpu);
}
r = kvm_init_vcpu(cpu);
if (r < 0) {
- fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
+ error_report("kvm_init_vcpu failed: %s", strerror(-r));
exit(1);
}
cpu_handle_guest_debug(cpu);
}
}
- qemu_kvm_wait_io_event(cpu);
+ qemu_wait_io_event(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));
qemu_kvm_destroy_vcpu(cpu);
cpu->created = false;
qemu_cond_signal(&qemu_cpu_cond);
qemu_mutex_unlock_iothread();
+ rcu_unregister_thread();
return NULL;
}
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
- fprintf(stderr, "qtest is not supported under Windows\n");
+ error_report("qtest is not supported under Windows");
exit(1);
#else
CPUState *cpu = arg;
cpu->created = true;
qemu_cond_signal(&qemu_cpu_cond);
- while (1) {
+ do {
qemu_mutex_unlock_iothread();
do {
int sig;
exit(1);
}
qemu_mutex_lock_iothread();
- qemu_wait_io_event_common(cpu);
- }
+ qemu_wait_io_event(cpu);
+ } while (!cpu->unplug);
+ rcu_unregister_thread();
return NULL;
#endif
}
insns_left = MIN(0xffff, cpu->icount_budget);
cpu->icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left;
+
+ replay_mutex_lock();
}
}
cpu->icount_budget = 0;
replay_account_executed_instructions();
+
+ replay_mutex_unlock();
}
}
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
- qemu_mutex_unlock_iothread();
cpu_exec_start(cpu);
ret = cpu_exec(cpu);
cpu_exec_end(cpu);
- qemu_mutex_lock_iothread();
#ifdef CONFIG_PROFILER
tcg_time += profile_getclock() - ti;
#endif
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
- CPU_FOREACH(cpu) {
- cpu->thread_id = qemu_get_thread_id();
- cpu->created = true;
- cpu->can_do_io = 1;
- }
+ cpu->thread_id = qemu_get_thread_id();
+ cpu->created = true;
+ cpu->can_do_io = 1;
qemu_cond_signal(&qemu_cpu_cond);
/* wait for initial kick-off after machine start */
cpu->exit_request = 1;
while (1) {
+ qemu_mutex_unlock_iothread();
+ replay_mutex_lock();
+ qemu_mutex_lock_iothread();
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
qemu_account_warp_timer();
*/
handle_icount_deadline();
+ replay_mutex_unlock();
+
if (!cpu) {
cpu = first_cpu;
}
if (cpu_can_run(cpu)) {
int r;
+ qemu_mutex_unlock_iothread();
prepare_icount_for_run(cpu);
r = tcg_cpu_exec(cpu);
process_icount_data(cpu);
+ qemu_mutex_lock_iothread();
if (r == EXCP_DEBUG) {
cpu_handle_guest_debug(cpu);
atomic_mb_set(&cpu->exit_request, 0);
}
- qemu_tcg_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
+ qemu_tcg_rr_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
deal_with_unplugged_cpus();
}
+ rcu_unregister_thread();
return NULL;
}
CPUState *cpu = arg;
int r;
+ rcu_register_thread();
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
hax_init_vcpu(cpu);
qemu_cond_signal(&qemu_cpu_cond);
- while (1) {
+ do {
if (cpu_can_run(cpu)) {
r = hax_smp_cpu_exec(cpu);
if (r == EXCP_DEBUG) {
}
}
+ qemu_wait_io_event(cpu);
+ } while (!cpu->unplug || cpu_can_run(cpu));
+ rcu_unregister_thread();
+ return NULL;
+}
+
+/* The HVF-specific vCPU thread function. This one should only run when the host
+ * CPU supports the VMX "unrestricted guest" feature. */
+static void *qemu_hvf_cpu_thread_fn(void *arg)
+{
+ CPUState *cpu = arg;
+
+ int r;
+
+ assert(hvf_enabled());
+
+ rcu_register_thread();
+
+ qemu_mutex_lock_iothread();
+ qemu_thread_get_self(cpu->thread);
+
+ cpu->thread_id = qemu_get_thread_id();
+ cpu->can_do_io = 1;
+ current_cpu = cpu;
+
+ hvf_init_vcpu(cpu);
+
+ /* signal CPU creation */
+ cpu->created = true;
+ qemu_cond_signal(&qemu_cpu_cond);
+
+ do {
+ if (cpu_can_run(cpu)) {
+ r = hvf_vcpu_exec(cpu);
+ if (r == EXCP_DEBUG) {
+ cpu_handle_guest_debug(cpu);
+ }
+ }
+ qemu_wait_io_event(cpu);
+ } while (!cpu->unplug || cpu_can_run(cpu));
+
+ hvf_vcpu_destroy(cpu);
+ cpu->created = false;
+ qemu_cond_signal(&qemu_cpu_cond);
+ qemu_mutex_unlock_iothread();
+ rcu_unregister_thread();
+ return NULL;
+}
+
+static void *qemu_whpx_cpu_thread_fn(void *arg)
+{
+ CPUState *cpu = arg;
+ int r;
+
+ rcu_register_thread();
+
+ qemu_mutex_lock_iothread();
+ qemu_thread_get_self(cpu->thread);
+ cpu->thread_id = qemu_get_thread_id();
+ current_cpu = cpu;
+
+ r = whpx_init_vcpu(cpu);
+ if (r < 0) {
+ fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-r));
+ exit(1);
+ }
+
+ /* signal CPU creation */
+ cpu->created = true;
+ qemu_cond_signal(&qemu_cpu_cond);
+
+ do {
+ if (cpu_can_run(cpu)) {
+ r = whpx_vcpu_exec(cpu);
+ if (r == EXCP_DEBUG) {
+ cpu_handle_guest_debug(cpu);
+ }
+ }
while (cpu_thread_is_idle(cpu)) {
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
}
-#ifdef _WIN32
- SleepEx(0, TRUE);
-#endif
qemu_wait_io_event_common(cpu);
- }
+ } while (!cpu->unplug || cpu_can_run(cpu));
+
+ whpx_destroy_vcpu(cpu);
+ cpu->created = false;
+ qemu_cond_signal(&qemu_cpu_cond);
+ qemu_mutex_unlock_iothread();
+ rcu_unregister_thread();
return NULL;
}
/* process any pending work */
cpu->exit_request = 1;
- while (1) {
+ do {
if (cpu_can_run(cpu)) {
int r;
+ qemu_mutex_unlock_iothread();
r = tcg_cpu_exec(cpu);
+ qemu_mutex_lock_iothread();
switch (r) {
case EXCP_DEBUG:
cpu_handle_guest_debug(cpu);
/* Ignore everything else? */
break;
}
- } else if (cpu->unplug) {
- qemu_tcg_destroy_vcpu(cpu);
- cpu->created = false;
- qemu_cond_signal(&qemu_cpu_cond);
- qemu_mutex_unlock_iothread();
- return NULL;
}
atomic_mb_set(&cpu->exit_request, 0);
- qemu_tcg_wait_io_event(cpu);
- }
+ qemu_wait_io_event(cpu);
+ } while (!cpu->unplug || cpu_can_run(cpu));
+ qemu_tcg_destroy_vcpu(cpu);
+ cpu->created = false;
+ qemu_cond_signal(&qemu_cpu_cond);
+ qemu_mutex_unlock_iothread();
+ rcu_unregister_thread();
return NULL;
}
}
#else /* _WIN32 */
if (!qemu_cpu_is_self(cpu)) {
- if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
+ if (whpx_enabled()) {
+ whpx_vcpu_kick(cpu);
+ } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
__func__, GetLastError());
exit(1);
qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
CPU_FOREACH(cpu) {
- cpu->stop = true;
- qemu_cpu_kick(cpu);
+ if (qemu_cpu_is_self(cpu)) {
+ qemu_cpu_stop(cpu, true);
+ } else {
+ cpu->stop = true;
+ qemu_cpu_kick(cpu);
+ }
}
- if (qemu_in_vcpu_thread()) {
- cpu_stop_current();
- }
+ /* We need to drop the replay_lock so any vCPU threads woken up
+ * can finish their replay tasks
+ */
+ replay_mutex_unlock();
while (!all_vcpus_paused()) {
qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
qemu_cpu_kick(cpu);
}
}
+
+ qemu_mutex_unlock_iothread();
+ replay_mutex_lock();
+ qemu_mutex_lock_iothread();
}
void cpu_resume(CPUState *cpu)
}
}
-void cpu_remove(CPUState *cpu)
+void cpu_remove_sync(CPUState *cpu)
{
cpu->stop = true;
cpu->unplug = true;
qemu_cpu_kick(cpu);
-}
-
-void cpu_remove_sync(CPUState *cpu)
-{
- cpu_remove(cpu);
- while (cpu->created) {
- qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
- }
+ qemu_mutex_unlock_iothread();
+ qemu_thread_join(cpu->thread);
+ qemu_mutex_lock_iothread();
}
/* For temporary buffers for forming a name */
#ifdef _WIN32
cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
- while (!cpu->created) {
- qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
- }
} else {
/* For non-MTTCG cases we share the thread */
cpu->thread = single_tcg_cpu_thread;
cpu->halt_cond = single_tcg_halt_cond;
+ cpu->thread_id = first_cpu->thread_id;
+ cpu->can_do_io = 1;
+ cpu->created = true;
}
}
#ifdef _WIN32
cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
- while (!cpu->created) {
- qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
- }
}
static void qemu_kvm_start_vcpu(CPUState *cpu)
cpu->cpu_index);
qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
cpu, QEMU_THREAD_JOINABLE);
- while (!cpu->created) {
- qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
- }
+}
+
+static void qemu_hvf_start_vcpu(CPUState *cpu)
+{
+ char thread_name[VCPU_THREAD_NAME_SIZE];
+
+ /* HVF currently does not support TCG, and only runs in
+ * unrestricted-guest mode. */
+ assert(hvf_enabled());
+
+ cpu->thread = g_malloc0(sizeof(QemuThread));
+ cpu->halt_cond = g_malloc0(sizeof(QemuCond));
+ qemu_cond_init(cpu->halt_cond);
+
+ snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
+ cpu->cpu_index);
+ qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
+ cpu, QEMU_THREAD_JOINABLE);
+}
+
+static void qemu_whpx_start_vcpu(CPUState *cpu)
+{
+ char thread_name[VCPU_THREAD_NAME_SIZE];
+
+ cpu->thread = g_malloc0(sizeof(QemuThread));
+ cpu->halt_cond = g_malloc0(sizeof(QemuCond));
+ qemu_cond_init(cpu->halt_cond);
+ snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX",
+ cpu->cpu_index);
+ qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn,
+ cpu, QEMU_THREAD_JOINABLE);
+#ifdef _WIN32
+ cpu->hThread = qemu_thread_get_handle(cpu->thread);
+#endif
}
static void qemu_dummy_start_vcpu(CPUState *cpu)
cpu->cpu_index);
qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
QEMU_THREAD_JOINABLE);
- while (!cpu->created) {
- qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
- }
}
void qemu_init_vcpu(CPUState *cpu)
/* If the target cpu hasn't set up any address spaces itself,
* give it the default one.
*/
- AddressSpace *as = g_new0(AddressSpace, 1);
-
- address_space_init(as, cpu->memory, "cpu-memory");
cpu->num_ases = 1;
- cpu_address_space_init(cpu, as, 0);
+ cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
}
if (kvm_enabled()) {
qemu_kvm_start_vcpu(cpu);
} else if (hax_enabled()) {
qemu_hax_start_vcpu(cpu);
+ } else if (hvf_enabled()) {
+ qemu_hvf_start_vcpu(cpu);
} else if (tcg_enabled()) {
qemu_tcg_init_vcpu(cpu);
+ } else if (whpx_enabled()) {
+ qemu_whpx_start_vcpu(cpu);
} else {
qemu_dummy_start_vcpu(cpu);
}
+
+ while (!cpu->created) {
+ qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+ }
}
void cpu_stop_current(void)
{
if (current_cpu) {
- current_cpu->stop = false;
- current_cpu->stopped = true;
- cpu_exit(current_cpu);
- qemu_cond_broadcast(&qemu_pause_cond);
+ qemu_cpu_stop(current_cpu, true);
}
}
return 0;
}
- return do_vm_stop(state);
+ return do_vm_stop(state, true);
}
/**
int vm_prepare_start(void)
{
RunState requested;
- int res = 0;
qemu_vmstop_requested(&requested);
if (runstate_is_running() && requested == RUN_STATE__MAX) {
*/
if (runstate_is_running()) {
qapi_event_send_stop(&error_abort);
- res = -1;
- } else {
- replay_enable_events();
- cpu_enable_ticks();
- runstate_set(RUN_STATE_RUNNING);
- vm_state_notify(1, RUN_STATE_RUNNING);
+ qapi_event_send_resume(&error_abort);
+ return -1;
}
/* We are sending this now, but the CPUs will be resumed shortly later */
qapi_event_send_resume(&error_abort);
- return res;
+
+ replay_enable_events();
+ cpu_enable_ticks();
+ runstate_set(RUN_STATE_RUNNING);
+ vm_state_notify(1, RUN_STATE_RUNNING);
+ return 0;
}
void vm_start(void)
#elif defined(TARGET_SPARC)
SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
CPUSPARCState *env = &sparc_cpu->env;
+#elif defined(TARGET_RISCV)
+ RISCVCPU *riscv_cpu = RISCV_CPU(cpu);
+ CPURISCVState *env = &riscv_cpu->env;
#elif defined(TARGET_MIPS)
MIPSCPU *mips_cpu = MIPS_CPU(cpu);
CPUMIPSState *env = &mips_cpu->env;
#elif defined(TARGET_TRICORE)
TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
CPUTriCoreState *env = &tricore_cpu->env;
+#elif defined(TARGET_S390X)
+ S390CPU *s390_cpu = S390_CPU(cpu);
+ CPUS390XState *env = &s390_cpu->env;
#endif
cpu_synchronize_state(cpu);
#elif defined(TARGET_TRICORE)
info->value->arch = CPU_INFO_ARCH_TRICORE;
info->value->u.tricore.PC = env->PC;
+#elif defined(TARGET_S390X)
+ info->value->arch = CPU_INFO_ARCH_S390;
+ info->value->u.s390.cpu_state = env->cpu_state;
+#elif defined(TARGET_RISCV)
+ info->value->arch = CPU_INFO_ARCH_RISCV;
+ info->value->u.riscv.pc = env->pc;
#else
info->value->arch = CPU_INFO_ARCH_OTHER;
#endif
return head;
}
+static CpuInfoArch sysemu_target_to_cpuinfo_arch(SysEmuTarget target)
+{
+ /*
+ * The @SysEmuTarget -> @CpuInfoArch mapping below is based on the
+ * TARGET_ARCH -> TARGET_BASE_ARCH mapping in the "configure" script.
+ */
+ switch (target) {
+ case SYS_EMU_TARGET_I386:
+ case SYS_EMU_TARGET_X86_64:
+ return CPU_INFO_ARCH_X86;
+
+ case SYS_EMU_TARGET_PPC:
+ case SYS_EMU_TARGET_PPCEMB:
+ case SYS_EMU_TARGET_PPC64:
+ return CPU_INFO_ARCH_PPC;
+
+ case SYS_EMU_TARGET_SPARC:
+ case SYS_EMU_TARGET_SPARC64:
+ return CPU_INFO_ARCH_SPARC;
+
+ case SYS_EMU_TARGET_MIPS:
+ case SYS_EMU_TARGET_MIPSEL:
+ case SYS_EMU_TARGET_MIPS64:
+ case SYS_EMU_TARGET_MIPS64EL:
+ return CPU_INFO_ARCH_MIPS;
+
+ case SYS_EMU_TARGET_TRICORE:
+ return CPU_INFO_ARCH_TRICORE;
+
+ case SYS_EMU_TARGET_S390X:
+ return CPU_INFO_ARCH_S390;
+
+ case SYS_EMU_TARGET_RISCV32:
+ case SYS_EMU_TARGET_RISCV64:
+ return CPU_INFO_ARCH_RISCV;
+
+ default:
+ return CPU_INFO_ARCH_OTHER;
+ }
+}
+
+static void cpustate_to_cpuinfo_s390(CpuInfoS390 *info, const CPUState *cpu)
+{
+#ifdef TARGET_S390X
+ S390CPU *s390_cpu = S390_CPU(cpu);
+ CPUS390XState *env = &s390_cpu->env;
+
+ info->cpu_state = env->cpu_state;
+#else
+ abort();
+#endif
+}
+
+/*
+ * fast means: we NEVER interrupt vCPU threads to retrieve
+ * information from KVM.
+ */
+CpuInfoFastList *qmp_query_cpus_fast(Error **errp)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ CpuInfoFastList *head = NULL, *cur_item = NULL;
+ SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME,
+ -1, &error_abort);
+ CPUState *cpu;
+
+ CPU_FOREACH(cpu) {
+ CpuInfoFastList *info = g_malloc0(sizeof(*info));
+ info->value = g_malloc0(sizeof(*info->value));
+
+ info->value->cpu_index = cpu->cpu_index;
+ info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
+ info->value->thread_id = cpu->thread_id;
+
+ info->value->has_props = !!mc->cpu_index_to_instance_props;
+ if (info->value->has_props) {
+ CpuInstanceProperties *props;
+ props = g_malloc0(sizeof(*props));
+ *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
+ info->value->props = props;
+ }
+
+ info->value->arch = sysemu_target_to_cpuinfo_arch(target);
+ info->value->target = target;
+ if (target == SYS_EMU_TARGET_S390X) {
+ cpustate_to_cpuinfo_s390(&info->value->u.s390x, cpu);
+ } else {
+ /* do nothing for @CpuInfoOther */
+ }
+
+ if (!cur_item) {
+ head = cur_item = info;
+ } else {
+ cur_item->next = info;
+ cur_item = info;
+ }
+ }
+
+ return head;
+}
+
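/* Illustrative QMP exchange (sketch; field values are invented):
 *
 *   -> { "execute": "query-cpus-fast" }
 *   <- { "return": [ { "cpu-index": 0,
 *                      "qom-path": "/machine/unattached/device[0]",
 *                      "thread-id": 25627,
 *                      "arch": "x86",
 *                      "target": "x86_64" } ] }
 *
 * Because no vCPU thread is interrupted to refresh register state from KVM,
 * the reply only carries information the main thread already has.
 */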
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
bool has_cpu, int64_t cpu_index, Error **errp)
{