#include "sysemu/hw_accel.h"
#include "sysemu/kvm.h"
#include "sysemu/hax.h"
+#include "sysemu/hvf.h"
#include "qmp-commands.h"
#include "exec/exec-all.h"
#include "qapi-event.h"
#include "hw/nmi.h"
#include "sysemu/replay.h"
+#include "hw/boards.h"
#ifdef CONFIG_LINUX
return cpu->icount_budget - (cpu->icount_decr.u16.low + cpu->icount_extra);
}
+/*
+ * Update the global shared timers_state.qemu_icount to take into
+ * account executed instructions. This is done by the TCG vCPU
+ * thread so the main loop can see time has moved forward.
+ */
+void cpu_update_icount(CPUState *cpu)
+{
+ int64_t executed = cpu_get_icount_executed(cpu);
+ cpu->icount_budget -= executed;
+
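+ /* Publish the new total; readers such as cpu_get_icount_raw() rely
+ * on 64-bit atomics, where available, to avoid torn values */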
+#ifdef CONFIG_ATOMIC64
+ atomic_set__nocheck(&timers_state.qemu_icount,
+ atomic_read__nocheck(&timers_state.qemu_icount) +
+ executed);
+#else /* FIXME: we need 64bit atomics to do this safely */
+ timers_state.qemu_icount += executed;
+#endif
+}
+
int64_t cpu_get_icount_raw(void)
{
- int64_t icount;
CPUState *cpu = current_cpu;
- icount = timers_state.qemu_icount;
if (cpu && cpu->running) {
if (!cpu->can_do_io) {
fprintf(stderr, "Bad icount read\n");
exit(1);
}
/* Take into account what has run */
- icount += cpu_get_icount_executed(cpu);
+ cpu_update_icount(cpu);
}
- return icount;
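+ /* Pairs with the atomic update in cpu_update_icount() */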
+#ifdef CONFIG_ATOMIC64
+ return atomic_read__nocheck(&timers_state.qemu_icount);
+#else /* FIXME: we need 64bit atomics to do this safely */
+ return timers_state.qemu_icount;
+#endif
}
/* Return the virtual CPU time, based on the instruction counter. */
if (deadline < 0) {
static bool notified;
if (!icount_sleep && !notified) {
- error_report("WARNING: icount sleep disabled and no active timers");
+ warn_report("icount sleep disabled and no active timers");
notified = true;
}
return;
sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);
qemu_mutex_unlock_iothread();
- atomic_set(&cpu->throttle_thread_scheduled, 0);
g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
qemu_mutex_lock_iothread();
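+ /* Clear the flag only after sleeping, so the timer tick cannot
+ * schedule another sleep while this one is still in progress */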
+ atomic_set(&cpu->throttle_thread_scheduled, 0);
}
static void cpu_throttle_timer_tick(void *opaque)
CPU_FOREACH(cpu) {
cpu_synchronize_state(cpu);
+ /* TODO: move to cpu_synchronize_state() */
+ if (hvf_enabled()) {
+ hvf_cpu_synchronize_state(cpu);
+ }
}
}
CPU_FOREACH(cpu) {
cpu_synchronize_post_reset(cpu);
+ /* TODO: move to cpu_synchronize_post_reset() */
+ if (hvf_enabled()) {
+ hvf_cpu_synchronize_post_reset(cpu);
+ }
}
}
CPU_FOREACH(cpu) {
cpu_synchronize_post_init(cpu);
+ /* TODO: move to cpu_synchronize_post_init() */
+ if (hvf_enabled()) {
+ hvf_cpu_synchronize_post_init(cpu);
+ }
+ }
+}
+
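+/* Mark each vCPU's register state dirty so that the state loaded from
+ * the snapshot is pushed back to the accelerator before the vCPUs run */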
+void cpu_synchronize_all_pre_loadvm(void)
+{
+ CPUState *cpu;
+
+ CPU_FOREACH(cpu) {
+ cpu_synchronize_pre_loadvm(cpu);
}
}
{
}
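+/* Stop the calling vCPU and wake up anyone waiting on qemu_pause_cond;
+ * must be called from the vCPU's own thread */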
+static void qemu_cpu_stop(CPUState *cpu, bool exit)
+{
+ g_assert(qemu_cpu_is_self(cpu));
+ cpu->stop = false;
+ cpu->stopped = true;
+ if (exit) {
+ cpu_exit(cpu);
+ }
+ qemu_cond_broadcast(&qemu_pause_cond);
+}
+
static void qemu_wait_io_event_common(CPUState *cpu)
{
atomic_mb_set(&cpu->thread_kicked, false);
if (cpu->stop) {
- cpu->stop = false;
- cpu->stopped = true;
- qemu_cond_broadcast(&qemu_pause_cond);
+ qemu_cpu_stop(cpu, false);
}
process_queued_cpu_work(cpu);
}
qemu_wait_io_event_common(cpu);
}
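+/* Sleep until there is work for this HVF vCPU, then process the common
+ * wake-up events (kicks, stop requests, queued work) */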
+static void qemu_hvf_wait_io_event(CPUState *cpu)
+{
+ while (cpu_thread_is_idle(cpu)) {
+ qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+ }
+ qemu_wait_io_event_common(cpu);
+}
+
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
CPUState *cpu = arg;
static void prepare_icount_for_run(CPUState *cpu)
{
if (use_icount) {
- int64_t count;
- int decr;
+ int insns_left;
/* These should always be cleared by process_icount_data after
* each vCPU execution. However u16.high can be raised
g_assert(cpu->icount_decr.u16.low == 0);
g_assert(cpu->icount_extra == 0);
-
- count = tcg_get_icount_limit();
-
- /* To calculate what we have executed so far we need to know
- * what we originally budgeted to run this cycle */
- cpu->icount_budget = count;
-
- decr = (count > 0xffff) ? 0xffff : count;
- count -= decr;
- cpu->icount_decr.u16.low = decr;
- cpu->icount_extra = count;
+ cpu->icount_budget = tcg_get_icount_limit();
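+ /* Split the budget between the 16-bit decrementer and icount_extra:
+ * the generated code can only decrement the u16.low field */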
+ insns_left = MIN(0xffff, cpu->icount_budget);
+ cpu->icount_decr.u16.low = insns_left;
+ cpu->icount_extra = cpu->icount_budget - insns_left;
}
}
{
if (use_icount) {
/* Account for executed instructions */
- timers_state.qemu_icount += cpu_get_icount_executed(cpu);
+ cpu_update_icount(cpu);
/* Reset the counters */
cpu->icount_decr.u16.low = 0;
CPUState *cpu = arg;
rcu_register_thread();
+ tcg_register_thread();
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
return NULL;
}
+/* The HVF-specific vCPU thread function. This one should only run when the host
+ * CPU supports the VMX "unrestricted guest" feature. */
+static void *qemu_hvf_cpu_thread_fn(void *arg)
+{
+ CPUState *cpu = arg;
+
+ int r;
+
+ assert(hvf_enabled());
+
+ rcu_register_thread();
+
+ qemu_mutex_lock_iothread();
+ qemu_thread_get_self(cpu->thread);
+
+ cpu->thread_id = qemu_get_thread_id();
+ cpu->can_do_io = 1;
+ current_cpu = cpu;
+
+ hvf_init_vcpu(cpu);
+
+ /* signal CPU creation */
+ cpu->created = true;
+ qemu_cond_signal(&qemu_cpu_cond);
+
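+ /* Run the guest until the vCPU is unplugged, waiting for I/O events
+ * under the BQL between runs */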
+ do {
+ if (cpu_can_run(cpu)) {
+ r = hvf_vcpu_exec(cpu);
+ if (r == EXCP_DEBUG) {
+ cpu_handle_guest_debug(cpu);
+ }
+ }
+ qemu_hvf_wait_io_event(cpu);
+ } while (!cpu->unplug || cpu_can_run(cpu));
+
+ hvf_vcpu_destroy(cpu);
+ cpu->created = false;
+ qemu_cond_signal(&qemu_cpu_cond);
+ qemu_mutex_unlock_iothread();
+ return NULL;
+}
+
#ifdef _WIN32
static void CALLBACK dummy_apc_func(ULONG_PTR unused)
{
g_assert(!use_icount);
rcu_register_thread();
+ tcg_register_thread();
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
/* Ignore everything else? */
break;
}
+ } else if (cpu->unplug) {
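+ /* The vCPU has been hot-unplugged: tear it down and exit the thread */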
+ qemu_tcg_destroy_vcpu(cpu);
+ cpu->created = false;
+ qemu_cond_signal(&qemu_cpu_cond);
+ qemu_mutex_unlock_iothread();
+ return NULL;
}
atomic_mb_set(&cpu->exit_request, 0);
qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
CPU_FOREACH(cpu) {
- cpu->stop = true;
- qemu_cpu_kick(cpu);
- }
-
- if (qemu_in_vcpu_thread()) {
- cpu_stop_current();
+ if (qemu_cpu_is_self(cpu)) {
+ qemu_cpu_stop(cpu, true);
+ } else {
+ cpu->stop = true;
+ qemu_cpu_kick(cpu);
+ }
}
while (!all_vcpus_paused()) {
char thread_name[VCPU_THREAD_NAME_SIZE];
static QemuCond *single_tcg_halt_cond;
static QemuThread *single_tcg_cpu_thread;
+ static int tcg_region_inited;
+
+ /*
+ * Initialize TCG regions--once. Now is a good time, because:
+ * (1) TCG's init context, prologue and target globals have been set up.
+ * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
+ * -accel flag is processed, so the check doesn't work then).
+ */
+ if (!tcg_region_inited) {
+ tcg_region_inited = 1;
+ tcg_region_init();
+ }
if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
cpu->thread = g_malloc0(sizeof(QemuThread));
}
}
+static void qemu_hvf_start_vcpu(CPUState *cpu)
+{
+ char thread_name[VCPU_THREAD_NAME_SIZE];
+
+ /* HVF currently does not support TCG, and only runs in
+ * unrestricted-guest mode. */
+ assert(hvf_enabled());
+
+ cpu->thread = g_malloc0(sizeof(QemuThread));
+ cpu->halt_cond = g_malloc0(sizeof(QemuCond));
+ qemu_cond_init(cpu->halt_cond);
+
+ snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
+ cpu->cpu_index);
+ qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
+ cpu, QEMU_THREAD_JOINABLE);
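+ /* Wait for the new thread to signal that the vCPU has been created */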
+ while (!cpu->created) {
+ qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+ }
+}
+
static void qemu_dummy_start_vcpu(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
/* If the target cpu hasn't set up any address spaces itself,
* give it the default one.
*/
- AddressSpace *as = address_space_init_shareable(cpu->memory,
- "cpu-memory");
cpu->num_ases = 1;
- cpu_address_space_init(cpu, as, 0);
+ cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
}
if (kvm_enabled()) {
qemu_kvm_start_vcpu(cpu);
} else if (hax_enabled()) {
qemu_hax_start_vcpu(cpu);
+ } else if (hvf_enabled()) {
+ qemu_hvf_start_vcpu(cpu);
} else if (tcg_enabled()) {
qemu_tcg_init_vcpu(cpu);
} else {
void cpu_stop_current(void)
{
if (current_cpu) {
- current_cpu->stop = false;
- current_cpu->stopped = true;
- cpu_exit(current_cpu);
- qemu_cond_broadcast(&qemu_pause_cond);
+ qemu_cpu_stop(current_cpu, true);
}
}
CpuInfoList *qmp_query_cpus(Error **errp)
{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
CpuInfoList *head = NULL, *cur_item = NULL;
CPUState *cpu;
#else
info->value->arch = CPU_INFO_ARCH_OTHER;
#endif
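+ /* Include topology properties when the machine can map a cpu_index
+ * to them */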
+ info->value->has_props = !!mc->cpu_index_to_instance_props;
+ if (info->value->has_props) {
+ CpuInstanceProperties *props;
+ props = g_malloc0(sizeof(*props));
+ *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
+ info->value->props = props;
+ }
/* XXX: waiting for the qapi to support GSList */
if (!cur_item) {