#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/config-file.h"
+#include "migration/vmstate.h"
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "exec/exec-all.h"
#include "qemu/thread.h"
+#include "qemu/plugin.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "tcg.h"
#include "hw/nmi.h"
#include "sysemu/replay.h"
+#include "sysemu/runstate.h"
+#include "hw/boards.h"
+#include "hw/hw.h"
#ifdef CONFIG_LINUX
#endif /* CONFIG_LINUX */
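+/*
+ * The BQL; declared up here because cpu_throttle_thread() below now
+ * waits on it with qemu_cond_timedwait().
+ */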
+static QemuMutex qemu_global_mutex;
+
int64_t max_delay;
int64_t max_advance;
static TimersState timers_state;
bool mttcg_enabled;
-/*
- * We default to false if we know other options have been enabled
- * which are currently incompatible with MTTCG. Otherwise when each
- * guest (target) has been updated to support:
- * - atomic instructions
- * - memory ordering primitives (barriers)
- * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
- *
- * Once a guest architecture has been converted to the new primitives
- * there are two remaining limitations to check.
- *
- * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
- * - The host must have a stronger memory order than the guest
- *
- * It may be possible in future to support strong guests on weak hosts
- * but that will require tagging all load/stores in a guest with their
- * implicit memory order requirements which would likely slow things
- * down a lot.
- */
-
-static bool check_tcg_memory_orders_compatible(void)
-{
-#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
- return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
-#else
- return false;
-#endif
-}
-
-static bool default_mttcg_enabled(void)
-{
- if (use_icount || TCG_OVERSIZED_GUEST) {
- return false;
- } else {
-#ifdef TARGET_SUPPORTS_MTTCG
- return check_tcg_memory_orders_compatible();
-#else
- return false;
-#endif
- }
-}
-
-void qemu_tcg_configure(QemuOpts *opts, Error **errp)
-{
- const char *t = qemu_opt_get(opts, "thread");
- if (t) {
- if (strcmp(t, "multi") == 0) {
- if (TCG_OVERSIZED_GUEST) {
- error_setg(errp, "No MTTCG when guest word size > hosts");
- } else if (use_icount) {
- error_setg(errp, "No MTTCG when icount is enabled");
- } else {
-#ifndef TARGET_SUPPORTS_MTTCG
- warn_report("Guest not yet converted to MTTCG - "
- "you may get unexpected results");
-#endif
- if (!check_tcg_memory_orders_compatible()) {
- warn_report("Guest expects a stronger memory ordering "
- "than the host provides");
- error_printf("This may cause strange/hard to debug errors\n");
- }
- mttcg_enabled = true;
- }
- } else if (strcmp(t, "single") == 0) {
- mttcg_enabled = false;
- } else {
- error_setg(errp, "Invalid 'thread' setting %s", t);
- }
- } else {
- mttcg_enabled = default_mttcg_enabled();
- }
-}
/* The current number of executed instructions is based on what we
* originally budgeted minus the current state of the decrementing
assert(qtest_enabled());
aio_context = qemu_get_aio_context();
while (clock < dest) {
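+ /* QEMU_TIMER_ATTR_ALL: honour every timer here, whatever its attributes */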
- int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
+ int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
+ QEMU_TIMER_ATTR_ALL);
int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
seqlock_write_lock(&timers_state.vm_clock_seqlock,
/* We want to use the earliest deadline from ALL vm_clocks */
clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
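+ /*
+ * Timers flagged QEMU_TIMER_ATTR_EXTERNAL drive subsystems outside the
+ * emulated machine, so they are masked out and cannot pull the warp
+ * deadline in.
+ */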
- deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
+ deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
+ ~QEMU_TIMER_ATTR_EXTERNAL);
if (deadline < 0) {
static bool notified;
if (!icount_sleep && !notified) {
{
double pct;
double throttle_ratio;
- long sleeptime_ns;
+ int64_t sleeptime_ns, endtime_ns;
if (!cpu_throttle_get_percentage()) {
return;
pct = (double)cpu_throttle_get_percentage()/100;
throttle_ratio = pct / (1 - pct);
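+ /* e.g. at 75%: ratio = 0.75 / 0.25 = 3, i.e. sleep three timeslices for every one executed */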
- sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);
-
- qemu_mutex_unlock_iothread();
- g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
- qemu_mutex_lock_iothread();
+ /* Add 1ns to compensate for the rounding error of the double arithmetic (e.g. 0.9999999...) */
+ sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 1);
+ endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns;
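+ /*
+ * Sleep in slices, re-checking cpu->stop each iteration: waits of 1ms
+ * or more use the halt condvar so qemu_cpu_kick() can interrupt them;
+ * the sub-millisecond tail falls back to g_usleep().
+ */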
+ while (sleeptime_ns > 0 && !cpu->stop) {
+ if (sleeptime_ns > SCALE_MS) {
+ qemu_cond_timedwait(cpu->halt_cond, &qemu_global_mutex,
+ sleeptime_ns / SCALE_MS);
+ } else {
+ qemu_mutex_unlock_iothread();
+ g_usleep(sleeptime_ns / SCALE_US);
+ qemu_mutex_lock_iothread();
+ }
+ sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+ }
atomic_set(&cpu->throttle_thread_scheduled, 0);
}
return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
}
-/* Kick the currently round-robin scheduled vCPU */
-static void qemu_cpu_kick_rr_cpu(void)
+/* Kick the currently round-robin scheduled vCPU so the next one runs */
+static void qemu_cpu_kick_rr_next_cpu(void)
{
CPUState *cpu;
do {
} while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
}
+/* Kick all RR vCPUs */
+static void qemu_cpu_kick_rr_cpus(void)
+{
+ CPUState *cpu;
+
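+ /* cpu_exit() sets exit_request on every vCPU, so whichever one the
+ * single RR thread is currently executing will break out of its loop. */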
+ CPU_FOREACH(cpu) {
+ cpu_exit(cpu);
+ }
+}
+
static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
{
}
static void kick_tcg_thread(void *opaque)
{
timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
- qemu_cpu_kick_rr_cpu();
+ qemu_cpu_kick_rr_next_cpu();
}
static void start_tcg_kick_timer(void)
}
bdrv_drain_all();
- replay_disable_events();
ret = bdrv_flush_all();
return ret;
}
#endif /* !CONFIG_LINUX */
-static QemuMutex qemu_global_mutex;
-
static QemuThread io_thread;
/* cpu creation */
static void qemu_wait_io_event(CPUState *cpu)
{
+ bool slept = false;
+
while (cpu_thread_is_idle(cpu)) {
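+ /* notify plugins of the idle transition only once per idle period */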
+ if (!slept) {
+ slept = true;
+ qemu_plugin_vcpu_idle_cb(cpu);
+ }
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
}
+ if (slept) {
+ qemu_plugin_vcpu_resume_cb(cpu);
+ }
#ifdef _WIN32
/* Eat dummy APC queued by qemu_cpu_kick_thread. */
int64_t deadline;
if (replay_mode != REPLAY_MODE_PLAY) {
- deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
+ /*
+ * Include all the timers, because they may need attention.
+ * Letting the CPU run for too long would create an unnecessary delay in the UI.
+ */
+ deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
+ QEMU_TIMER_ATTR_ALL);
/* Maintain prior (possibly buggy) behaviour where if no deadline
* was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
{
assert(qemu_in_vcpu_thread());
if (use_icount) {
- int64_t deadline =
- qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
+ int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
+ QEMU_TIMER_ATTR_ALL);
if (deadline == 0) {
/* Wake up other AioContexts. */
{
qemu_cond_broadcast(cpu->halt_cond);
if (tcg_enabled()) {
- cpu_exit(cpu);
- /* NOP unless doing single-thread RR */
- qemu_cpu_kick_rr_cpu();
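+ /*
+ * Under MTTCG each vCPU has its own thread and kicking just this one
+ * is enough; the single-threaded RR loop may currently be executing a
+ * different vCPU, so kick them all and let it move on.
+ */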
+ if (qemu_tcg_mttcg_enabled()) {
+ cpu_exit(cpu);
+ } else {
+ qemu_cpu_kick_rr_cpus();
+ }
} else {
if (hax_enabled()) {
/*
void qemu_init_vcpu(CPUState *cpu)
{
- cpu->nr_cores = smp_cores;
- cpu->nr_threads = smp_threads;
+ MachineState *ms = MACHINE(qdev_get_machine());
+
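+ /* the SMP topology given with -smp now lives in MachineState */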
+ cpu->nr_cores = ms->smp.cores;
+ cpu->nr_threads = ms->smp.threads;
cpu->stopped = true;
cpu->random_seed = qemu_guest_random_seed_thread_part1();
/* We are sending this now, but the CPUs will be resumed shortly later */
qapi_event_send_resume();
- replay_enable_events();
cpu_enable_ticks();
runstate_set(RUN_STATE_RUNNING);
vm_state_notify(1, RUN_STATE_RUNNING);