diff --git a/cpus.c b/cpus.c
index a2d09f363f38e8834746730fb55f478b0aa20eb9..0f7d0eaf3674a09d1ab89a1a8f8cfd0649c082e6 100644
--- a/cpus.c
+++ b/cpus.c
@@ -26,6 +26,7 @@
 #include "config-host.h"
 
 #include "monitor/monitor.h"
+#include "qapi/qmp/qerror.h"
 #include "sysemu/sysemu.h"
 #include "exec/gdbstub.h"
 #include "sysemu/dma.h"
@@ -38,6 +39,8 @@
 #include "qemu/main-loop.h"
 #include "qemu/bitmap.h"
 #include "qemu/seqlock.h"
+#include "qapi-event.h"
+#include "hw/nmi.h"
 
 #ifndef _WIN32
 #include "qemu/compatfd.h"
@@ -62,6 +65,8 @@
 #endif /* CONFIG_LINUX */
 
 static CPUState *next_cpu;
+int64_t max_delay;
+int64_t max_advance;
 
 bool cpu_is_stopped(CPUState *cpu)
 {
@@ -76,7 +81,7 @@ static bool cpu_thread_is_idle(CPUState *cpu)
     if (cpu_is_stopped(cpu)) {
         return true;
     }
-    if (!cpu->halted || qemu_cpu_has_work(cpu) ||
+    if (!cpu->halted || cpu_has_work(cpu) ||
         kvm_halt_in_kernel()) {
         return false;
     }
@@ -98,17 +103,17 @@ static bool all_cpu_threads_idle(void)
 /***********************************************************/
 /* guest cycle counter */
 
+/* Protected by TimersState seqlock */
+
+static int64_t vm_clock_warp_start = -1;
 /* Conversion factor from emulated instructions to virtual clock ticks.  */
 static int icount_time_shift;
 /* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
 #define MAX_ICOUNT_SHIFT 10
-/* Compensate for varying guest execution speed.  */
-static int64_t qemu_icount_bias;
+
 static QEMUTimer *icount_rt_timer;
 static QEMUTimer *icount_vm_timer;
 static QEMUTimer *icount_warp_timer;
-static int64_t vm_clock_warp_start;
-static int64_t qemu_icount;
 
 typedef struct TimersState {
     /* Protected by BQL.  */
@@ -122,61 +127,85 @@ typedef struct TimersState {
     int64_t cpu_clock_offset;
     int32_t cpu_ticks_enabled;
     int64_t dummy;
+
+    /* Compensate for varying guest execution speed.  */
+    int64_t qemu_icount_bias;
+    /* Only written by TCG thread */
+    int64_t qemu_icount;
 } TimersState;
 
 static TimersState timers_state;
 
 /* Return the virtual CPU time, based on the instruction counter.  */
-int64_t cpu_get_icount(void)
+static int64_t cpu_get_icount_locked(void)
 {
     int64_t icount;
     CPUState *cpu = current_cpu;
 
-    icount = qemu_icount;
+    icount = timers_state.qemu_icount;
     if (cpu) {
-        CPUArchState *env = cpu->env_ptr;
-        if (!can_do_io(env)) {
+        if (!cpu_can_do_io(cpu)) {
             fprintf(stderr, "Bad clock read\n");
         }
-        icount -= (env->icount_decr.u16.low + env->icount_extra);
+        icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
     }
-    return qemu_icount_bias + (icount << icount_time_shift);
+    return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
+}
+
+int64_t cpu_get_icount(void)
+{
+    int64_t icount;
+    unsigned start;
+
+    do {
+        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
+        icount = cpu_get_icount_locked();
+    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
+
+    return icount;
+}
+
+int64_t cpu_icount_to_ns(int64_t icount)
+{
+    return icount << icount_time_shift;
 }
 
 /* return the host CPU cycle counter and handle stop/restart */
 /* Caller must hold the BQL */
 int64_t cpu_get_ticks(void)
 {
+    int64_t ticks;
+
     if (use_icount) {
         return cpu_get_icount();
     }
-    if (!timers_state.cpu_ticks_enabled) {
-        return timers_state.cpu_ticks_offset;
-    } else {
-        int64_t ticks;
-        ticks = cpu_get_real_ticks();
-        if (timers_state.cpu_ticks_prev > ticks) {
-            /* Note: non increasing ticks may happen if the host uses
-               software suspend */
-            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
-        }
-        timers_state.cpu_ticks_prev = ticks;
-        return ticks + timers_state.cpu_ticks_offset;
+
+    ticks = timers_state.cpu_ticks_offset;
+    if (timers_state.cpu_ticks_enabled) {
+        ticks += cpu_get_real_ticks();
     }
+
+    if (timers_state.cpu_ticks_prev > ticks) {
+        /* Note: non-increasing ticks may happen if the host uses
+           software suspend */
+        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
+        ticks = timers_state.cpu_ticks_prev;
+    }
+
+    timers_state.cpu_ticks_prev = ticks;
+    return ticks;
 }
 
 static int64_t cpu_get_clock_locked(void)
 {
-    int64_t ti;
+    int64_t ticks;
 
-    if (!timers_state.cpu_ticks_enabled) {
-        ti = timers_state.cpu_clock_offset;
-    } else {
-        ti = get_clock();
-        ti += timers_state.cpu_clock_offset;
+    ticks = timers_state.cpu_clock_offset;
+    if (timers_state.cpu_ticks_enabled) {
+        ticks += get_clock();
     }
 
-    return ti;
+    return ticks;
 }
 
 /* return the host CPU monotonic timer and handle stop/restart */
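
The reader side introduced above (cpu_get_icount) follows the classic seqlock
pattern: the reader samples a sequence counter, reads the protected fields, and
retries if a writer was active in between, while the single writer makes the
counter odd for the duration of its update. A minimal, self-contained sketch of
that pattern in C11 atomics is shown below; it illustrates the technique only,
is not QEMU's qemu/seqlock.h implementation, and the demo_* names are invented
for the example.

    /* Sketch of the seqlock read/retry pattern (not QEMU's seqlock API). */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        atomic_uint sequence;    /* odd while a write is in progress */
        int64_t icount_bias;     /* protected data, written by one writer */
        int64_t icount;
    } demo_seqlock;

    static void demo_write(demo_seqlock *sl, int64_t bias, int64_t icount)
    {
        atomic_fetch_add(&sl->sequence, 1);   /* sequence becomes odd */
        sl->icount_bias = bias;
        sl->icount = icount;
        atomic_fetch_add(&sl->sequence, 1);   /* sequence becomes even again */
    }

    static int64_t demo_read(demo_seqlock *sl)
    {
        unsigned start;
        int64_t bias, icount;

        do {
            /* Mirror of seqlock_read_begin()/seqlock_read_retry(): retry if a
             * write was in progress (odd) or completed while we were reading. */
            start = atomic_load(&sl->sequence);
            bias = sl->icount_bias;
            icount = sl->icount;
        } while ((start & 1) || start != atomic_load(&sl->sequence));

        return bias + icount;
    }

    int main(void)
    {
        demo_seqlock sl = { .sequence = 0, .icount_bias = 0, .icount = 0 };
        demo_write(&sl, 1000, 42);
        printf("%lld\n", (long long)demo_read(&sl));
        return 0;
    }

In cpus.c the writer side is seqlock_write_lock()/seqlock_write_unlock(), taken
by icount_adjust(), icount_warp_rt() and qtest_clock_warp() below; the writers
are serialized externally, which is why seqlock_init() is passed a NULL mutex
in configure_icount() further down.
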
@@ -193,6 +222,23 @@ int64_t cpu_get_clock(void)
     return ti;
 }
 
+/* return the offset between the host clock and virtual CPU clock */
+int64_t cpu_get_clock_offset(void)
+{
+    int64_t ti;
+    unsigned start;
+
+    do {
+        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
+        ti = timers_state.cpu_clock_offset;
+        if (!timers_state.cpu_ticks_enabled) {
+            ti -= get_clock();
+        }
+    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
+
+    return -ti;
+}
+
 /* enable cpu_get_ticks()
  * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
  */
@@ -217,7 +263,7 @@ void cpu_disable_ticks(void)
     /* Here, the thing really protected by seqlock is cpu_clock_offset. */
     seqlock_write_lock(&timers_state.vm_clock_seqlock);
     if (timers_state.cpu_ticks_enabled) {
-        timers_state.cpu_ticks_offset = cpu_get_ticks();
+        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
         timers_state.cpu_clock_offset = cpu_get_clock_locked();
         timers_state.cpu_ticks_enabled = 0;
     }
@@ -235,6 +281,8 @@ static void icount_adjust(void)
     int64_t cur_time;
     int64_t cur_icount;
     int64_t delta;
+
+    /* Protected by TimersState mutex.  */
     static int64_t last_delta;
 
     /* If the VM is not running, then do nothing.  */
@@ -242,8 +290,9 @@ static void icount_adjust(void)
         return;
     }
 
-    cur_time = cpu_get_clock();
-    cur_icount = cpu_get_icount();
+    seqlock_write_lock(&timers_state.vm_clock_seqlock);
+    cur_time = cpu_get_clock_locked();
+    cur_icount = cpu_get_icount_locked();
 
     delta = cur_icount - cur_time;
     /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
@@ -260,7 +309,9 @@ static void icount_adjust(void)
         icount_time_shift++;
     }
     last_delta = delta;
-    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
+    timers_state.qemu_icount_bias = cur_icount
+                              - (timers_state.qemu_icount << icount_time_shift);
+    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
 }
 
 static void icount_adjust_rt(void *opaque)
@@ -285,30 +336,37 @@ static int64_t qemu_icount_round(int64_t count)
 
 static void icount_warp_rt(void *opaque)
 {
-    if (vm_clock_warp_start == -1) {
+    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
+     * changes from -1 to another value, so the race here is okay.
+     */
+    if (atomic_read(&vm_clock_warp_start) == -1) {
         return;
     }
 
+    seqlock_write_lock(&timers_state.vm_clock_seqlock);
     if (runstate_is_running()) {
         int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
-        int64_t warp_delta = clock - vm_clock_warp_start;
-        if (use_icount == 1) {
-            qemu_icount_bias += warp_delta;
-        } else {
+        int64_t warp_delta;
+
+        warp_delta = clock - vm_clock_warp_start;
+        if (use_icount == 2) {
             /*
              * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
              * far ahead of real time.
              */
-            int64_t cur_time = cpu_get_clock();
-            int64_t cur_icount = cpu_get_icount();
+            int64_t cur_time = cpu_get_clock_locked();
+            int64_t cur_icount = cpu_get_icount_locked();
             int64_t delta = cur_time - cur_icount;
-            qemu_icount_bias += MIN(warp_delta, delta);
-        }
-        if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
-            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
+            warp_delta = MIN(warp_delta, delta);
         }
+        timers_state.qemu_icount_bias += warp_delta;
     }
     vm_clock_warp_start = -1;
+    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+
+    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
+        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
+    }
 }
 
 void qtest_clock_warp(int64_t dest)
@@ -317,8 +375,11 @@ void qtest_clock_warp(int64_t dest)
     assert(qtest_enabled());
     while (clock < dest) {
         int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
-        int64_t warp = MIN(dest - clock, deadline);
-        qemu_icount_bias += warp;
+        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
+        seqlock_write_lock(&timers_state.vm_clock_seqlock);
+        timers_state.qemu_icount_bias += warp;
+        seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+
         qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
         clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
     }
@@ -327,6 +388,7 @@ void qtest_clock_warp(int64_t dest)
 
 void qemu_clock_warp(QEMUClockType type)
 {
+    int64_t clock;
     int64_t deadline;
 
     /*
@@ -346,8 +408,8 @@ void qemu_clock_warp(QEMUClockType type)
      * the earliest QEMU_CLOCK_VIRTUAL timer.
      */
     icount_warp_rt(NULL);
-    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(QEMU_CLOCK_VIRTUAL)) {
-        timer_del(icount_warp_timer);
+    timer_del(icount_warp_timer);
+    if (!all_cpu_threads_idle()) {
         return;
     }
 
@@ -356,17 +418,11 @@ void qemu_clock_warp(QEMUClockType type)
        return;
     }
 
-    vm_clock_warp_start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     /* We want to use the earliest deadline from ALL vm_clocks */
+    clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
-
-    /* Maintain prior (possibly buggy) behaviour where if no deadline
-     * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
-     * INT32_MAX nanoseconds ahead, we still use INT32_MAX
-     * nanoseconds.
-     */
-    if ((deadline < 0) || (deadline > INT32_MAX)) {
-        deadline = INT32_MAX;
+    if (deadline < 0) {
+        return;
     }
 
     if (deadline > 0) {
@@ -387,39 +443,83 @@ void qemu_clock_warp(QEMUClockType type)
          * you will not be sending network packets continuously instead of
          * every 100ms.
          */
-        timer_mod(icount_warp_timer, vm_clock_warp_start + deadline);
+        seqlock_write_lock(&timers_state.vm_clock_seqlock);
+        if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
+            vm_clock_warp_start = clock;
+        }
+        seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+        timer_mod_anticipate(icount_warp_timer, clock + deadline);
     } else if (deadline == 0) {
         qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
     }
 }
 
+static bool icount_state_needed(void *opaque)
+{
+    return use_icount;
+}
+
+/*
+ * This is a subsection for icount migration.
+ */
+static const VMStateDescription icount_vmstate_timers = {
+    .name = "timer/icount",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_INT64(qemu_icount_bias, TimersState),
+        VMSTATE_INT64(qemu_icount, TimersState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static const VMStateDescription vmstate_timers = {
     .name = "timer",
     .version_id = 2,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
-    .fields      = (VMStateField[]) {
+    .fields = (VMStateField[]) {
         VMSTATE_INT64(cpu_ticks_offset, TimersState),
         VMSTATE_INT64(dummy, TimersState),
         VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
         VMSTATE_END_OF_LIST()
+    },
+    .subsections = (VMStateSubsection[]) {
+        {
+            .vmsd = &icount_vmstate_timers,
+            .needed = icount_state_needed,
+        }, {
+            /* empty */
+        }
     }
 };
 
-void configure_icount(const char *option)
+void configure_icount(QemuOpts *opts, Error **errp)
 {
+    const char *option;
+    char *rem_str = NULL;
+
     seqlock_init(&timers_state.vm_clock_seqlock, NULL);
     vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
+    option = qemu_opt_get(opts, "shift");
     if (!option) {
+        if (qemu_opt_get(opts, "align") != NULL) {
+            error_setg(errp, "Please specify shift option when using align");
+        }
         return;
     }
-
+    icount_align_option = qemu_opt_get_bool(opts, "align", false);
     icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
                                           icount_warp_rt, NULL);
     if (strcmp(option, "auto") != 0) {
-        icount_time_shift = strtol(option, NULL, 0);
+        errno = 0;
+        icount_time_shift = strtol(option, &rem_str, 0);
+        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
+            error_setg(errp, "icount: Invalid shift value");
+        }
         use_icount = 1;
         return;
+    } else if (icount_align_option) {
+        error_setg(errp, "shift=auto and align=on are incompatible");
     }
 
     use_icount = 2;
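
On the command line these QemuOpts correspond to the suboptions of -icount: for
instance, -icount shift=7 makes every executed guest instruction account for
2^7 = 128 ns of QEMU_CLOCK_VIRTUAL time (see cpu_icount_to_ns() above), and
-icount shift=7,align=on additionally enables the host/guest alignment
statistics reported by dump_drift_info() below, while -icount shift=auto,align=on
and a bare align=on without a shift value are rejected with the errors shown
above.
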
@@ -498,7 +598,7 @@ static int do_vm_stop(RunState state)
         pause_all_vcpus();
         runstate_set(state);
         vm_state_notify(0, state);
-        monitor_protocol_event(QEVENT_STOP, NULL);
+        qapi_event_send_stop(&error_abort);
     }
 
     bdrv_drain_all();
@@ -1083,16 +1183,25 @@ void resume_all_vcpus(void)
     }
 }
 
+/* For temporary buffers for forming a name */
+#define VCPU_THREAD_NAME_SIZE 16
+
 static void qemu_tcg_init_vcpu(CPUState *cpu)
 {
+    char thread_name[VCPU_THREAD_NAME_SIZE];
+
+    tcg_cpu_address_space_init(cpu, cpu->as);
+
     /* share a single thread for all cpus with TCG */
     if (!tcg_cpu_thread) {
         cpu->thread = g_malloc0(sizeof(QemuThread));
         cpu->halt_cond = g_malloc0(sizeof(QemuCond));
         qemu_cond_init(cpu->halt_cond);
         tcg_halt_cond = cpu->halt_cond;
-        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
-                           QEMU_THREAD_JOINABLE);
+        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
+                 cpu->cpu_index);
+        qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
+                           cpu, QEMU_THREAD_JOINABLE);
 #ifdef _WIN32
         cpu->hThread = qemu_thread_get_handle(cpu->thread);
 #endif
@@ -1108,11 +1217,15 @@ static void qemu_tcg_init_vcpu(CPUState *cpu)
 
 static void qemu_kvm_start_vcpu(CPUState *cpu)
 {
+    char thread_name[VCPU_THREAD_NAME_SIZE];
+
     cpu->thread = g_malloc0(sizeof(QemuThread));
     cpu->halt_cond = g_malloc0(sizeof(QemuCond));
     qemu_cond_init(cpu->halt_cond);
-    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, cpu,
-                       QEMU_THREAD_JOINABLE);
+    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
+             cpu->cpu_index);
+    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
+                       cpu, QEMU_THREAD_JOINABLE);
     while (!cpu->created) {
         qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
     }
@@ -1120,10 +1233,14 @@ static void qemu_kvm_start_vcpu(CPUState *cpu)
 
 static void qemu_dummy_start_vcpu(CPUState *cpu)
 {
+    char thread_name[VCPU_THREAD_NAME_SIZE];
+
     cpu->thread = g_malloc0(sizeof(QemuThread));
     cpu->halt_cond = g_malloc0(sizeof(QemuCond));
     qemu_cond_init(cpu->halt_cond);
-    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, cpu,
+    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
+             cpu->cpu_index);
+    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                        QEMU_THREAD_JOINABLE);
     while (!cpu->created) {
         qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
@@ -1157,6 +1274,7 @@ void cpu_stop_current(void)
 int vm_stop(RunState state)
 {
     if (qemu_in_vcpu_thread()) {
+        qemu_system_vmstop_request_prepare();
         qemu_system_vmstop_request(state);
         /*
          * FIXME: should not return to device code in case
@@ -1185,6 +1303,7 @@ int vm_stop_force_state(RunState state)
 
 static int tcg_cpu_exec(CPUArchState *env)
 {
+    CPUState *cpu = ENV_GET_CPU(env);
     int ret;
 #ifdef CONFIG_PROFILER
     int64_t ti;
@@ -1197,9 +1316,10 @@ static int tcg_cpu_exec(CPUArchState *env)
         int64_t count;
         int64_t deadline;
         int decr;
-        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
-        env->icount_decr.u16.low = 0;
-        env->icount_extra = 0;
+        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
+                                    + cpu->icount_extra);
+        cpu->icount_decr.u16.low = 0;
+        cpu->icount_extra = 0;
         deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
 
         /* Maintain prior (possibly buggy) behaviour where if no deadline
@@ -1212,11 +1332,11 @@ static int tcg_cpu_exec(CPUArchState *env)
         }
 
         count = qemu_icount_round(deadline);
-        qemu_icount += count;
+        timers_state.qemu_icount += count;
         decr = (count > 0xffff) ? 0xffff : count;
         count -= decr;
-        env->icount_decr.u16.low = decr;
-        env->icount_extra = count;
+        cpu->icount_decr.u16.low = decr;
+        cpu->icount_extra = count;
     }
     ret = cpu_exec(env);
 #ifdef CONFIG_PROFILER
@@ -1225,10 +1345,10 @@ static int tcg_cpu_exec(CPUArchState *env)
     if (use_icount) {
         /* Fold pending instructions back into the
            instruction counter, and clear the interrupt flag.  */
-        qemu_icount -= (env->icount_decr.u16.low
-                        + env->icount_extra);
-        env->icount_decr.u32 = 0;
-        env->icount_extra = 0;
+        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
+                        + cpu->icount_extra);
+        cpu->icount_decr.u32 = 0;
+        cpu->icount_extra = 0;
     }
     return ret;
 }
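
The icount budget handed to cpu_exec() above is split between the 16-bit
decrementer and icount_extra: if qemu_icount_round(deadline) yields, say,
100000 instructions, icount_decr.u16.low is loaded with 0xffff (65535) and the
remaining 34465 go into icount_extra, from which cpu_exec() tops the
decrementer back up when it expires; whatever is left of both when cpu_exec()
returns is folded back out of timers_state.qemu_icount, so the virtual clock
only advances by the instructions that actually executed.
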
@@ -1263,20 +1383,6 @@ static void tcg_exec_all(void)
     exit_request = 0;
 }
 
-void set_numa_modes(void)
-{
-    CPUState *cpu;
-    int i;
-
-    CPU_FOREACH(cpu) {
-        for (i = 0; i < nb_numa_nodes; i++) {
-            if (test_bit(cpu->cpu_index, node_cpumask[i])) {
-                cpu->numa_node = i;
-            }
-        }
-    }
-}
-
 void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
 {
     /* XXX: implement xxx_cpu_list for targets that still miss it */
@@ -1304,6 +1410,9 @@ CpuInfoList *qmp_query_cpus(Error **errp)
 #elif defined(TARGET_MIPS)
         MIPSCPU *mips_cpu = MIPS_CPU(cpu);
         CPUMIPSState *env = &mips_cpu->env;
+#elif defined(TARGET_TRICORE)
+        TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
+        CPUTriCoreState *env = &tricore_cpu->env;
 #endif
 
         cpu_synchronize_state(cpu);
@@ -1328,6 +1437,9 @@ CpuInfoList *qmp_query_cpus(Error **errp)
 #elif defined(TARGET_MIPS)
         info->value->has_PC = true;
         info->value->PC = env->active_tc.PC;
+#elif defined(TARGET_TRICORE)
+        info->value->has_PC = true;
+        info->value->PC = env->PC;
 #endif
 
         /* XXX: waiting for the qapi to support GSList */
@@ -1371,7 +1483,10 @@ void qmp_memsave(int64_t addr, int64_t size, const char *filename,
         l = sizeof(buf);
         if (l > size)
             l = size;
-        cpu_memory_rw_debug(cpu, addr, buf, l, 0);
+        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
+            error_setg(errp, "Invalid addr 0x%016" PRIx64 " specified", addr);
+            goto exit;
+        }
         if (fwrite(buf, 1, l, f) != l) {
             error_set(errp, QERR_IO_ERROR);
             goto exit;
@@ -1401,7 +1516,7 @@ void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
         l = sizeof(buf);
         if (l > size)
             l = size;
-        cpu_physical_memory_rw(addr, buf, l, 0);
+        cpu_physical_memory_read(addr, buf, l);
         if (fwrite(buf, 1, l, f) != l) {
             error_set(errp, QERR_IO_ERROR);
             goto exit;
@@ -1421,29 +1536,31 @@ void qmp_inject_nmi(Error **errp)
 
     CPU_FOREACH(cs) {
         X86CPU *cpu = X86_CPU(cs);
-        CPUX86State *env = &cpu->env;
 
-        if (!env->apic_state) {
+        if (!cpu->apic_state) {
             cpu_interrupt(cs, CPU_INTERRUPT_NMI);
         } else {
-            apic_deliver_nmi(env->apic_state);
-        }
-    }
-#elif defined(TARGET_S390X)
-    CPUState *cs;
-    S390CPU *cpu;
-
-    CPU_FOREACH(cs) {
-        cpu = S390_CPU(cs);
-        if (cpu->env.cpu_num == monitor_get_cpu_index()) {
-            if (s390_cpu_restart(S390_CPU(cs)) == -1) {
-                error_set(errp, QERR_UNSUPPORTED);
-                return;
-            }
-            break;
+            apic_deliver_nmi(cpu->apic_state);
         }
     }
 #else
-    error_set(errp, QERR_UNSUPPORTED);
+    nmi_monitor_handle(monitor_get_cpu_index(), errp);
 #endif
 }
+
+void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
+{
+    if (!use_icount) {
+        return;
+    }
+
+    cpu_fprintf(f, "Host - Guest clock  %"PRIi64" ms\n",
+                (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
+    if (icount_align_option) {
+        cpu_fprintf(f, "Max guest delay     %"PRIi64" ms\n", -max_delay/SCALE_MS);
+        cpu_fprintf(f, "Max guest advance   %"PRIi64" ms\n", max_advance/SCALE_MS);
+    } else {
+        cpu_fprintf(f, "Max guest delay     NA\n");
+        cpu_fprintf(f, "Max guest advance   NA\n");
+    }
+}
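
To give a sense of scale for the numbers printed here: with -icount shift=10,
each guest instruction accounts for 2^10 = 1024 ns, so roughly one million
executed instructions advance the guest clock by about 1.02 ms; if the host
needed, say, 5 ms of real time to emulate them, the "Host - Guest clock" line
would report a drift of roughly 4 ms, and with align=on the largest such
excursions are reported as max_delay and max_advance.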