1 /*
2  * QEMU System Emulator
3  *
4  * Copyright (c) 2003-2008 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24
25 #include "qemu/osdep.h"
26 #include "qemu/config-file.h"
27 #include "cpu.h"
28 #include "monitor/monitor.h"
29 #include "qapi/error.h"
30 #include "qapi/qapi-commands-misc.h"
31 #include "qapi/qapi-events-run-state.h"
32 #include "qapi/qmp/qerror.h"
33 #include "qemu/error-report.h"
34 #include "sysemu/sysemu.h"
35 #include "sysemu/block-backend.h"
36 #include "exec/gdbstub.h"
37 #include "sysemu/dma.h"
38 #include "sysemu/hw_accel.h"
39 #include "sysemu/kvm.h"
40 #include "sysemu/hax.h"
41 #include "sysemu/hvf.h"
42 #include "sysemu/whpx.h"
43 #include "exec/exec-all.h"
44
45 #include "qemu/thread.h"
46 #include "sysemu/cpus.h"
47 #include "sysemu/qtest.h"
48 #include "qemu/main-loop.h"
49 #include "qemu/option.h"
50 #include "qemu/bitmap.h"
51 #include "qemu/seqlock.h"
52 #include "tcg.h"
53 #include "hw/nmi.h"
54 #include "sysemu/replay.h"
55 #include "hw/boards.h"
56
57 #ifdef CONFIG_LINUX
58
59 #include <sys/prctl.h>
60
61 #ifndef PR_MCE_KILL
62 #define PR_MCE_KILL 33
63 #endif
64
65 #ifndef PR_MCE_KILL_SET
66 #define PR_MCE_KILL_SET 1
67 #endif
68
69 #ifndef PR_MCE_KILL_EARLY
70 #define PR_MCE_KILL_EARLY 1
71 #endif
72
73 #endif /* CONFIG_LINUX */
74
75 int64_t max_delay;
76 int64_t max_advance;
77
78 /* vcpu throttling controls */
79 static QEMUTimer *throttle_timer;
80 static unsigned int throttle_percentage;
81
82 #define CPU_THROTTLE_PCT_MIN 1
83 #define CPU_THROTTLE_PCT_MAX 99
84 #define CPU_THROTTLE_TIMESLICE_NS 10000000
85
86 bool cpu_is_stopped(CPUState *cpu)
87 {
88     return cpu->stopped || !runstate_is_running();
89 }
90
91 static bool cpu_thread_is_idle(CPUState *cpu)
92 {
93     if (cpu->stop || cpu->queued_work_first) {
94         return false;
95     }
96     if (cpu_is_stopped(cpu)) {
97         return true;
98     }
99     if (!cpu->halted || cpu_has_work(cpu) ||
100         kvm_halt_in_kernel()) {
101         return false;
102     }
103     return true;
104 }
105
106 static bool all_cpu_threads_idle(void)
107 {
108     CPUState *cpu;
109
110     CPU_FOREACH(cpu) {
111         if (!cpu_thread_is_idle(cpu)) {
112             return false;
113         }
114     }
115     return true;
116 }
117
118 /***********************************************************/
119 /* guest cycle counter */
120
121 /* Protected by TimersState seqlock */
122
123 static bool icount_sleep = true;
124 /* Conversion factor from emulated instructions to virtual clock ticks.  */
125 static int icount_time_shift;
126 /* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
127 #define MAX_ICOUNT_SHIFT 10
128
129 typedef struct TimersState {
130     /* Protected by BQL.  */
131     int64_t cpu_ticks_prev;
132     int64_t cpu_ticks_offset;
133
134     /* cpu_clock_offset can be read outside the BQL, so protect it with
135      * this lock.
136      */
137     QemuSeqLock vm_clock_seqlock;
138     int64_t cpu_clock_offset;
139     int32_t cpu_ticks_enabled;
140     int64_t dummy;
141
142     /* Compensate for varying guest execution speed.  */
143     int64_t qemu_icount_bias;
144     /* Only written by TCG thread */
145     int64_t qemu_icount;
146     /* for adjusting icount */
147     int64_t vm_clock_warp_start;
148     QEMUTimer *icount_rt_timer;
149     QEMUTimer *icount_vm_timer;
150     QEMUTimer *icount_warp_timer;
151 } TimersState;
152
153 static TimersState timers_state;
154 bool mttcg_enabled;
155
156 /*
157  * We default to false if we know other options have been enabled
158  * which are currently incompatible with MTTCG. Otherwise, once a
159  * guest (target) has been updated to support:
160  *   - atomic instructions
161  *   - memory ordering primitives (barriers)
162  * it can set the appropriate CONFIG flags in ${target}-softmmu.mak
163  *
164  * Once a guest architecture has been converted to the new primitives
165  * there are two remaining limitations to check.
166  *
167  * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
168  * - The host must have a stronger memory order than the guest
169  *
170  * It may be possible in future to support strong guests on weak hosts
171  * but that will require tagging all load/stores in a guest with their
172  * implicit memory order requirements which would likely slow things
173  * down a lot.
174  */
175
176 static bool check_tcg_memory_orders_compatible(void)
177 {
178 #if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
179     return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
180 #else
181     return false;
182 #endif
183 }
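/*
 * Reading the check above: the guest's default memory-ordering requirements
 * (TCG_GUEST_DEFAULT_MO) must be a subset of what the TCG backend provides
 * (TCG_TARGET_DEFAULT_MO).  Masking out the host's guarantees must leave no
 * bits set; any remaining bit is an ordering constraint the host cannot
 * honour without extra barriers, so MTTCG is refused by default.
 */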
184
185 static bool default_mttcg_enabled(void)
186 {
187     if (use_icount || TCG_OVERSIZED_GUEST) {
188         return false;
189     } else {
190 #ifdef TARGET_SUPPORTS_MTTCG
191         return check_tcg_memory_orders_compatible();
192 #else
193         return false;
194 #endif
195     }
196 }
197
198 void qemu_tcg_configure(QemuOpts *opts, Error **errp)
199 {
200     const char *t = qemu_opt_get(opts, "thread");
201     if (t) {
202         if (strcmp(t, "multi") == 0) {
203             if (TCG_OVERSIZED_GUEST) {
204                 error_setg(errp, "No MTTCG when guest word size > host's");
205             } else if (use_icount) {
206                 error_setg(errp, "No MTTCG when icount is enabled");
207             } else {
208 #ifndef TARGET_SUPPORTS_MTTCG
209                 error_report("Guest not yet converted to MTTCG - "
210                              "you may get unexpected results");
211 #endif
212                 if (!check_tcg_memory_orders_compatible()) {
213                     error_report("Guest expects a stronger memory ordering "
214                                  "than the host provides");
215                     error_printf("This may cause strange/hard to debug errors\n");
216                 }
217                 mttcg_enabled = true;
218             }
219         } else if (strcmp(t, "single") == 0) {
220             mttcg_enabled = false;
221         } else {
222             error_setg(errp, "Invalid 'thread' setting %s", t);
223         }
224     } else {
225         mttcg_enabled = default_mttcg_enabled();
226     }
227 }
228
229 /* The current number of executed instructions is based on what we
230  * originally budgeted minus the current state of the decrementing
231  * icount counters in extra/u16.low.
232  */
233 static int64_t cpu_get_icount_executed(CPUState *cpu)
234 {
235     return cpu->icount_budget - (cpu->icount_decr.u16.low + cpu->icount_extra);
236 }
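/*
 * Worked example: with an icount_budget of 10000 instructions, a remaining
 * icount_decr.u16.low of 200 and an icount_extra of 300, the vCPU has so
 * far executed 10000 - (200 + 300) = 9500 instructions.
 */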
237
238 /*
239  * Update the global shared timer_state.qemu_icount to take into
240  * account executed instructions. This is done by the TCG vCPU
241  * thread so the main-loop can see time has moved forward.
242  */
243 void cpu_update_icount(CPUState *cpu)
244 {
245     int64_t executed = cpu_get_icount_executed(cpu);
246     cpu->icount_budget -= executed;
247
248 #ifdef CONFIG_ATOMIC64
249     atomic_set__nocheck(&timers_state.qemu_icount,
250                         atomic_read__nocheck(&timers_state.qemu_icount) +
251                         executed);
252 #else /* FIXME: we need 64bit atomics to do this safely */
253     timers_state.qemu_icount += executed;
254 #endif
255 }
256
257 int64_t cpu_get_icount_raw(void)
258 {
259     CPUState *cpu = current_cpu;
260
261     if (cpu && cpu->running) {
262         if (!cpu->can_do_io) {
263             error_report("Bad icount read");
264             exit(1);
265         }
266         /* Take into account what has run */
267         cpu_update_icount(cpu);
268     }
269 #ifdef CONFIG_ATOMIC64
270     return atomic_read__nocheck(&timers_state.qemu_icount);
271 #else /* FIXME: we need 64bit atomics to do this safely */
272     return timers_state.qemu_icount;
273 #endif
274 }
275
276 /* Return the virtual CPU time, based on the instruction counter.  */
277 static int64_t cpu_get_icount_locked(void)
278 {
279     int64_t icount = cpu_get_icount_raw();
280     return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
281 }
282
283 int64_t cpu_get_icount(void)
284 {
285     int64_t icount;
286     unsigned start;
287
288     do {
289         start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
290         icount = cpu_get_icount_locked();
291     } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
292
293     return icount;
294 }
295
296 int64_t cpu_icount_to_ns(int64_t icount)
297 {
298     return icount << icount_time_shift;
299 }
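/*
 * Worked example: with icount_time_shift == 3 each instruction accounts for
 * 1 << 3 = 8 ns of virtual time, i.e. the guest appears to run at roughly
 * 125 million instructions per second (the initial guess used by
 * configure_icount() below).
 */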
300
301 /* return the time elapsed in VM between vm_start and vm_stop.  Unless
302  * icount is active, cpu_get_ticks() uses units of the host CPU cycle
303  * counter.
304  *
305  * Caller must hold the BQL
306  */
307 int64_t cpu_get_ticks(void)
308 {
309     int64_t ticks;
310
311     if (use_icount) {
312         return cpu_get_icount();
313     }
314
315     ticks = timers_state.cpu_ticks_offset;
316     if (timers_state.cpu_ticks_enabled) {
317         ticks += cpu_get_host_ticks();
318     }
319
320     if (timers_state.cpu_ticks_prev > ticks) {
321         /* Note: non-increasing ticks may happen if the host uses
322            software suspend */
323         timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
324         ticks = timers_state.cpu_ticks_prev;
325     }
326
327     timers_state.cpu_ticks_prev = ticks;
328     return ticks;
329 }
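/*
 * Worked example of the clamp above: if cpu_ticks_prev was 1000 and the
 * newly computed value is 900 (the host counter went backwards, e.g. across
 * a software suspend), cpu_ticks_offset is bumped by 100 and 1000 is
 * returned, so the guest-visible tick counter never decreases.
 */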
330
331 static int64_t cpu_get_clock_locked(void)
332 {
333     int64_t time;
334
335     time = timers_state.cpu_clock_offset;
336     if (timers_state.cpu_ticks_enabled) {
337         time += get_clock();
338     }
339
340     return time;
341 }
342
343 /* Return the monotonic time elapsed in VM, i.e.,
344  * the time between vm_start and vm_stop
345  */
346 int64_t cpu_get_clock(void)
347 {
348     int64_t ti;
349     unsigned start;
350
351     do {
352         start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
353         ti = cpu_get_clock_locked();
354     } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
355
356     return ti;
357 }
358
359 /* enable cpu_get_ticks()
360  * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
361  */
362 void cpu_enable_ticks(void)
363 {
364     /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
365     seqlock_write_begin(&timers_state.vm_clock_seqlock);
366     if (!timers_state.cpu_ticks_enabled) {
367         timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
368         timers_state.cpu_clock_offset -= get_clock();
369         timers_state.cpu_ticks_enabled = 1;
370     }
371     seqlock_write_end(&timers_state.vm_clock_seqlock);
372 }
373
374 /* disable cpu_get_ticks(): the clock is stopped. You must not call
375  * cpu_get_ticks() after that.
376  * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
377  */
378 void cpu_disable_ticks(void)
379 {
380     /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
381     seqlock_write_begin(&timers_state.vm_clock_seqlock);
382     if (timers_state.cpu_ticks_enabled) {
383         timers_state.cpu_ticks_offset += cpu_get_host_ticks();
384         timers_state.cpu_clock_offset = cpu_get_clock_locked();
385         timers_state.cpu_ticks_enabled = 0;
386     }
387     seqlock_write_end(&timers_state.vm_clock_seqlock);
388 }
389
390 /* Correlation between real and virtual time is always going to be
391    fairly approximate, so ignore small variation.
392    When the guest is idle real and virtual time will be aligned in
393    the IO wait loop.  */
394 #define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)
395
396 static void icount_adjust(void)
397 {
398     int64_t cur_time;
399     int64_t cur_icount;
400     int64_t delta;
401
402     /* Protected by TimersState mutex.  */
403     static int64_t last_delta;
404
405     /* If the VM is not running, then do nothing.  */
406     if (!runstate_is_running()) {
407         return;
408     }
409
410     seqlock_write_begin(&timers_state.vm_clock_seqlock);
411     cur_time = cpu_get_clock_locked();
412     cur_icount = cpu_get_icount_locked();
413
414     delta = cur_icount - cur_time;
415     /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
416     if (delta > 0
417         && last_delta + ICOUNT_WOBBLE < delta * 2
418         && icount_time_shift > 0) {
419         /* The guest is getting too far ahead.  Slow time down.  */
420         icount_time_shift--;
421     }
422     if (delta < 0
423         && last_delta - ICOUNT_WOBBLE > delta * 2
424         && icount_time_shift < MAX_ICOUNT_SHIFT) {
425         /* The guest is getting too far behind.  Speed time up.  */
426         icount_time_shift++;
427     }
428     last_delta = delta;
429     timers_state.qemu_icount_bias = cur_icount
430                               - (timers_state.qemu_icount << icount_time_shift);
431     seqlock_write_end(&timers_state.vm_clock_seqlock);
432 }
433
434 static void icount_adjust_rt(void *opaque)
435 {
436     timer_mod(timers_state.icount_rt_timer,
437               qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
438     icount_adjust();
439 }
440
441 static void icount_adjust_vm(void *opaque)
442 {
443     timer_mod(timers_state.icount_vm_timer,
444                    qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
445                    NANOSECONDS_PER_SECOND / 10);
446     icount_adjust();
447 }
448
449 static int64_t qemu_icount_round(int64_t count)
450 {
451     return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
452 }
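/*
 * Worked example: this is a ceiling division by 2^icount_time_shift,
 * converting nanoseconds back into instructions.  With a shift of 3, a
 * 1000 ns deadline becomes (1000 + 7) >> 3 = 125 instructions, and a
 * 1001 ns deadline rounds up to 126.
 */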
453
454 static void icount_warp_rt(void)
455 {
456     unsigned seq;
457     int64_t warp_start;
458
459     /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
460      * changes from -1 to another value, so the race here is okay.
461      */
462     do {
463         seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
464         warp_start = timers_state.vm_clock_warp_start;
465     } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
466
467     if (warp_start == -1) {
468         return;
469     }
470
471     seqlock_write_begin(&timers_state.vm_clock_seqlock);
472     if (runstate_is_running()) {
473         int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
474                                      cpu_get_clock_locked());
475         int64_t warp_delta;
476
477         warp_delta = clock - timers_state.vm_clock_warp_start;
478         if (use_icount == 2) {
479             /*
480              * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
481              * far ahead of real time.
482              */
483             int64_t cur_icount = cpu_get_icount_locked();
484             int64_t delta = clock - cur_icount;
485             warp_delta = MIN(warp_delta, delta);
486         }
487         timers_state.qemu_icount_bias += warp_delta;
488     }
489     timers_state.vm_clock_warp_start = -1;
490     seqlock_write_end(&timers_state.vm_clock_seqlock);
491
492     if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
493         qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
494     }
495 }
496
497 static void icount_timer_cb(void *opaque)
498 {
499     /* No need for a checkpoint because the timer already synchronizes
500      * with CHECKPOINT_CLOCK_VIRTUAL_RT.
501      */
502     icount_warp_rt();
503 }
504
505 void qtest_clock_warp(int64_t dest)
506 {
507     int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
508     AioContext *aio_context;
509     assert(qtest_enabled());
510     aio_context = qemu_get_aio_context();
511     while (clock < dest) {
512         int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
513         int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
514
515         seqlock_write_begin(&timers_state.vm_clock_seqlock);
516         timers_state.qemu_icount_bias += warp;
517         seqlock_write_end(&timers_state.vm_clock_seqlock);
518
519         qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
520         timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
521         clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
522     }
523     qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
524 }
525
526 void qemu_start_warp_timer(void)
527 {
528     int64_t clock;
529     int64_t deadline;
530
531     if (!use_icount) {
532         return;
533     }
534
535     /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
536      * do not fire, so computing the deadline does not make sense.
537      */
538     if (!runstate_is_running()) {
539         return;
540     }
541
542     /* warp clock deterministically in record/replay mode */
543     if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
544         return;
545     }
546
547     if (!all_cpu_threads_idle()) {
548         return;
549     }
550
551     if (qtest_enabled()) {
552         /* When testing, qtest commands advance icount.  */
553         return;
554     }
555
556     /* We want to use the earliest deadline from ALL vm_clocks */
557     clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
558     deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
559     if (deadline < 0) {
560         static bool notified;
561         if (!icount_sleep && !notified) {
562             warn_report("icount sleep disabled and no active timers");
563             notified = true;
564         }
565         return;
566     }
567
568     if (deadline > 0) {
569         /*
570          * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
571          * sleep.  Otherwise, the CPU might be waiting for a future timer
572          * interrupt to wake it up, but the interrupt never comes because
573          * the vCPU isn't running any insns and thus doesn't advance the
574          * QEMU_CLOCK_VIRTUAL.
575          */
576         if (!icount_sleep) {
577             /*
578              * We never let VCPUs sleep in no sleep icount mode.
579              * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
580              * to the next QEMU_CLOCK_VIRTUAL event and notify it.
581              * It is useful when we want a deterministic execution time,
582              * isolated from host latencies.
583              */
584             seqlock_write_begin(&timers_state.vm_clock_seqlock);
585             timers_state.qemu_icount_bias += deadline;
586             seqlock_write_end(&timers_state.vm_clock_seqlock);
587             qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
588         } else {
589             /*
590              * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
591              * "real" time (related to the time left until the next event) has
592              * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
593              * This prevents the warps from being visible externally; for
594              * example, you will not be sending network packets continuously
595              * instead of every 100ms.
596              */
597             seqlock_write_begin(&timers_state.vm_clock_seqlock);
598             if (timers_state.vm_clock_warp_start == -1
599                 || timers_state.vm_clock_warp_start > clock) {
600                 timers_state.vm_clock_warp_start = clock;
601             }
602             seqlock_write_end(&timers_state.vm_clock_seqlock);
603             timer_mod_anticipate(timers_state.icount_warp_timer,
604                                  clock + deadline);
605         }
606     } else if (deadline == 0) {
607         qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
608     }
609 }
610
611 static void qemu_account_warp_timer(void)
612 {
613     if (!use_icount || !icount_sleep) {
614         return;
615     }
616
617     /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
618      * do not fire, so computing the deadline does not make sense.
619      */
620     if (!runstate_is_running()) {
621         return;
622     }
623
624     /* warp clock deterministically in record/replay mode */
625     if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
626         return;
627     }
628
629     timer_del(timers_state.icount_warp_timer);
630     icount_warp_rt();
631 }
632
633 static bool icount_state_needed(void *opaque)
634 {
635     return use_icount;
636 }
637
638 static bool warp_timer_state_needed(void *opaque)
639 {
640     TimersState *s = opaque;
641     return s->icount_warp_timer != NULL;
642 }
643
644 static bool adjust_timers_state_needed(void *opaque)
645 {
646     TimersState *s = opaque;
647     return s->icount_rt_timer != NULL;
648 }
649
650 /*
651  * Subsection for warp timer migration is optional, because the timer may not be created
652  */
653 static const VMStateDescription icount_vmstate_warp_timer = {
654     .name = "timer/icount/warp_timer",
655     .version_id = 1,
656     .minimum_version_id = 1,
657     .needed = warp_timer_state_needed,
658     .fields = (VMStateField[]) {
659         VMSTATE_INT64(vm_clock_warp_start, TimersState),
660         VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
661         VMSTATE_END_OF_LIST()
662     }
663 };
664
665 static const VMStateDescription icount_vmstate_adjust_timers = {
666     .name = "timer/icount/timers",
667     .version_id = 1,
668     .minimum_version_id = 1,
669     .needed = adjust_timers_state_needed,
670     .fields = (VMStateField[]) {
671         VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
672         VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
673         VMSTATE_END_OF_LIST()
674     }
675 };
676
677 /*
678  * This is a subsection for icount migration.
679  */
680 static const VMStateDescription icount_vmstate_timers = {
681     .name = "timer/icount",
682     .version_id = 1,
683     .minimum_version_id = 1,
684     .needed = icount_state_needed,
685     .fields = (VMStateField[]) {
686         VMSTATE_INT64(qemu_icount_bias, TimersState),
687         VMSTATE_INT64(qemu_icount, TimersState),
688         VMSTATE_END_OF_LIST()
689     },
690     .subsections = (const VMStateDescription*[]) {
691         &icount_vmstate_warp_timer,
692         &icount_vmstate_adjust_timers,
693         NULL
694     }
695 };
696
697 static const VMStateDescription vmstate_timers = {
698     .name = "timer",
699     .version_id = 2,
700     .minimum_version_id = 1,
701     .fields = (VMStateField[]) {
702         VMSTATE_INT64(cpu_ticks_offset, TimersState),
703         VMSTATE_INT64(dummy, TimersState),
704         VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
705         VMSTATE_END_OF_LIST()
706     },
707     .subsections = (const VMStateDescription*[]) {
708         &icount_vmstate_timers,
709         NULL
710     }
711 };
712
713 static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
714 {
715     double pct;
716     double throttle_ratio;
717     long sleeptime_ns;
718
719     if (!cpu_throttle_get_percentage()) {
720         return;
721     }
722
723     pct = (double)cpu_throttle_get_percentage()/100;
724     throttle_ratio = pct / (1 - pct);
725     sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);
726
727     qemu_mutex_unlock_iothread();
728     g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
729     qemu_mutex_lock_iothread();
730     atomic_set(&cpu->throttle_thread_scheduled, 0);
731 }
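/*
 * Worked example of the sleep computation: at a throttle setting of 75%,
 * pct = 0.75 and throttle_ratio = 0.75 / 0.25 = 3, so the vCPU sleeps
 * 3 * 10 ms = 30 ms for every 10 ms timeslice it runs, i.e. it executes
 * for roughly 25% of wall-clock time.
 */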
732
733 static void cpu_throttle_timer_tick(void *opaque)
734 {
735     CPUState *cpu;
736     double pct;
737
738     /* Stop the timer if needed */
739     if (!cpu_throttle_get_percentage()) {
740         return;
741     }
742     CPU_FOREACH(cpu) {
743         if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
744             async_run_on_cpu(cpu, cpu_throttle_thread,
745                              RUN_ON_CPU_NULL);
746         }
747     }
748
749     pct = (double)cpu_throttle_get_percentage()/100;
750     timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
751                                    CPU_THROTTLE_TIMESLICE_NS / (1-pct));
752 }
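/*
 * Worked example of the timer period: at 75% throttle the timer re-arms
 * every 10 ms / (1 - 0.75) = 40 ms, which matches one 10 ms run slice plus
 * the 30 ms sleep injected by cpu_throttle_thread() above.
 */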
753
754 void cpu_throttle_set(int new_throttle_pct)
755 {
756     /* Ensure throttle percentage is within valid range */
757     new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
758     new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);
759
760     atomic_set(&throttle_percentage, new_throttle_pct);
761
762     timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
763                                        CPU_THROTTLE_TIMESLICE_NS);
764 }
765
766 void cpu_throttle_stop(void)
767 {
768     atomic_set(&throttle_percentage, 0);
769 }
770
771 bool cpu_throttle_active(void)
772 {
773     return (cpu_throttle_get_percentage() != 0);
774 }
775
776 int cpu_throttle_get_percentage(void)
777 {
778     return atomic_read(&throttle_percentage);
779 }
780
781 void cpu_ticks_init(void)
782 {
783     seqlock_init(&timers_state.vm_clock_seqlock);
784     vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
785     throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
786                                            cpu_throttle_timer_tick, NULL);
787 }
788
789 void configure_icount(QemuOpts *opts, Error **errp)
790 {
791     const char *option;
792     char *rem_str = NULL;
793
794     option = qemu_opt_get(opts, "shift");
795     if (!option) {
796         if (qemu_opt_get(opts, "align") != NULL) {
797             error_setg(errp, "Please specify shift option when using align");
798         }
799         return;
800     }
801
802     icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
803     if (icount_sleep) {
804         timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
805                                          icount_timer_cb, NULL);
806     }
807
808     icount_align_option = qemu_opt_get_bool(opts, "align", false);
809
810     if (icount_align_option && !icount_sleep) {
811         error_setg(errp, "align=on and sleep=off are incompatible");
812     }
813     if (strcmp(option, "auto") != 0) {
814         errno = 0;
815         icount_time_shift = strtol(option, &rem_str, 0);
816         if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
817             error_setg(errp, "icount: Invalid shift value");
818         }
819         use_icount = 1;
820         return;
821     } else if (icount_align_option) {
822         error_setg(errp, "shift=auto and align=on are incompatible");
823     } else if (!icount_sleep) {
824         error_setg(errp, "shift=auto and sleep=off are incompatible");
825     }
826
827     use_icount = 2;
828
829     /* 125MIPS seems a reasonable initial guess at the guest speed.
830        It will be corrected fairly quickly anyway.  */
831     icount_time_shift = 3;
832
833     /* Have both realtime and virtual time triggers for speed adjustment.
834        The realtime trigger catches emulated time passing too slowly,
835        the virtual time trigger catches emulated time passing too fast.
836        Realtime triggers occur even when idle, so use them less frequently
837        than VM triggers.  */
838     timers_state.vm_clock_warp_start = -1;
839     timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
840                                    icount_adjust_rt, NULL);
841     timer_mod(timers_state.icount_rt_timer,
842                    qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
843     timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
844                                         icount_adjust_vm, NULL);
845     timer_mod(timers_state.icount_vm_timer,
846                    qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
847                    NANOSECONDS_PER_SECOND / 10);
848 }
849
850 /***********************************************************/
851 /* TCG vCPU kick timer
852  *
853  * The kick timer is responsible for moving single-threaded vCPU
854  * emulation on to the next vCPU. If more than one vCPU is running, a
855  * timer event will force a cpu->exit so the next vCPU can get
856  * scheduled.
857  *
858  * The timer is removed while all vCPUs are idle and restarted once
859  * any of them becomes runnable again.
860  */
861
862 static QEMUTimer *tcg_kick_vcpu_timer;
863 static CPUState *tcg_current_rr_cpu;
864
865 #define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
866
867 static inline int64_t qemu_tcg_next_kick(void)
868 {
869     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
870 }
871
872 /* Kick the currently round-robin scheduled vCPU */
873 static void qemu_cpu_kick_rr_cpu(void)
874 {
875     CPUState *cpu;
876     do {
877         cpu = atomic_mb_read(&tcg_current_rr_cpu);
878         if (cpu) {
879             cpu_exit(cpu);
880         }
881     } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
882 }
883
884 static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
885 {
886 }
887
888 void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
889 {
890     if (!use_icount || type != QEMU_CLOCK_VIRTUAL) {
891         qemu_notify_event();
892         return;
893     }
894
895     if (qemu_in_vcpu_thread()) {
896         /* A CPU is currently running; kick it back out to the
897          * tcg_cpu_exec() loop so it will recalculate its
898          * icount deadline immediately.
899          */
900         qemu_cpu_kick(current_cpu);
901     } else if (first_cpu) {
902         /* qemu_cpu_kick is not enough to kick a halted CPU out of
903          * qemu_tcg_wait_io_event.  async_run_on_cpu, instead,
904          * causes cpu_thread_is_idle to return false.  This way,
905          * handle_icount_deadline can run.
906          * If we have no CPUs at all for some reason, we don't
907          * need to do anything.
908          */
909         async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
910     }
911 }
912
913 static void kick_tcg_thread(void *opaque)
914 {
915     timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
916     qemu_cpu_kick_rr_cpu();
917 }
918
919 static void start_tcg_kick_timer(void)
920 {
921     assert(!mttcg_enabled);
922     if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
923         tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
924                                            kick_tcg_thread, NULL);
925         timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
926     }
927 }
928
929 static void stop_tcg_kick_timer(void)
930 {
931     assert(!mttcg_enabled);
932     if (tcg_kick_vcpu_timer) {
933         timer_del(tcg_kick_vcpu_timer);
934         tcg_kick_vcpu_timer = NULL;
935     }
936 }
937
938 /***********************************************************/
939 void hw_error(const char *fmt, ...)
940 {
941     va_list ap;
942     CPUState *cpu;
943
944     va_start(ap, fmt);
945     fprintf(stderr, "qemu: hardware error: ");
946     vfprintf(stderr, fmt, ap);
947     fprintf(stderr, "\n");
948     CPU_FOREACH(cpu) {
949         fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
950         cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
951     }
952     va_end(ap);
953     abort();
954 }
955
956 void cpu_synchronize_all_states(void)
957 {
958     CPUState *cpu;
959
960     CPU_FOREACH(cpu) {
961         cpu_synchronize_state(cpu);
962         /* TODO: move to cpu_synchronize_state() */
963         if (hvf_enabled()) {
964             hvf_cpu_synchronize_state(cpu);
965         }
966     }
967 }
968
969 void cpu_synchronize_all_post_reset(void)
970 {
971     CPUState *cpu;
972
973     CPU_FOREACH(cpu) {
974         cpu_synchronize_post_reset(cpu);
975         /* TODO: move to cpu_synchronize_post_reset() */
976         if (hvf_enabled()) {
977             hvf_cpu_synchronize_post_reset(cpu);
978         }
979     }
980 }
981
982 void cpu_synchronize_all_post_init(void)
983 {
984     CPUState *cpu;
985
986     CPU_FOREACH(cpu) {
987         cpu_synchronize_post_init(cpu);
988         /* TODO: move to cpu_synchronize_post_init() */
989         if (hvf_enabled()) {
990             hvf_cpu_synchronize_post_init(cpu);
991         }
992     }
993 }
994
995 void cpu_synchronize_all_pre_loadvm(void)
996 {
997     CPUState *cpu;
998
999     CPU_FOREACH(cpu) {
1000         cpu_synchronize_pre_loadvm(cpu);
1001     }
1002 }
1003
1004 static int do_vm_stop(RunState state, bool send_stop)
1005 {
1006     int ret = 0;
1007
1008     if (runstate_is_running()) {
1009         cpu_disable_ticks();
1010         pause_all_vcpus();
1011         runstate_set(state);
1012         vm_state_notify(0, state);
1013         if (send_stop) {
1014             qapi_event_send_stop(&error_abort);
1015         }
1016     }
1017
1018     bdrv_drain_all();
1019     replay_disable_events();
1020     ret = bdrv_flush_all();
1021
1022     return ret;
1023 }
1024
1025 /* Special vm_stop() variant for terminating the process.  Historically clients
1026  * did not expect a QMP STOP event and so we need to retain compatibility.
1027  */
1028 int vm_shutdown(void)
1029 {
1030     return do_vm_stop(RUN_STATE_SHUTDOWN, false);
1031 }
1032
1033 static bool cpu_can_run(CPUState *cpu)
1034 {
1035     if (cpu->stop) {
1036         return false;
1037     }
1038     if (cpu_is_stopped(cpu)) {
1039         return false;
1040     }
1041     return true;
1042 }
1043
1044 static void cpu_handle_guest_debug(CPUState *cpu)
1045 {
1046     gdb_set_stop_cpu(cpu);
1047     qemu_system_debug_request();
1048     cpu->stopped = true;
1049 }
1050
1051 #ifdef CONFIG_LINUX
1052 static void sigbus_reraise(void)
1053 {
1054     sigset_t set;
1055     struct sigaction action;
1056
1057     memset(&action, 0, sizeof(action));
1058     action.sa_handler = SIG_DFL;
1059     if (!sigaction(SIGBUS, &action, NULL)) {
1060         raise(SIGBUS);
1061         sigemptyset(&set);
1062         sigaddset(&set, SIGBUS);
1063         pthread_sigmask(SIG_UNBLOCK, &set, NULL);
1064     }
1065     perror("Failed to re-raise SIGBUS!\n");
1066     abort();
1067 }
1068
1069 static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
1070 {
1071     if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
1072         sigbus_reraise();
1073     }
1074
1075     if (current_cpu) {
1076         /* Called asynchronously in VCPU thread.  */
1077         if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
1078             sigbus_reraise();
1079         }
1080     } else {
1081         /* Called synchronously (via signalfd) in main thread.  */
1082         if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
1083             sigbus_reraise();
1084         }
1085     }
1086 }
1087
1088 static void qemu_init_sigbus(void)
1089 {
1090     struct sigaction action;
1091
1092     memset(&action, 0, sizeof(action));
1093     action.sa_flags = SA_SIGINFO;
1094     action.sa_sigaction = sigbus_handler;
1095     sigaction(SIGBUS, &action, NULL);
1096
1097     prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
1098 }
1099 #else /* !CONFIG_LINUX */
1100 static void qemu_init_sigbus(void)
1101 {
1102 }
1103 #endif /* !CONFIG_LINUX */
1104
1105 static QemuMutex qemu_global_mutex;
1106
1107 static QemuThread io_thread;
1108
1109 /* cpu creation */
1110 static QemuCond qemu_cpu_cond;
1111 /* system init */
1112 static QemuCond qemu_pause_cond;
1113
1114 void qemu_init_cpu_loop(void)
1115 {
1116     qemu_init_sigbus();
1117     qemu_cond_init(&qemu_cpu_cond);
1118     qemu_cond_init(&qemu_pause_cond);
1119     qemu_mutex_init(&qemu_global_mutex);
1120
1121     qemu_thread_get_self(&io_thread);
1122 }
1123
1124 void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
1125 {
1126     do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
1127 }
1128
1129 static void qemu_kvm_destroy_vcpu(CPUState *cpu)
1130 {
1131     if (kvm_destroy_vcpu(cpu) < 0) {
1132         error_report("kvm_destroy_vcpu failed");
1133         exit(EXIT_FAILURE);
1134     }
1135 }
1136
1137 static void qemu_tcg_destroy_vcpu(CPUState *cpu)
1138 {
1139 }
1140
1141 static void qemu_cpu_stop(CPUState *cpu, bool exit)
1142 {
1143     g_assert(qemu_cpu_is_self(cpu));
1144     cpu->stop = false;
1145     cpu->stopped = true;
1146     if (exit) {
1147         cpu_exit(cpu);
1148     }
1149     qemu_cond_broadcast(&qemu_pause_cond);
1150 }
1151
1152 static void qemu_wait_io_event_common(CPUState *cpu)
1153 {
1154     atomic_mb_set(&cpu->thread_kicked, false);
1155     if (cpu->stop) {
1156         qemu_cpu_stop(cpu, false);
1157     }
1158     process_queued_cpu_work(cpu);
1159 }
1160
1161 static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
1162 {
1163     while (all_cpu_threads_idle()) {
1164         stop_tcg_kick_timer();
1165         qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1166     }
1167
1168     start_tcg_kick_timer();
1169
1170     qemu_wait_io_event_common(cpu);
1171 }
1172
1173 static void qemu_wait_io_event(CPUState *cpu)
1174 {
1175     while (cpu_thread_is_idle(cpu)) {
1176         qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1177     }
1178
1179 #ifdef _WIN32
1180     /* Eat dummy APC queued by qemu_cpu_kick_thread.  */
1181     if (!tcg_enabled()) {
1182         SleepEx(0, TRUE);
1183     }
1184 #endif
1185     qemu_wait_io_event_common(cpu);
1186 }
1187
1188 static void *qemu_kvm_cpu_thread_fn(void *arg)
1189 {
1190     CPUState *cpu = arg;
1191     int r;
1192
1193     rcu_register_thread();
1194
1195     qemu_mutex_lock_iothread();
1196     qemu_thread_get_self(cpu->thread);
1197     cpu->thread_id = qemu_get_thread_id();
1198     cpu->can_do_io = 1;
1199     current_cpu = cpu;
1200
1201     r = kvm_init_vcpu(cpu);
1202     if (r < 0) {
1203         error_report("kvm_init_vcpu failed: %s", strerror(-r));
1204         exit(1);
1205     }
1206
1207     kvm_init_cpu_signals(cpu);
1208
1209     /* signal CPU creation */
1210     cpu->created = true;
1211     qemu_cond_signal(&qemu_cpu_cond);
1212
1213     do {
1214         if (cpu_can_run(cpu)) {
1215             r = kvm_cpu_exec(cpu);
1216             if (r == EXCP_DEBUG) {
1217                 cpu_handle_guest_debug(cpu);
1218             }
1219         }
1220         qemu_wait_io_event(cpu);
1221     } while (!cpu->unplug || cpu_can_run(cpu));
1222
1223     qemu_kvm_destroy_vcpu(cpu);
1224     cpu->created = false;
1225     qemu_cond_signal(&qemu_cpu_cond);
1226     qemu_mutex_unlock_iothread();
1227     rcu_unregister_thread();
1228     return NULL;
1229 }
1230
1231 static void *qemu_dummy_cpu_thread_fn(void *arg)
1232 {
1233 #ifdef _WIN32
1234     error_report("qtest is not supported under Windows");
1235     exit(1);
1236 #else
1237     CPUState *cpu = arg;
1238     sigset_t waitset;
1239     int r;
1240
1241     rcu_register_thread();
1242
1243     qemu_mutex_lock_iothread();
1244     qemu_thread_get_self(cpu->thread);
1245     cpu->thread_id = qemu_get_thread_id();
1246     cpu->can_do_io = 1;
1247     current_cpu = cpu;
1248
1249     sigemptyset(&waitset);
1250     sigaddset(&waitset, SIG_IPI);
1251
1252     /* signal CPU creation */
1253     cpu->created = true;
1254     qemu_cond_signal(&qemu_cpu_cond);
1255
1256     do {
1257         qemu_mutex_unlock_iothread();
1258         do {
1259             int sig;
1260             r = sigwait(&waitset, &sig);
1261         } while (r == -1 && (errno == EAGAIN || errno == EINTR));
1262         if (r == -1) {
1263             perror("sigwait");
1264             exit(1);
1265         }
1266         qemu_mutex_lock_iothread();
1267         qemu_wait_io_event(cpu);
1268     } while (!cpu->unplug);
1269
1270     rcu_unregister_thread();
1271     return NULL;
1272 #endif
1273 }
1274
1275 static int64_t tcg_get_icount_limit(void)
1276 {
1277     int64_t deadline;
1278
1279     if (replay_mode != REPLAY_MODE_PLAY) {
1280         deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1281
1282         /* Maintain prior (possibly buggy) behaviour where if no deadline
1283          * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
1284          * INT32_MAX nanoseconds ahead, we still use INT32_MAX
1285          * nanoseconds.
1286          */
1287         if ((deadline < 0) || (deadline > INT32_MAX)) {
1288             deadline = INT32_MAX;
1289         }
1290
1291         return qemu_icount_round(deadline);
1292     } else {
1293         return replay_get_instructions();
1294     }
1295 }
1296
1297 static void handle_icount_deadline(void)
1298 {
1299     assert(qemu_in_vcpu_thread());
1300     if (use_icount) {
1301         int64_t deadline =
1302             qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1303
1304         if (deadline == 0) {
1305             /* Wake up other AioContexts.  */
1306             qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
1307             qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
1308         }
1309     }
1310 }
1311
1312 static void prepare_icount_for_run(CPUState *cpu)
1313 {
1314     if (use_icount) {
1315         int insns_left;
1316
1317         /* These should always be cleared by process_icount_data after
1318          * each vCPU execution. However u16.high can be raised
1319          * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
1320          */
1321         g_assert(cpu->icount_decr.u16.low == 0);
1322         g_assert(cpu->icount_extra == 0);
1323
1324         cpu->icount_budget = tcg_get_icount_limit();
1325         insns_left = MIN(0xffff, cpu->icount_budget);
1326         cpu->icount_decr.u16.low = insns_left;
1327         cpu->icount_extra = cpu->icount_budget - insns_left;
1328
1329         replay_mutex_lock();
1330     }
1331 }
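/*
 * Worked example of the budget split above: with an icount budget of
 * 200000 instructions, u16.low is capped at 0xffff (65535) and the
 * remaining 134465 instructions are held in icount_extra, to be consumed
 * in later chunks as the decrementer runs dry.
 */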
1332
1333 static void process_icount_data(CPUState *cpu)
1334 {
1335     if (use_icount) {
1336         /* Account for executed instructions */
1337         cpu_update_icount(cpu);
1338
1339         /* Reset the counters */
1340         cpu->icount_decr.u16.low = 0;
1341         cpu->icount_extra = 0;
1342         cpu->icount_budget = 0;
1343
1344         replay_account_executed_instructions();
1345
1346         replay_mutex_unlock();
1347     }
1348 }
1349
1350
1351 static int tcg_cpu_exec(CPUState *cpu)
1352 {
1353     int ret;
1354 #ifdef CONFIG_PROFILER
1355     int64_t ti;
1356 #endif
1357
1358 #ifdef CONFIG_PROFILER
1359     ti = profile_getclock();
1360 #endif
1361     cpu_exec_start(cpu);
1362     ret = cpu_exec(cpu);
1363     cpu_exec_end(cpu);
1364 #ifdef CONFIG_PROFILER
1365     tcg_time += profile_getclock() - ti;
1366 #endif
1367     return ret;
1368 }
1369
1370 /* Destroy any remaining vCPUs which have been unplugged and have
1371  * finished running
1372  */
1373 static void deal_with_unplugged_cpus(void)
1374 {
1375     CPUState *cpu;
1376
1377     CPU_FOREACH(cpu) {
1378         if (cpu->unplug && !cpu_can_run(cpu)) {
1379             qemu_tcg_destroy_vcpu(cpu);
1380             cpu->created = false;
1381             qemu_cond_signal(&qemu_cpu_cond);
1382             break;
1383         }
1384     }
1385 }
1386
1387 /* Single-threaded TCG
1388  *
1389  * In the single-threaded case each vCPU is simulated in turn. If
1390  * there is more than a single vCPU we create a simple timer to kick
1391  * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
1392  * This is done explicitly rather than relying on side-effects
1393  * elsewhere.
1394  */
1395
1396 static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
1397 {
1398     CPUState *cpu = arg;
1399
1400     rcu_register_thread();
1401     tcg_register_thread();
1402
1403     qemu_mutex_lock_iothread();
1404     qemu_thread_get_self(cpu->thread);
1405
1406     cpu->thread_id = qemu_get_thread_id();
1407     cpu->created = true;
1408     cpu->can_do_io = 1;
1409     qemu_cond_signal(&qemu_cpu_cond);
1410
1411     /* wait for initial kick-off after machine start */
1412     while (first_cpu->stopped) {
1413         qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
1414
1415         /* process any pending work */
1416         CPU_FOREACH(cpu) {
1417             current_cpu = cpu;
1418             qemu_wait_io_event_common(cpu);
1419         }
1420     }
1421
1422     start_tcg_kick_timer();
1423
1424     cpu = first_cpu;
1425
1426     /* process any pending work */
1427     cpu->exit_request = 1;
1428
1429     while (1) {
1430         qemu_mutex_unlock_iothread();
1431         replay_mutex_lock();
1432         qemu_mutex_lock_iothread();
1433         /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
1434         qemu_account_warp_timer();
1435
1436         /* Run the timers here.  This is much more efficient than
1437          * waking up the I/O thread and waiting for completion.
1438          */
1439         handle_icount_deadline();
1440
1441         replay_mutex_unlock();
1442
1443         if (!cpu) {
1444             cpu = first_cpu;
1445         }
1446
1447         while (cpu && !cpu->queued_work_first && !cpu->exit_request) {
1448
1449             atomic_mb_set(&tcg_current_rr_cpu, cpu);
1450             current_cpu = cpu;
1451
1452             qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
1453                               (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
1454
1455             if (cpu_can_run(cpu)) {
1456                 int r;
1457
1458                 qemu_mutex_unlock_iothread();
1459                 prepare_icount_for_run(cpu);
1460
1461                 r = tcg_cpu_exec(cpu);
1462
1463                 process_icount_data(cpu);
1464                 qemu_mutex_lock_iothread();
1465
1466                 if (r == EXCP_DEBUG) {
1467                     cpu_handle_guest_debug(cpu);
1468                     break;
1469                 } else if (r == EXCP_ATOMIC) {
1470                     qemu_mutex_unlock_iothread();
1471                     cpu_exec_step_atomic(cpu);
1472                     qemu_mutex_lock_iothread();
1473                     break;
1474                 }
1475             } else if (cpu->stop) {
1476                 if (cpu->unplug) {
1477                     cpu = CPU_NEXT(cpu);
1478                 }
1479                 break;
1480             }
1481
1482             cpu = CPU_NEXT(cpu);
1483         } /* while (cpu && !cpu->exit_request).. */
1484
1485         /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
1486         atomic_set(&tcg_current_rr_cpu, NULL);
1487
1488         if (cpu && cpu->exit_request) {
1489             atomic_mb_set(&cpu->exit_request, 0);
1490         }
1491
1492         qemu_tcg_rr_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
1493         deal_with_unplugged_cpus();
1494     }
1495
1496     rcu_unregister_thread();
1497     return NULL;
1498 }
1499
1500 static void *qemu_hax_cpu_thread_fn(void *arg)
1501 {
1502     CPUState *cpu = arg;
1503     int r;
1504
1505     rcu_register_thread();
1506     qemu_mutex_lock_iothread();
1507     qemu_thread_get_self(cpu->thread);
1508
1509     cpu->thread_id = qemu_get_thread_id();
1510     cpu->created = true;
1511     cpu->halted = 0;
1512     current_cpu = cpu;
1513
1514     hax_init_vcpu(cpu);
1515     qemu_cond_signal(&qemu_cpu_cond);
1516
1517     do {
1518         if (cpu_can_run(cpu)) {
1519             r = hax_smp_cpu_exec(cpu);
1520             if (r == EXCP_DEBUG) {
1521                 cpu_handle_guest_debug(cpu);
1522             }
1523         }
1524
1525         qemu_wait_io_event(cpu);
1526     } while (!cpu->unplug || cpu_can_run(cpu));
1527     rcu_unregister_thread();
1528     return NULL;
1529 }
1530
1531 /* The HVF-specific vCPU thread function. This one should only run when the host
1532  * CPU supports the VMX "unrestricted guest" feature. */
1533 static void *qemu_hvf_cpu_thread_fn(void *arg)
1534 {
1535     CPUState *cpu = arg;
1536
1537     int r;
1538
1539     assert(hvf_enabled());
1540
1541     rcu_register_thread();
1542
1543     qemu_mutex_lock_iothread();
1544     qemu_thread_get_self(cpu->thread);
1545
1546     cpu->thread_id = qemu_get_thread_id();
1547     cpu->can_do_io = 1;
1548     current_cpu = cpu;
1549
1550     hvf_init_vcpu(cpu);
1551
1552     /* signal CPU creation */
1553     cpu->created = true;
1554     qemu_cond_signal(&qemu_cpu_cond);
1555
1556     do {
1557         if (cpu_can_run(cpu)) {
1558             r = hvf_vcpu_exec(cpu);
1559             if (r == EXCP_DEBUG) {
1560                 cpu_handle_guest_debug(cpu);
1561             }
1562         }
1563         qemu_wait_io_event(cpu);
1564     } while (!cpu->unplug || cpu_can_run(cpu));
1565
1566     hvf_vcpu_destroy(cpu);
1567     cpu->created = false;
1568     qemu_cond_signal(&qemu_cpu_cond);
1569     qemu_mutex_unlock_iothread();
1570     rcu_unregister_thread();
1571     return NULL;
1572 }
1573
1574 static void *qemu_whpx_cpu_thread_fn(void *arg)
1575 {
1576     CPUState *cpu = arg;
1577     int r;
1578
1579     rcu_register_thread();
1580
1581     qemu_mutex_lock_iothread();
1582     qemu_thread_get_self(cpu->thread);
1583     cpu->thread_id = qemu_get_thread_id();
1584     current_cpu = cpu;
1585
1586     r = whpx_init_vcpu(cpu);
1587     if (r < 0) {
1588         fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-r));
1589         exit(1);
1590     }
1591
1592     /* signal CPU creation */
1593     cpu->created = true;
1594     qemu_cond_signal(&qemu_cpu_cond);
1595
1596     do {
1597         if (cpu_can_run(cpu)) {
1598             r = whpx_vcpu_exec(cpu);
1599             if (r == EXCP_DEBUG) {
1600                 cpu_handle_guest_debug(cpu);
1601             }
1602         }
1603         while (cpu_thread_is_idle(cpu)) {
1604             qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1605         }
1606         qemu_wait_io_event_common(cpu);
1607     } while (!cpu->unplug || cpu_can_run(cpu));
1608
1609     whpx_destroy_vcpu(cpu);
1610     cpu->created = false;
1611     qemu_cond_signal(&qemu_cpu_cond);
1612     qemu_mutex_unlock_iothread();
1613     rcu_unregister_thread();
1614     return NULL;
1615 }
1616
1617 #ifdef _WIN32
1618 static void CALLBACK dummy_apc_func(ULONG_PTR unused)
1619 {
1620 }
1621 #endif
1622
1623 /* Multi-threaded TCG
1624  *
1625  * In the multi-threaded case each vCPU has its own thread. The TLS
1626  * variable current_cpu can be used deep in the code to find the
1627  * current CPUState for a given thread.
1628  */
1629
1630 static void *qemu_tcg_cpu_thread_fn(void *arg)
1631 {
1632     CPUState *cpu = arg;
1633
1634     g_assert(!use_icount);
1635
1636     rcu_register_thread();
1637     tcg_register_thread();
1638
1639     qemu_mutex_lock_iothread();
1640     qemu_thread_get_self(cpu->thread);
1641
1642     cpu->thread_id = qemu_get_thread_id();
1643     cpu->created = true;
1644     cpu->can_do_io = 1;
1645     current_cpu = cpu;
1646     qemu_cond_signal(&qemu_cpu_cond);
1647
1648     /* process any pending work */
1649     cpu->exit_request = 1;
1650
1651     do {
1652         if (cpu_can_run(cpu)) {
1653             int r;
1654             qemu_mutex_unlock_iothread();
1655             r = tcg_cpu_exec(cpu);
1656             qemu_mutex_lock_iothread();
1657             switch (r) {
1658             case EXCP_DEBUG:
1659                 cpu_handle_guest_debug(cpu);
1660                 break;
1661             case EXCP_HALTED:
1662                 /* during start-up the vCPU is reset and the thread is
1663                  * kicked several times. If we don't ensure we go back
1664                  * to sleep in the halted state, we won't cleanly
1665                  * start up when the vCPU is enabled.
1666                  *
1667                  * cpu->halted should ensure we sleep in wait_io_event
1668                  */
1669                 g_assert(cpu->halted);
1670                 break;
1671             case EXCP_ATOMIC:
1672                 qemu_mutex_unlock_iothread();
1673                 cpu_exec_step_atomic(cpu);
1674                 qemu_mutex_lock_iothread();
1675             default:
1676                 /* Ignore everything else? */
1677                 break;
1678             }
1679         }
1680
1681         atomic_mb_set(&cpu->exit_request, 0);
1682         qemu_wait_io_event(cpu);
1683     } while (!cpu->unplug || cpu_can_run(cpu));
1684
1685     qemu_tcg_destroy_vcpu(cpu);
1686     cpu->created = false;
1687     qemu_cond_signal(&qemu_cpu_cond);
1688     qemu_mutex_unlock_iothread();
1689     rcu_unregister_thread();
1690     return NULL;
1691 }
1692
1693 static void qemu_cpu_kick_thread(CPUState *cpu)
1694 {
1695 #ifndef _WIN32
1696     int err;
1697
1698     if (cpu->thread_kicked) {
1699         return;
1700     }
1701     cpu->thread_kicked = true;
1702     err = pthread_kill(cpu->thread->thread, SIG_IPI);
1703     if (err) {
1704         fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
1705         exit(1);
1706     }
1707 #else /* _WIN32 */
1708     if (!qemu_cpu_is_self(cpu)) {
1709         if (whpx_enabled()) {
1710             whpx_vcpu_kick(cpu);
1711         } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
1712             fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
1713                     __func__, GetLastError());
1714             exit(1);
1715         }
1716     }
1717 #endif
1718 }
1719
1720 void qemu_cpu_kick(CPUState *cpu)
1721 {
1722     qemu_cond_broadcast(cpu->halt_cond);
1723     if (tcg_enabled()) {
1724         cpu_exit(cpu);
1725         /* NOP unless doing single-thread RR */
1726         qemu_cpu_kick_rr_cpu();
1727     } else {
1728         if (hax_enabled()) {
1729             /*
1730              * FIXME: race condition with the exit_request check in
1731              * hax_vcpu_hax_exec
1732              */
1733             cpu->exit_request = 1;
1734         }
1735         qemu_cpu_kick_thread(cpu);
1736     }
1737 }
1738
1739 void qemu_cpu_kick_self(void)
1740 {
1741     assert(current_cpu);
1742     qemu_cpu_kick_thread(current_cpu);
1743 }
1744
1745 bool qemu_cpu_is_self(CPUState *cpu)
1746 {
1747     return qemu_thread_is_self(cpu->thread);
1748 }
1749
1750 bool qemu_in_vcpu_thread(void)
1751 {
1752     return current_cpu && qemu_cpu_is_self(current_cpu);
1753 }
1754
1755 static __thread bool iothread_locked = false;
1756
1757 bool qemu_mutex_iothread_locked(void)
1758 {
1759     return iothread_locked;
1760 }
1761
1762 void qemu_mutex_lock_iothread(void)
1763 {
1764     g_assert(!qemu_mutex_iothread_locked());
1765     qemu_mutex_lock(&qemu_global_mutex);
1766     iothread_locked = true;
1767 }
1768
1769 void qemu_mutex_unlock_iothread(void)
1770 {
1771     g_assert(qemu_mutex_iothread_locked());
1772     iothread_locked = false;
1773     qemu_mutex_unlock(&qemu_global_mutex);
1774 }
1775
1776 static bool all_vcpus_paused(void)
1777 {
1778     CPUState *cpu;
1779
1780     CPU_FOREACH(cpu) {
1781         if (!cpu->stopped) {
1782             return false;
1783         }
1784     }
1785
1786     return true;
1787 }
1788
1789 void pause_all_vcpus(void)
1790 {
1791     CPUState *cpu;
1792
1793     qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
1794     CPU_FOREACH(cpu) {
1795         if (qemu_cpu_is_self(cpu)) {
1796             qemu_cpu_stop(cpu, true);
1797         } else {
1798             cpu->stop = true;
1799             qemu_cpu_kick(cpu);
1800         }
1801     }
1802
1803     /* We need to drop the replay_lock so any vCPU threads woken up
1804      * can finish their replay tasks
1805      */
1806     replay_mutex_unlock();
1807
1808     while (!all_vcpus_paused()) {
1809         qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
1810         CPU_FOREACH(cpu) {
1811             qemu_cpu_kick(cpu);
1812         }
1813     }
1814
1815     qemu_mutex_unlock_iothread();
1816     replay_mutex_lock();
1817     qemu_mutex_lock_iothread();
1818 }
1819
1820 void cpu_resume(CPUState *cpu)
1821 {
1822     cpu->stop = false;
1823     cpu->stopped = false;
1824     qemu_cpu_kick(cpu);
1825 }
1826
1827 void resume_all_vcpus(void)
1828 {
1829     CPUState *cpu;
1830
1831     qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
1832     CPU_FOREACH(cpu) {
1833         cpu_resume(cpu);
1834     }
1835 }
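/*
 * Callers typically bracket work that must not race with guest execution
 * (an illustrative sketch, not code from this file):
 *
 *     pause_all_vcpus();
 *     ... manipulate guest state ...
 *     resume_all_vcpus();
 *
 * Both functions are called with the iothread mutex held.
 */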
1836
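/*
 * Ask @cpu's thread to stop and unplug itself, then (with the iothread
 * mutex temporarily dropped) wait for the thread to exit.
 */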
1837 void cpu_remove_sync(CPUState *cpu)
1838 {
1839     cpu->stop = true;
1840     cpu->unplug = true;
1841     qemu_cpu_kick(cpu);
1842     qemu_mutex_unlock_iothread();
1843     qemu_thread_join(cpu->thread);
1844     qemu_mutex_lock_iothread();
1845 }
1846
1847 /* Size of the temporary buffers used to form a vCPU thread name */
1848 #define VCPU_THREAD_NAME_SIZE 16
1849
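/*
 * Start TCG execution for @cpu: in MTTCG mode each vCPU gets its own
 * thread; otherwise all vCPUs share a single round-robin thread, which
 * is created for the first vCPU and reused for the rest.
 */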
1850 static void qemu_tcg_init_vcpu(CPUState *cpu)
1851 {
1852     char thread_name[VCPU_THREAD_NAME_SIZE];
1853     static QemuCond *single_tcg_halt_cond;
1854     static QemuThread *single_tcg_cpu_thread;
1855     static int tcg_region_inited;
1856
1857     /*
1858      * Initialize TCG regions--once. Now is a good time, because:
1859      * (1) TCG's init context, prologue and target globals have been set up.
1860      * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
1861      *     -accel flag is processed, so the check doesn't work then).
1862      */
1863     if (!tcg_region_inited) {
1864         tcg_region_inited = 1;
1865         tcg_region_init();
1866     }
1867
1868     if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
1869         cpu->thread = g_malloc0(sizeof(QemuThread));
1870         cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1871         qemu_cond_init(cpu->halt_cond);
1872
1873         if (qemu_tcg_mttcg_enabled()) {
1874             /* create a thread per vCPU with TCG (MTTCG) */
1875             parallel_cpus = true;
1876             snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
1877                      cpu->cpu_index);
1878
1879             qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
1880                                cpu, QEMU_THREAD_JOINABLE);
1881
1882         } else {
1883             /* share a single thread for all cpus with TCG */
1884             snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
1885             qemu_thread_create(cpu->thread, thread_name,
1886                                qemu_tcg_rr_cpu_thread_fn,
1887                                cpu, QEMU_THREAD_JOINABLE);
1888
1889             single_tcg_halt_cond = cpu->halt_cond;
1890             single_tcg_cpu_thread = cpu->thread;
1891         }
1892 #ifdef _WIN32
1893         cpu->hThread = qemu_thread_get_handle(cpu->thread);
1894 #endif
1895     } else {
1896         /* For non-MTTCG cases we share the thread */
1897         cpu->thread = single_tcg_cpu_thread;
1898         cpu->halt_cond = single_tcg_halt_cond;
1899         cpu->thread_id = first_cpu->thread_id;
1900         cpu->can_do_io = 1;
1901         cpu->created = true;
1902     }
1903 }
1904
1905 static void qemu_hax_start_vcpu(CPUState *cpu)
1906 {
1907     char thread_name[VCPU_THREAD_NAME_SIZE];
1908
1909     cpu->thread = g_malloc0(sizeof(QemuThread));
1910     cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1911     qemu_cond_init(cpu->halt_cond);
1912
1913     snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
1914              cpu->cpu_index);
1915     qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
1916                        cpu, QEMU_THREAD_JOINABLE);
1917 #ifdef _WIN32
1918     cpu->hThread = qemu_thread_get_handle(cpu->thread);
1919 #endif
1920 }
1921
1922 static void qemu_kvm_start_vcpu(CPUState *cpu)
1923 {
1924     char thread_name[VCPU_THREAD_NAME_SIZE];
1925
1926     cpu->thread = g_malloc0(sizeof(QemuThread));
1927     cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1928     qemu_cond_init(cpu->halt_cond);
1929     snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
1930              cpu->cpu_index);
1931     qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
1932                        cpu, QEMU_THREAD_JOINABLE);
1933 }
1934
1935 static void qemu_hvf_start_vcpu(CPUState *cpu)
1936 {
1937     char thread_name[VCPU_THREAD_NAME_SIZE];
1938
1939     /* HVF currently does not support TCG, and only runs in
1940      * unrestricted-guest mode. */
1941     assert(hvf_enabled());
1942
1943     cpu->thread = g_malloc0(sizeof(QemuThread));
1944     cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1945     qemu_cond_init(cpu->halt_cond);
1946
1947     snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
1948              cpu->cpu_index);
1949     qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
1950                        cpu, QEMU_THREAD_JOINABLE);
1951 }
1952
1953 static void qemu_whpx_start_vcpu(CPUState *cpu)
1954 {
1955     char thread_name[VCPU_THREAD_NAME_SIZE];
1956
1957     cpu->thread = g_malloc0(sizeof(QemuThread));
1958     cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1959     qemu_cond_init(cpu->halt_cond);
1960     snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX",
1961              cpu->cpu_index);
1962     qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn,
1963                        cpu, QEMU_THREAD_JOINABLE);
1964 #ifdef _WIN32
1965     cpu->hThread = qemu_thread_get_handle(cpu->thread);
1966 #endif
1967 }
1968
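/* Start a placeholder vCPU thread for when no accelerator is in use. */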
1969 static void qemu_dummy_start_vcpu(CPUState *cpu)
1970 {
1971     char thread_name[VCPU_THREAD_NAME_SIZE];
1972
1973     cpu->thread = g_malloc0(sizeof(QemuThread));
1974     cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1975     qemu_cond_init(cpu->halt_cond);
1976     snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
1977              cpu->cpu_index);
1978     qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
1979                        QEMU_THREAD_JOINABLE);
1980 }
1981
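/*
 * Bring up the vCPU thread for @cpu using whichever accelerator is
 * enabled (KVM, HAX, HVF, TCG, WHPX, or the dummy backend), then wait
 * until the new thread signals that the vCPU has been created.
 */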
1982 void qemu_init_vcpu(CPUState *cpu)
1983 {
1984     cpu->nr_cores = smp_cores;
1985     cpu->nr_threads = smp_threads;
1986     cpu->stopped = true;
1987
1988     if (!cpu->as) {
1989         /* If the target cpu hasn't set up any address spaces itself,
1990          * give it the default one.
1991          */
1992         cpu->num_ases = 1;
1993         cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
1994     }
1995
1996     if (kvm_enabled()) {
1997         qemu_kvm_start_vcpu(cpu);
1998     } else if (hax_enabled()) {
1999         qemu_hax_start_vcpu(cpu);
2000     } else if (hvf_enabled()) {
2001         qemu_hvf_start_vcpu(cpu);
2002     } else if (tcg_enabled()) {
2003         qemu_tcg_init_vcpu(cpu);
2004     } else if (whpx_enabled()) {
2005         qemu_whpx_start_vcpu(cpu);
2006     } else {
2007         qemu_dummy_start_vcpu(cpu);
2008     }
2009
2010     while (!cpu->created) {
2011         qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
2012     }
2013 }
2014
2015 void cpu_stop_current(void)
2016 {
2017     if (current_cpu) {
2018         qemu_cpu_stop(current_cpu, true);
2019     }
2020 }
2021
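/*
 * Request that the VM be stopped.  When called from a vCPU thread this
 * only files a vmstop request and stops the current vCPU; the main loop
 * completes the transition.  Otherwise the VM is stopped synchronously.
 */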
2022 int vm_stop(RunState state)
2023 {
2024     if (qemu_in_vcpu_thread()) {
2025         qemu_system_vmstop_request_prepare();
2026         qemu_system_vmstop_request(state);
2027         /*
2028          * FIXME: should not return to device code in case
2029          * vm_stop() has been requested.
2030          */
2031         cpu_stop_current();
2032         return 0;
2033     }
2034
2035     return do_vm_stop(state, true);
2036 }
2037
2038 /**
2039  * Prepare for (re)starting the VM.
2040  * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
2041  * running or in case of an error condition), 0 otherwise.
2042  */
2043 int vm_prepare_start(void)
2044 {
2045     RunState requested;
2046
2047     qemu_vmstop_requested(&requested);
2048     if (runstate_is_running() && requested == RUN_STATE__MAX) {
2049         return -1;
2050     }
2051
2052     /* Ensure that a STOP/RESUME pair of events is emitted if a
2053      * vmstop request was pending.  The BLOCK_IO_ERROR event, for
2054      * example, is documented as always being followed by the STOP
2055      * event.
2056      */
2057     if (runstate_is_running()) {
2058         qapi_event_send_stop(&error_abort);
2059         qapi_event_send_resume(&error_abort);
2060         return -1;
2061     }
2062
2063     /* Send the RESUME event now; the vCPUs are resumed shortly afterwards */
2064     qapi_event_send_resume(&error_abort);
2065
2066     replay_enable_events();
2067     cpu_enable_ticks();
2068     runstate_set(RUN_STATE_RUNNING);
2069     vm_state_notify(1, RUN_STATE_RUNNING);
2070     return 0;
2071 }
2072
2073 void vm_start(void)
2074 {
2075     if (!vm_prepare_start()) {
2076         resume_all_vcpus();
2077     }
2078 }
2079
2080 /* Does a state transition even if the VM is already stopped;
2081    the current state is forgotten forever. */
2082 int vm_stop_force_state(RunState state)
2083 {
2084     if (runstate_is_running()) {
2085         return vm_stop(state);
2086     } else {
2087         runstate_set(state);
2088
2089         bdrv_drain_all();
2090         /* Make sure to return an error if the flush in a previous vm_stop()
2091          * failed. */
2092         return bdrv_flush_all();
2093     }
2094 }
2095
2096 void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
2097 {
2098     /* XXX: implement xxx_cpu_list for targets that still lack it */
2099 #if defined(cpu_list)
2100     cpu_list(f, cpu_fprintf);
2101 #endif
2102 }
2103
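/*
 * QMP 'query-cpus': unlike qmp_query_cpus_fast() below, this calls
 * cpu_synchronize_state() and may therefore interrupt running vCPU
 * threads to fetch register state.
 */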
2104 CpuInfoList *qmp_query_cpus(Error **errp)
2105 {
2106     MachineState *ms = MACHINE(qdev_get_machine());
2107     MachineClass *mc = MACHINE_GET_CLASS(ms);
2108     CpuInfoList *head = NULL, *cur_item = NULL;
2109     CPUState *cpu;
2110
2111     CPU_FOREACH(cpu) {
2112         CpuInfoList *info;
2113 #if defined(TARGET_I386)
2114         X86CPU *x86_cpu = X86_CPU(cpu);
2115         CPUX86State *env = &x86_cpu->env;
2116 #elif defined(TARGET_PPC)
2117         PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
2118         CPUPPCState *env = &ppc_cpu->env;
2119 #elif defined(TARGET_SPARC)
2120         SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
2121         CPUSPARCState *env = &sparc_cpu->env;
2122 #elif defined(TARGET_RISCV)
2123         RISCVCPU *riscv_cpu = RISCV_CPU(cpu);
2124         CPURISCVState *env = &riscv_cpu->env;
2125 #elif defined(TARGET_MIPS)
2126         MIPSCPU *mips_cpu = MIPS_CPU(cpu);
2127         CPUMIPSState *env = &mips_cpu->env;
2128 #elif defined(TARGET_TRICORE)
2129         TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
2130         CPUTriCoreState *env = &tricore_cpu->env;
2131 #elif defined(TARGET_S390X)
2132         S390CPU *s390_cpu = S390_CPU(cpu);
2133         CPUS390XState *env = &s390_cpu->env;
2134 #endif
2135
2136         cpu_synchronize_state(cpu);
2137
2138         info = g_malloc0(sizeof(*info));
2139         info->value = g_malloc0(sizeof(*info->value));
2140         info->value->CPU = cpu->cpu_index;
2141         info->value->current = (cpu == first_cpu);
2142         info->value->halted = cpu->halted;
2143         info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
2144         info->value->thread_id = cpu->thread_id;
2145 #if defined(TARGET_I386)
2146         info->value->arch = CPU_INFO_ARCH_X86;
2147         info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
2148 #elif defined(TARGET_PPC)
2149         info->value->arch = CPU_INFO_ARCH_PPC;
2150         info->value->u.ppc.nip = env->nip;
2151 #elif defined(TARGET_SPARC)
2152         info->value->arch = CPU_INFO_ARCH_SPARC;
2153         info->value->u.q_sparc.pc = env->pc;
2154         info->value->u.q_sparc.npc = env->npc;
2155 #elif defined(TARGET_MIPS)
2156         info->value->arch = CPU_INFO_ARCH_MIPS;
2157         info->value->u.q_mips.PC = env->active_tc.PC;
2158 #elif defined(TARGET_TRICORE)
2159         info->value->arch = CPU_INFO_ARCH_TRICORE;
2160         info->value->u.tricore.PC = env->PC;
2161 #elif defined(TARGET_S390X)
2162         info->value->arch = CPU_INFO_ARCH_S390;
2163         info->value->u.s390.cpu_state = env->cpu_state;
2164 #elif defined(TARGET_RISCV)
2165         info->value->arch = CPU_INFO_ARCH_RISCV;
2166         info->value->u.riscv.pc = env->pc;
2167 #else
2168         info->value->arch = CPU_INFO_ARCH_OTHER;
2169 #endif
2170         info->value->has_props = !!mc->cpu_index_to_instance_props;
2171         if (info->value->has_props) {
2172             CpuInstanceProperties *props;
2173             props = g_malloc0(sizeof(*props));
2174             *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
2175             info->value->props = props;
2176         }
2177
2178         /* XXX: waiting for the qapi to support GSList */
2179         if (!cur_item) {
2180             head = cur_item = info;
2181         } else {
2182             cur_item->next = info;
2183             cur_item = info;
2184         }
2185     }
2186
2187     return head;
2188 }
2189
2190 static CpuInfoArch sysemu_target_to_cpuinfo_arch(SysEmuTarget target)
2191 {
2192     /*
2193      * The @SysEmuTarget -> @CpuInfoArch mapping below is based on the
2194      * TARGET_ARCH -> TARGET_BASE_ARCH mapping in the "configure" script.
2195      */
2196     switch (target) {
2197     case SYS_EMU_TARGET_I386:
2198     case SYS_EMU_TARGET_X86_64:
2199         return CPU_INFO_ARCH_X86;
2200
2201     case SYS_EMU_TARGET_PPC:
2202     case SYS_EMU_TARGET_PPCEMB:
2203     case SYS_EMU_TARGET_PPC64:
2204         return CPU_INFO_ARCH_PPC;
2205
2206     case SYS_EMU_TARGET_SPARC:
2207     case SYS_EMU_TARGET_SPARC64:
2208         return CPU_INFO_ARCH_SPARC;
2209
2210     case SYS_EMU_TARGET_MIPS:
2211     case SYS_EMU_TARGET_MIPSEL:
2212     case SYS_EMU_TARGET_MIPS64:
2213     case SYS_EMU_TARGET_MIPS64EL:
2214         return CPU_INFO_ARCH_MIPS;
2215
2216     case SYS_EMU_TARGET_TRICORE:
2217         return CPU_INFO_ARCH_TRICORE;
2218
2219     case SYS_EMU_TARGET_S390X:
2220         return CPU_INFO_ARCH_S390;
2221
2222     case SYS_EMU_TARGET_RISCV32:
2223     case SYS_EMU_TARGET_RISCV64:
2224         return CPU_INFO_ARCH_RISCV;
2225
2226     default:
2227         return CPU_INFO_ARCH_OTHER;
2228     }
2229 }
2230
2231 static void cpustate_to_cpuinfo_s390(CpuInfoS390 *info, const CPUState *cpu)
2232 {
2233 #ifdef TARGET_S390X
2234     S390CPU *s390_cpu = S390_CPU(cpu);
2235     CPUS390XState *env = &s390_cpu->env;
2236
2237     info->cpu_state = env->cpu_state;
2238 #else
2239     abort();
2240 #endif
2241 }
2242
2243 /*
2244  * fast means: we NEVER interrupt vCPU threads to retrieve
2245  * information from KVM.
2246  */
2247 CpuInfoFastList *qmp_query_cpus_fast(Error **errp)
2248 {
2249     MachineState *ms = MACHINE(qdev_get_machine());
2250     MachineClass *mc = MACHINE_GET_CLASS(ms);
2251     CpuInfoFastList *head = NULL, *cur_item = NULL;
2252     SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME,
2253                                           -1, &error_abort);
2254     CPUState *cpu;
2255
2256     CPU_FOREACH(cpu) {
2257         CpuInfoFastList *info = g_malloc0(sizeof(*info));
2258         info->value = g_malloc0(sizeof(*info->value));
2259
2260         info->value->cpu_index = cpu->cpu_index;
2261         info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
2262         info->value->thread_id = cpu->thread_id;
2263
2264         info->value->has_props = !!mc->cpu_index_to_instance_props;
2265         if (info->value->has_props) {
2266             CpuInstanceProperties *props;
2267             props = g_malloc0(sizeof(*props));
2268             *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
2269             info->value->props = props;
2270         }
2271
2272         info->value->arch = sysemu_target_to_cpuinfo_arch(target);
2273         info->value->target = target;
2274         if (target == SYS_EMU_TARGET_S390X) {
2275             cpustate_to_cpuinfo_s390(&info->value->u.s390x, cpu);
2276         } else {
2277             /* do nothing for @CpuInfoOther */
2278         }
2279
2280         if (!cur_item) {
2281             head = cur_item = info;
2282         } else {
2283             cur_item->next = info;
2284             cur_item = info;
2285         }
2286     }
2287
2288     return head;
2289 }
2290
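/*
 * QMP 'memsave': write @size bytes of the given vCPU's virtual address
 * space, starting at @addr, to @filename.
 */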
2291 void qmp_memsave(int64_t addr, int64_t size, const char *filename,
2292                  bool has_cpu, int64_t cpu_index, Error **errp)
2293 {
2294     FILE *f;
2295     uint32_t l;
2296     CPUState *cpu;
2297     uint8_t buf[1024];
2298     int64_t orig_addr = addr, orig_size = size;
2299
2300     if (!has_cpu) {
2301         cpu_index = 0;
2302     }
2303
2304     cpu = qemu_get_cpu(cpu_index);
2305     if (cpu == NULL) {
2306         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
2307                    "a CPU number");
2308         return;
2309     }
2310
2311     f = fopen(filename, "wb");
2312     if (!f) {
2313         error_setg_file_open(errp, errno, filename);
2314         return;
2315     }
2316
2317     while (size != 0) {
2318         l = sizeof(buf);
2319         if (l > size)
2320             l = size;
2321         if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
2322             error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
2323                              " specified", orig_addr, orig_size);
2324             goto exit;
2325         }
2326         if (fwrite(buf, 1, l, f) != l) {
2327             error_setg(errp, QERR_IO_ERROR);
2328             goto exit;
2329         }
2330         addr += l;
2331         size -= l;
2332     }
2333
2334 exit:
2335     fclose(f);
2336 }
2337
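/* QMP 'pmemsave': like memsave, but reads guest physical memory. */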
2338 void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
2339                   Error **errp)
2340 {
2341     FILE *f;
2342     uint32_t l;
2343     uint8_t buf[1024];
2344
2345     f = fopen(filename, "wb");
2346     if (!f) {
2347         error_setg_file_open(errp, errno, filename);
2348         return;
2349     }
2350
2351     while (size != 0) {
2352         l = sizeof(buf);
2353         if (l > size)
2354             l = size;
2355         cpu_physical_memory_read(addr, buf, l);
2356         if (fwrite(buf, 1, l, f) != l) {
2357             error_setg(errp, QERR_IO_ERROR);
2358             goto exit;
2359         }
2360         addr += l;
2361         size -= l;
2362     }
2363
2364 exit:
2365     fclose(f);
2366 }
2367
2368 void qmp_inject_nmi(Error **errp)
2369 {
2370     nmi_monitor_handle(monitor_get_cpu_index(), errp);
2371 }
2372
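/*
 * Print host/guest clock drift statistics; only meaningful (and only
 * printed) when icount is in use.
 */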
2373 void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
2374 {
2375     if (!use_icount) {
2376         return;
2377     }
2378
2379     cpu_fprintf(f, "Host - Guest clock  %"PRIi64" ms\n",
2380                 (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
2381     if (icount_align_option) {
2382         cpu_fprintf(f, "Max guest delay     %"PRIi64" ms\n", -max_delay/SCALE_MS);
2383         cpu_fprintf(f, "Max guest advance   %"PRIi64" ms\n", max_advance/SCALE_MS);
2384     } else {
2385         cpu_fprintf(f, "Max guest delay     NA\n");
2386         cpu_fprintf(f, "Max guest advance   NA\n");
2387     }
2388 }