1 /*
2  * QEMU System Emulator
3  *
4  * Copyright (c) 2003-2008 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24
25 #include "qemu/osdep.h"
26 #include "qemu/config-file.h"
27 #include "cpu.h"
28 #include "monitor/monitor.h"
29 #include "qapi/error.h"
30 #include "qapi/qapi-commands-misc.h"
31 #include "qapi/qapi-events-run-state.h"
32 #include "qapi/qmp/qerror.h"
33 #include "qemu/error-report.h"
34 #include "sysemu/sysemu.h"
35 #include "sysemu/block-backend.h"
36 #include "exec/gdbstub.h"
37 #include "sysemu/dma.h"
38 #include "sysemu/hw_accel.h"
39 #include "sysemu/kvm.h"
40 #include "sysemu/hax.h"
41 #include "sysemu/hvf.h"
42 #include "sysemu/whpx.h"
43 #include "exec/exec-all.h"
44
45 #include "qemu/thread.h"
46 #include "sysemu/cpus.h"
47 #include "sysemu/qtest.h"
48 #include "qemu/main-loop.h"
49 #include "qemu/option.h"
50 #include "qemu/bitmap.h"
51 #include "qemu/seqlock.h"
52 #include "tcg.h"
53 #include "hw/nmi.h"
54 #include "sysemu/replay.h"
55 #include "hw/boards.h"
56
57 #ifdef CONFIG_LINUX
58
59 #include <sys/prctl.h>
60
61 #ifndef PR_MCE_KILL
62 #define PR_MCE_KILL 33
63 #endif
64
65 #ifndef PR_MCE_KILL_SET
66 #define PR_MCE_KILL_SET 1
67 #endif
68
69 #ifndef PR_MCE_KILL_EARLY
70 #define PR_MCE_KILL_EARLY 1
71 #endif
72
73 #endif /* CONFIG_LINUX */
74
75 int64_t max_delay;
76 int64_t max_advance;
77
78 /* vcpu throttling controls */
79 static QEMUTimer *throttle_timer;
80 static unsigned int throttle_percentage;
81
82 #define CPU_THROTTLE_PCT_MIN 1
83 #define CPU_THROTTLE_PCT_MAX 99
84 #define CPU_THROTTLE_TIMESLICE_NS 10000000
85
86 bool cpu_is_stopped(CPUState *cpu)
87 {
88     return cpu->stopped || !runstate_is_running();
89 }
90
91 static bool cpu_thread_is_idle(CPUState *cpu)
92 {
93     if (cpu->stop || cpu->queued_work_first) {
94         return false;
95     }
96     if (cpu_is_stopped(cpu)) {
97         return true;
98     }
99     if (!cpu->halted || cpu_has_work(cpu) ||
100         kvm_halt_in_kernel()) {
101         return false;
102     }
103     return true;
104 }
105
106 static bool all_cpu_threads_idle(void)
107 {
108     CPUState *cpu;
109
110     CPU_FOREACH(cpu) {
111         if (!cpu_thread_is_idle(cpu)) {
112             return false;
113         }
114     }
115     return true;
116 }
117
118 /***********************************************************/
119 /* guest cycle counter */
120
121 /* Protected by TimersState seqlock */
122
123 static bool icount_sleep = true;
124 /* Conversion factor from emulated instructions to virtual clock ticks.  */
125 static int icount_time_shift;
126 /* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
127 #define MAX_ICOUNT_SHIFT 10
128
129 typedef struct TimersState {
130     /* Protected by BQL.  */
131     int64_t cpu_ticks_prev;
132     int64_t cpu_ticks_offset;
133
134     /* cpu_clock_offset can be read out of BQL, so protect it with
135      * this lock.
136      */
137     QemuSeqLock vm_clock_seqlock;
138     int64_t cpu_clock_offset;
139     int32_t cpu_ticks_enabled;
140     int64_t dummy;
141
142     /* Compensate for varying guest execution speed.  */
143     int64_t qemu_icount_bias;
144     /* Only written by TCG thread */
145     int64_t qemu_icount;
146     /* for adjusting icount */
147     int64_t vm_clock_warp_start;
148     QEMUTimer *icount_rt_timer;
149     QEMUTimer *icount_vm_timer;
150     QEMUTimer *icount_warp_timer;
151 } TimersState;
152
153 static TimersState timers_state;
154 bool mttcg_enabled;
155
156 /*
157  * We default to false if we know other options have been enabled
158  * which are currently incompatible with MTTCG. Otherwise, once a
159  * guest (target) has been updated to support:
160  *   - atomic instructions
161  *   - memory ordering primitives (barriers)
162  * it can set the appropriate CONFIG flags in ${target}-softmmu.mak.
163  *
164  * Once a guest architecture has been converted to the new primitives
165  * there are two remaining limitations to check.
166  *
167  * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
168  * - The host must have a stronger memory order than the guest
169  *
170  * It may be possible in future to support strong guests on weak hosts
171  * but that will require tagging all load/stores in a guest with their
172  * implicit memory order requirements which would likely slow things
173  * down a lot.
174  */
175
176 static bool check_tcg_memory_orders_compatible(void)
177 {
178 #if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
179     return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
180 #else
181     return false;
182 #endif
183 }
184
185 static bool default_mttcg_enabled(void)
186 {
187     if (use_icount || TCG_OVERSIZED_GUEST) {
188         return false;
189     } else {
190 #ifdef TARGET_SUPPORTS_MTTCG
191         return check_tcg_memory_orders_compatible();
192 #else
193         return false;
194 #endif
195     }
196 }
197
198 void qemu_tcg_configure(QemuOpts *opts, Error **errp)
199 {
200     const char *t = qemu_opt_get(opts, "thread");
201     if (t) {
202         if (strcmp(t, "multi") == 0) {
203             if (TCG_OVERSIZED_GUEST) {
204                 error_setg(errp, "No MTTCG when guest word size > host's");
205             } else if (use_icount) {
206                 error_setg(errp, "No MTTCG when icount is enabled");
207             } else {
208 #ifndef TARGET_SUPPORTS_MTTCG
209                 error_report("Guest not yet converted to MTTCG - "
210                              "you may get unexpected results");
211 #endif
212                 if (!check_tcg_memory_orders_compatible()) {
213                     error_report("Guest expects a stronger memory ordering "
214                                  "than the host provides");
215                     error_printf("This may cause strange/hard to debug errors\n");
216                 }
217                 mttcg_enabled = true;
218             }
219         } else if (strcmp(t, "single") == 0) {
220             mttcg_enabled = false;
221         } else {
222             error_setg(errp, "Invalid 'thread' setting %s", t);
223         }
224     } else {
225         mttcg_enabled = default_mttcg_enabled();
226     }
227 }
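/* A usage sketch for the "thread" suboption handled above; it comes from
 * the command line, e.g.:
 *
 *     -accel tcg,thread=multi      request one thread per vCPU (MTTCG)
 *     -accel tcg,thread=single     force the single round-robin TCG thread
 *
 * Whether thread=multi is actually honoured depends on the icount,
 * oversized-guest and memory-ordering checks above.
 */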
228
229 /* The current number of executed instructions is based on what we
230  * originally budgeted minus the current state of the decrementing
231  * icount counters in extra/u16.low.
232  */
233 static int64_t cpu_get_icount_executed(CPUState *cpu)
234 {
235     return cpu->icount_budget - (cpu->icount_decr.u16.low + cpu->icount_extra);
236 }
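/* A worked example of the accounting above (illustrative numbers only):
 * if a vCPU was budgeted 10000 instructions, icount_decr.u16.low has
 * counted down to 100 and icount_extra still holds 2000, then
 * 10000 - (100 + 2000) = 7900 instructions have been executed so far.
 */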
237
238 /*
239  * Update the global shared timer_state.qemu_icount to take into
240  * account executed instructions. This is done by the TCG vCPU
241  * thread so the main-loop can see time has moved forward.
242  */
243 void cpu_update_icount(CPUState *cpu)
244 {
245     int64_t executed = cpu_get_icount_executed(cpu);
246     cpu->icount_budget -= executed;
247
248 #ifdef CONFIG_ATOMIC64
249     atomic_set__nocheck(&timers_state.qemu_icount,
250                         atomic_read__nocheck(&timers_state.qemu_icount) +
251                         executed);
252 #else /* FIXME: we need 64bit atomics to do this safely */
253     timers_state.qemu_icount += executed;
254 #endif
255 }
256
257 int64_t cpu_get_icount_raw(void)
258 {
259     CPUState *cpu = current_cpu;
260
261     if (cpu && cpu->running) {
262         if (!cpu->can_do_io) {
263             error_report("Bad icount read");
264             exit(1);
265         }
266         /* Take into account what has run */
267         cpu_update_icount(cpu);
268     }
269 #ifdef CONFIG_ATOMIC64
270     return atomic_read__nocheck(&timers_state.qemu_icount);
271 #else /* FIXME: we need 64bit atomics to do this safely */
272     return timers_state.qemu_icount;
273 #endif
274 }
275
276 /* Return the virtual CPU time, based on the instruction counter.  */
277 static int64_t cpu_get_icount_locked(void)
278 {
279     int64_t icount = cpu_get_icount_raw();
280     return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
281 }
282
283 int64_t cpu_get_icount(void)
284 {
285     int64_t icount;
286     unsigned start;
287
288     do {
289         start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
290         icount = cpu_get_icount_locked();
291     } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
292
293     return icount;
294 }
295
296 int64_t cpu_icount_to_ns(int64_t icount)
297 {
298     return icount << icount_time_shift;
299 }
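/* Worked example: with shift=auto, configure_icount() below starts from
 * icount_time_shift = 3, so each instruction accounts for 1 << 3 = 8 ns of
 * virtual time, i.e. roughly 125 MIPS, until icount_adjust() retunes the
 * shift.
 */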
300
301 /* Return the time elapsed in the VM between vm_start and vm_stop.  Unless
302  * icount is active, cpu_get_ticks() uses units of the host CPU cycle
303  * counter.
304  *
305  * Caller must hold the BQL
306  */
307 int64_t cpu_get_ticks(void)
308 {
309     int64_t ticks;
310
311     if (use_icount) {
312         return cpu_get_icount();
313     }
314
315     ticks = timers_state.cpu_ticks_offset;
316     if (timers_state.cpu_ticks_enabled) {
317         ticks += cpu_get_host_ticks();
318     }
319
320     if (timers_state.cpu_ticks_prev > ticks) {
321         /* Note: non-increasing ticks may happen if the host uses
322            software suspend */
323         timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
324         ticks = timers_state.cpu_ticks_prev;
325     }
326
327     timers_state.cpu_ticks_prev = ticks;
328     return ticks;
329 }
330
331 static int64_t cpu_get_clock_locked(void)
332 {
333     int64_t time;
334
335     time = timers_state.cpu_clock_offset;
336     if (timers_state.cpu_ticks_enabled) {
337         time += get_clock();
338     }
339
340     return time;
341 }
342
343 /* Return the monotonic time elapsed in the VM, i.e.,
344  * the time between vm_start and vm_stop
345  */
346 int64_t cpu_get_clock(void)
347 {
348     int64_t ti;
349     unsigned start;
350
351     do {
352         start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
353         ti = cpu_get_clock_locked();
354     } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
355
356     return ti;
357 }
358
359 /* enable cpu_get_ticks()
360  * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
361  */
362 void cpu_enable_ticks(void)
363 {
364     /* Here, what the seqlock really protects is cpu_clock_offset. */
365     seqlock_write_begin(&timers_state.vm_clock_seqlock);
366     if (!timers_state.cpu_ticks_enabled) {
367         timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
368         timers_state.cpu_clock_offset -= get_clock();
369         timers_state.cpu_ticks_enabled = 1;
370     }
371     seqlock_write_end(&timers_state.vm_clock_seqlock);
372 }
373
374 /* disable cpu_get_ticks() : the clock is stopped. You must not call
375  * cpu_get_ticks() after that.
376  * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
377  */
378 void cpu_disable_ticks(void)
379 {
380     /* Here, what the seqlock really protects is cpu_clock_offset. */
381     seqlock_write_begin(&timers_state.vm_clock_seqlock);
382     if (timers_state.cpu_ticks_enabled) {
383         timers_state.cpu_ticks_offset += cpu_get_host_ticks();
384         timers_state.cpu_clock_offset = cpu_get_clock_locked();
385         timers_state.cpu_ticks_enabled = 0;
386     }
387     seqlock_write_end(&timers_state.vm_clock_seqlock);
388 }
389
390 /* Correlation between real and virtual time is always going to be
391    fairly approximate, so ignore small variation.
392    When the guest is idle real and virtual time will be aligned in
393    the IO wait loop.  */
394 #define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)
395
396 static void icount_adjust(void)
397 {
398     int64_t cur_time;
399     int64_t cur_icount;
400     int64_t delta;
401
402     /* Protected by TimersState mutex.  */
403     static int64_t last_delta;
404
405     /* If the VM is not running, then do nothing.  */
406     if (!runstate_is_running()) {
407         return;
408     }
409
410     seqlock_write_begin(&timers_state.vm_clock_seqlock);
411     cur_time = cpu_get_clock_locked();
412     cur_icount = cpu_get_icount_locked();
413
414     delta = cur_icount - cur_time;
415     /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
416     if (delta > 0
417         && last_delta + ICOUNT_WOBBLE < delta * 2
418         && icount_time_shift > 0) {
419         /* The guest is getting too far ahead.  Slow time down.  */
420         icount_time_shift--;
421     }
422     if (delta < 0
423         && last_delta - ICOUNT_WOBBLE > delta * 2
424         && icount_time_shift < MAX_ICOUNT_SHIFT) {
425         /* The guest is getting too far behind.  Speed time up.  */
426         icount_time_shift++;
427     }
428     last_delta = delta;
429     timers_state.qemu_icount_bias = cur_icount
430                               - (timers_state.qemu_icount << icount_time_shift);
431     seqlock_write_end(&timers_state.vm_clock_seqlock);
432 }
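/* Rough illustration of the adjustment rule above (illustrative numbers):
 * if virtual time (cur_icount) is 0.5 s ahead of real time (cur_time),
 * delta = +0.5 s; as long as last_delta plus the 0.1 s ICOUNT_WOBBLE is
 * still below 2 * delta, the guest is judged to be pulling ahead and
 * icount_time_shift is decremented, halving the nanoseconds each
 * instruction is worth.  The symmetric case speeds virtual time back up,
 * bounded by MAX_ICOUNT_SHIFT.
 */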
433
434 static void icount_adjust_rt(void *opaque)
435 {
436     timer_mod(timers_state.icount_rt_timer,
437               qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
438     icount_adjust();
439 }
440
441 static void icount_adjust_vm(void *opaque)
442 {
443     timer_mod(timers_state.icount_vm_timer,
444                    qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
445                    NANOSECONDS_PER_SECOND / 10);
446     icount_adjust();
447 }
448
449 static int64_t qemu_icount_round(int64_t count)
450 {
451     return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
452 }
453
454 static void icount_warp_rt(void)
455 {
456     unsigned seq;
457     int64_t warp_start;
458
459     /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
460      * changes from -1 to another value, so the race here is okay.
461      */
462     do {
463         seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
464         warp_start = timers_state.vm_clock_warp_start;
465     } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
466
467     if (warp_start == -1) {
468         return;
469     }
470
471     seqlock_write_begin(&timers_state.vm_clock_seqlock);
472     if (runstate_is_running()) {
473         int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
474                                      cpu_get_clock_locked());
475         int64_t warp_delta;
476
477         warp_delta = clock - timers_state.vm_clock_warp_start;
478         if (use_icount == 2) {
479             /*
480              * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
481              * far ahead of real time.
482              */
483             int64_t cur_icount = cpu_get_icount_locked();
484             int64_t delta = clock - cur_icount;
485             warp_delta = MIN(warp_delta, delta);
486         }
487         timers_state.qemu_icount_bias += warp_delta;
488     }
489     timers_state.vm_clock_warp_start = -1;
490     seqlock_write_end(&timers_state.vm_clock_seqlock);
491
492     if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
493         qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
494     }
495 }
496
497 static void icount_timer_cb(void *opaque)
498 {
499     /* No need for a checkpoint because the timer already synchronizes
500      * with CHECKPOINT_CLOCK_VIRTUAL_RT.
501      */
502     icount_warp_rt();
503 }
504
505 void qtest_clock_warp(int64_t dest)
506 {
507     int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
508     AioContext *aio_context;
509     assert(qtest_enabled());
510     aio_context = qemu_get_aio_context();
511     while (clock < dest) {
512         int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
513         int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
514
515         seqlock_write_begin(&timers_state.vm_clock_seqlock);
516         timers_state.qemu_icount_bias += warp;
517         seqlock_write_end(&timers_state.vm_clock_seqlock);
518
519         qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
520         timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
521         clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
522     }
523     qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
524 }
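/* Sketch of how this is reached in practice, assuming the qtest protocol
 * in this tree where the "clock_step"/"clock_set" commands warp the clock:
 * a test sends, e.g.,
 *
 *     clock_step 1000000
 *
 * and the qtest server calls qtest_clock_warp(current virtual time + 1 ms),
 * firing any QEMU_CLOCK_VIRTUAL timers that fall inside that window.
 */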
525
526 void qemu_start_warp_timer(void)
527 {
528     int64_t clock;
529     int64_t deadline;
530
531     if (!use_icount) {
532         return;
533     }
534
535     /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
536      * do not fire, so computing the deadline does not make sense.
537      */
538     if (!runstate_is_running()) {
539         return;
540     }
541
542     /* warp clock deterministically in record/replay mode */
543     if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
544         return;
545     }
546
547     if (!all_cpu_threads_idle()) {
548         return;
549     }
550
551     if (qtest_enabled()) {
552         /* When testing, qtest commands advance icount.  */
553         return;
554     }
555
556     /* We want to use the earliest deadline from ALL vm_clocks */
557     clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
558     deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
559     if (deadline < 0) {
560         static bool notified;
561         if (!icount_sleep && !notified) {
562             warn_report("icount sleep disabled and no active timers");
563             notified = true;
564         }
565         return;
566     }
567
568     if (deadline > 0) {
569         /*
570          * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
571          * sleep.  Otherwise, the CPU might be waiting for a future timer
572          * interrupt to wake it up, but the interrupt never comes because
573          * the vCPU isn't running any insns and thus doesn't advance the
574          * QEMU_CLOCK_VIRTUAL.
575          */
576         if (!icount_sleep) {
577             /*
578              * We never let VCPUs sleep in no sleep icount mode.
579              * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
580              * to the next QEMU_CLOCK_VIRTUAL event and notify it.
581              * It is useful when we want a deterministic execution time,
582              * isolated from host latencies.
583              */
584             seqlock_write_begin(&timers_state.vm_clock_seqlock);
585             timers_state.qemu_icount_bias += deadline;
586             seqlock_write_end(&timers_state.vm_clock_seqlock);
587             qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
588         } else {
589             /*
590              * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
591              * "real" time, (related to the time left until the next event) has
592              * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
593              * This keeps the warps from being visible externally; for
594              * example, you will not send network packets continuously
595              * instead of every 100 ms.
596              */
597             seqlock_write_begin(&timers_state.vm_clock_seqlock);
598             if (timers_state.vm_clock_warp_start == -1
599                 || timers_state.vm_clock_warp_start > clock) {
600                 timers_state.vm_clock_warp_start = clock;
601             }
602             seqlock_write_end(&timers_state.vm_clock_seqlock);
603             timer_mod_anticipate(timers_state.icount_warp_timer,
604                                  clock + deadline);
605         }
606     } else if (deadline == 0) {
607         qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
608     }
609 }
610
611 static void qemu_account_warp_timer(void)
612 {
613     if (!use_icount || !icount_sleep) {
614         return;
615     }
616
617     /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
618      * do not fire, so computing the deadline does not make sense.
619      */
620     if (!runstate_is_running()) {
621         return;
622     }
623
624     /* warp clock deterministically in record/replay mode */
625     if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
626         return;
627     }
628
629     timer_del(timers_state.icount_warp_timer);
630     icount_warp_rt();
631 }
632
633 static bool icount_state_needed(void *opaque)
634 {
635     return use_icount;
636 }
637
638 static bool warp_timer_state_needed(void *opaque)
639 {
640     TimersState *s = opaque;
641     return s->icount_warp_timer != NULL;
642 }
643
644 static bool adjust_timers_state_needed(void *opaque)
645 {
646     TimersState *s = opaque;
647     return s->icount_rt_timer != NULL;
648 }
649
650 /*
651  * Subsection for warp timer migration is optional, because the timer may not be created
652  */
653 static const VMStateDescription icount_vmstate_warp_timer = {
654     .name = "timer/icount/warp_timer",
655     .version_id = 1,
656     .minimum_version_id = 1,
657     .needed = warp_timer_state_needed,
658     .fields = (VMStateField[]) {
659         VMSTATE_INT64(vm_clock_warp_start, TimersState),
660         VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
661         VMSTATE_END_OF_LIST()
662     }
663 };
664
665 static const VMStateDescription icount_vmstate_adjust_timers = {
666     .name = "timer/icount/timers",
667     .version_id = 1,
668     .minimum_version_id = 1,
669     .needed = adjust_timers_state_needed,
670     .fields = (VMStateField[]) {
671         VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
672         VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
673         VMSTATE_END_OF_LIST()
674     }
675 };
676
677 /*
678  * This is a subsection for icount migration.
679  */
680 static const VMStateDescription icount_vmstate_timers = {
681     .name = "timer/icount",
682     .version_id = 1,
683     .minimum_version_id = 1,
684     .needed = icount_state_needed,
685     .fields = (VMStateField[]) {
686         VMSTATE_INT64(qemu_icount_bias, TimersState),
687         VMSTATE_INT64(qemu_icount, TimersState),
688         VMSTATE_END_OF_LIST()
689     },
690     .subsections = (const VMStateDescription*[]) {
691         &icount_vmstate_warp_timer,
692         &icount_vmstate_adjust_timers,
693         NULL
694     }
695 };
696
697 static const VMStateDescription vmstate_timers = {
698     .name = "timer",
699     .version_id = 2,
700     .minimum_version_id = 1,
701     .fields = (VMStateField[]) {
702         VMSTATE_INT64(cpu_ticks_offset, TimersState),
703         VMSTATE_INT64(dummy, TimersState),
704         VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
705         VMSTATE_END_OF_LIST()
706     },
707     .subsections = (const VMStateDescription*[]) {
708         &icount_vmstate_timers,
709         NULL
710     }
711 };
712
713 static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
714 {
715     double pct;
716     double throttle_ratio;
717     long sleeptime_ns;
718
719     if (!cpu_throttle_get_percentage()) {
720         return;
721     }
722
723     pct = (double)cpu_throttle_get_percentage()/100;
724     throttle_ratio = pct / (1 - pct);
725     sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);
726
727     qemu_mutex_unlock_iothread();
728     g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
729     qemu_mutex_lock_iothread();
730     atomic_set(&cpu->throttle_thread_scheduled, 0);
731 }
732
733 static void cpu_throttle_timer_tick(void *opaque)
734 {
735     CPUState *cpu;
736     double pct;
737
738     /* Stop the timer if needed */
739     if (!cpu_throttle_get_percentage()) {
740         return;
741     }
742     CPU_FOREACH(cpu) {
743         if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
744             async_run_on_cpu(cpu, cpu_throttle_thread,
745                              RUN_ON_CPU_NULL);
746         }
747     }
748
749     pct = (double)cpu_throttle_get_percentage()/100;
750     timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
751                                    CPU_THROTTLE_TIMESLICE_NS / (1-pct));
752 }
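/* Worked example of the throttle arithmetic above (illustrative numbers):
 * at a 75% throttle, pct = 0.75 and throttle_ratio = 0.75 / 0.25 = 3, so
 * cpu_throttle_thread() sleeps each vCPU for 3 * 10 ms = 30 ms while the
 * timer re-arms every 10 ms / (1 - 0.75) = 40 ms.  Each 40 ms period is
 * thus roughly 10 ms of execution and 30 ms of sleep, removing about 75%
 * of CPU time from the guest.
 */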
753
754 void cpu_throttle_set(int new_throttle_pct)
755 {
756     /* Ensure throttle percentage is within valid range */
757     new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
758     new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);
759
760     atomic_set(&throttle_percentage, new_throttle_pct);
761
762     timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
763                                        CPU_THROTTLE_TIMESLICE_NS);
764 }
765
766 void cpu_throttle_stop(void)
767 {
768     atomic_set(&throttle_percentage, 0);
769 }
770
771 bool cpu_throttle_active(void)
772 {
773     return (cpu_throttle_get_percentage() != 0);
774 }
775
776 int cpu_throttle_get_percentage(void)
777 {
778     return atomic_read(&throttle_percentage);
779 }
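/* A minimal usage sketch for the throttle API above (hypothetical caller;
 * in-tree users such as migration auto-converge follow the same pattern):
 *
 *     if (!cpu_throttle_active()) {
 *         cpu_throttle_set(20);               (start at a 20% throttle)
 *     } else {
 *         int pct = cpu_throttle_get_percentage();
 *         cpu_throttle_set(MIN(pct + 10, CPU_THROTTLE_PCT_MAX));
 *     }
 *     ...
 *     cpu_throttle_stop();                    (once the pressure is gone)
 */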
780
781 void cpu_ticks_init(void)
782 {
783     seqlock_init(&timers_state.vm_clock_seqlock);
784     vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
785     throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
786                                            cpu_throttle_timer_tick, NULL);
787 }
788
789 void configure_icount(QemuOpts *opts, Error **errp)
790 {
791     const char *option;
792     char *rem_str = NULL;
793
794     option = qemu_opt_get(opts, "shift");
795     if (!option) {
796         if (qemu_opt_get(opts, "align") != NULL) {
797             error_setg(errp, "Please specify shift option when using align");
798         }
799         return;
800     }
801
802     icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
803     if (icount_sleep) {
804         timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
805                                          icount_timer_cb, NULL);
806     }
807
808     icount_align_option = qemu_opt_get_bool(opts, "align", false);
809
810     if (icount_align_option && !icount_sleep) {
811         error_setg(errp, "align=on and sleep=off are incompatible");
812     }
813     if (strcmp(option, "auto") != 0) {
814         errno = 0;
815         icount_time_shift = strtol(option, &rem_str, 0);
816         if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
817             error_setg(errp, "icount: Invalid shift value");
818         }
819         use_icount = 1;
820         return;
821     } else if (icount_align_option) {
822         error_setg(errp, "shift=auto and align=on are incompatible");
823     } else if (!icount_sleep) {
824         error_setg(errp, "shift=auto and sleep=off are incompatible");
825     }
826
827     use_icount = 2;
828
829     /* 125MIPS seems a reasonable initial guess at the guest speed.
830        It will be corrected fairly quickly anyway.  */
831     icount_time_shift = 3;
832
833     /* Have both realtime and virtual time triggers for speed adjustment.
834        The realtime trigger catches emulated time passing too slowly,
835        the virtual time trigger catches emulated time passing too fast.
836        Realtime triggers occur even when idle, so use them less frequently
837        than VM triggers.  */
838     timers_state.vm_clock_warp_start = -1;
839     timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
840                                    icount_adjust_rt, NULL);
841     timer_mod(timers_state.icount_rt_timer,
842                    qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
843     timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
844                                         icount_adjust_vm, NULL);
845     timer_mod(timers_state.icount_vm_timer,
846                    qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
847                    NANOSECONDS_PER_SECOND / 10);
848 }
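/* Usage sketch for the icount options parsed above, e.g. on the command
 * line:
 *
 *     -icount shift=7                fixed 2^7 ns of virtual time per insn
 *     -icount shift=auto             adaptive shift (use_icount == 2)
 *     -icount shift=7,align=on       additionally align virtual and real time
 *     -icount shift=7,sleep=off      never let an idle vCPU sleep
 *
 * As enforced above, align=on needs an explicit shift and sleep=on, and
 * shift=auto cannot be combined with align=on or sleep=off.
 */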
849
850 /***********************************************************/
851 /* TCG vCPU kick timer
852  *
853  * The kick timer is responsible for moving single threaded vCPU
854  * emulation on to the next vCPU. If more than one vCPU is running, a
855  * timer event will force a cpu->exit so the next vCPU can get
856  * scheduled.
857  *
858  * The timer is removed while all vCPUs are idle and restarted once
859  * they become runnable again.
860  */
861
862 static QEMUTimer *tcg_kick_vcpu_timer;
863 static CPUState *tcg_current_rr_cpu;
864
865 #define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
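/* i.e. 100 ms: with several vCPUs sharing the single RR thread, no vCPU
 * should keep the thread for much longer than this before kick_tcg_thread()
 * forces a switch.
 */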
866
867 static inline int64_t qemu_tcg_next_kick(void)
868 {
869     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
870 }
871
872 /* Kick the currently round-robin scheduled vCPU */
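/* The loop below re-reads tcg_current_rr_cpu after the kick: if the RR
 * thread has meanwhile moved on to another vCPU, that one is kicked too,
 * so whichever vCPU is currently scheduled is guaranteed to see the exit
 * request.
 */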
873 static void qemu_cpu_kick_rr_cpu(void)
874 {
875     CPUState *cpu;
876     do {
877         cpu = atomic_mb_read(&tcg_current_rr_cpu);
878         if (cpu) {
879             cpu_exit(cpu);
880         }
881     } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
882 }
883
884 static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
885 {
886 }
887
888 void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
889 {
890     if (!use_icount || type != QEMU_CLOCK_VIRTUAL) {
891         qemu_notify_event();
892         return;
893     }
894
895     if (!qemu_in_vcpu_thread() && first_cpu) {
896         /* qemu_cpu_kick is not enough to kick a halted CPU out of
897          * qemu_tcg_wait_io_event.  async_run_on_cpu, instead,
898          * causes cpu_thread_is_idle to return false.  This way,
899          * handle_icount_deadline can run.
900          */
901         async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
902     }
903 }
904
905 static void kick_tcg_thread(void *opaque)
906 {
907     timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
908     qemu_cpu_kick_rr_cpu();
909 }
910
911 static void start_tcg_kick_timer(void)
912 {
913     assert(!mttcg_enabled);
914     if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
915         tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
916                                            kick_tcg_thread, NULL);
917         timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
918     }
919 }
920
921 static void stop_tcg_kick_timer(void)
922 {
923     assert(!mttcg_enabled);
924     if (tcg_kick_vcpu_timer) {
925         timer_del(tcg_kick_vcpu_timer);
926         tcg_kick_vcpu_timer = NULL;
927     }
928 }
929
930 /***********************************************************/
931 void hw_error(const char *fmt, ...)
932 {
933     va_list ap;
934     CPUState *cpu;
935
936     va_start(ap, fmt);
937     fprintf(stderr, "qemu: hardware error: ");
938     vfprintf(stderr, fmt, ap);
939     fprintf(stderr, "\n");
940     CPU_FOREACH(cpu) {
941         fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
942         cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
943     }
944     va_end(ap);
945     abort();
946 }
947
948 void cpu_synchronize_all_states(void)
949 {
950     CPUState *cpu;
951
952     CPU_FOREACH(cpu) {
953         cpu_synchronize_state(cpu);
954         /* TODO: move to cpu_synchronize_state() */
955         if (hvf_enabled()) {
956             hvf_cpu_synchronize_state(cpu);
957         }
958     }
959 }
960
961 void cpu_synchronize_all_post_reset(void)
962 {
963     CPUState *cpu;
964
965     CPU_FOREACH(cpu) {
966         cpu_synchronize_post_reset(cpu);
967         /* TODO: move to cpu_synchronize_post_reset() */
968         if (hvf_enabled()) {
969             hvf_cpu_synchronize_post_reset(cpu);
970         }
971     }
972 }
973
974 void cpu_synchronize_all_post_init(void)
975 {
976     CPUState *cpu;
977
978     CPU_FOREACH(cpu) {
979         cpu_synchronize_post_init(cpu);
980         /* TODO: move to cpu_synchronize_post_init() */
981         if (hvf_enabled()) {
982             hvf_cpu_synchronize_post_init(cpu);
983         }
984     }
985 }
986
987 void cpu_synchronize_all_pre_loadvm(void)
988 {
989     CPUState *cpu;
990
991     CPU_FOREACH(cpu) {
992         cpu_synchronize_pre_loadvm(cpu);
993     }
994 }
995
996 static int do_vm_stop(RunState state, bool send_stop)
997 {
998     int ret = 0;
999
1000     if (runstate_is_running()) {
1001         cpu_disable_ticks();
1002         pause_all_vcpus();
1003         runstate_set(state);
1004         vm_state_notify(0, state);
1005         if (send_stop) {
1006             qapi_event_send_stop(&error_abort);
1007         }
1008     }
1009
1010     bdrv_drain_all();
1011     replay_disable_events();
1012     ret = bdrv_flush_all();
1013
1014     return ret;
1015 }
1016
1017 /* Special vm_stop() variant for terminating the process.  Historically clients
1018  * did not expect a QMP STOP event and so we need to retain compatibility.
1019  */
1020 int vm_shutdown(void)
1021 {
1022     return do_vm_stop(RUN_STATE_SHUTDOWN, false);
1023 }
1024
1025 static bool cpu_can_run(CPUState *cpu)
1026 {
1027     if (cpu->stop) {
1028         return false;
1029     }
1030     if (cpu_is_stopped(cpu)) {
1031         return false;
1032     }
1033     return true;
1034 }
1035
1036 static void cpu_handle_guest_debug(CPUState *cpu)
1037 {
1038     gdb_set_stop_cpu(cpu);
1039     qemu_system_debug_request();
1040     cpu->stopped = true;
1041 }
1042
1043 #ifdef CONFIG_LINUX
1044 static void sigbus_reraise(void)
1045 {
1046     sigset_t set;
1047     struct sigaction action;
1048
1049     memset(&action, 0, sizeof(action));
1050     action.sa_handler = SIG_DFL;
1051     if (!sigaction(SIGBUS, &action, NULL)) {
1052         raise(SIGBUS);
1053         sigemptyset(&set);
1054         sigaddset(&set, SIGBUS);
1055         pthread_sigmask(SIG_UNBLOCK, &set, NULL);
1056     }
1057     perror("Failed to re-raise SIGBUS!\n");
1058     abort();
1059 }
1060
1061 static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
1062 {
1063     if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
1064         sigbus_reraise();
1065     }
1066
1067     if (current_cpu) {
1068         /* Called asynchronously in VCPU thread.  */
1069         if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
1070             sigbus_reraise();
1071         }
1072     } else {
1073         /* Called synchronously (via signalfd) in main thread.  */
1074         if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
1075             sigbus_reraise();
1076         }
1077     }
1078 }
1079
1080 static void qemu_init_sigbus(void)
1081 {
1082     struct sigaction action;
1083
1084     memset(&action, 0, sizeof(action));
1085     action.sa_flags = SA_SIGINFO;
1086     action.sa_sigaction = sigbus_handler;
1087     sigaction(SIGBUS, &action, NULL);
1088
1089     prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
1090 }
1091 #else /* !CONFIG_LINUX */
1092 static void qemu_init_sigbus(void)
1093 {
1094 }
1095 #endif /* !CONFIG_LINUX */
1096
1097 static QemuMutex qemu_global_mutex;
1098
1099 static QemuThread io_thread;
1100
1101 /* cpu creation */
1102 static QemuCond qemu_cpu_cond;
1103 /* system init */
1104 static QemuCond qemu_pause_cond;
1105
1106 void qemu_init_cpu_loop(void)
1107 {
1108     qemu_init_sigbus();
1109     qemu_cond_init(&qemu_cpu_cond);
1110     qemu_cond_init(&qemu_pause_cond);
1111     qemu_mutex_init(&qemu_global_mutex);
1112
1113     qemu_thread_get_self(&io_thread);
1114 }
1115
1116 void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
1117 {
1118     do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
1119 }
1120
1121 static void qemu_kvm_destroy_vcpu(CPUState *cpu)
1122 {
1123     if (kvm_destroy_vcpu(cpu) < 0) {
1124         error_report("kvm_destroy_vcpu failed");
1125         exit(EXIT_FAILURE);
1126     }
1127 }
1128
1129 static void qemu_tcg_destroy_vcpu(CPUState *cpu)
1130 {
1131 }
1132
1133 static void qemu_cpu_stop(CPUState *cpu, bool exit)
1134 {
1135     g_assert(qemu_cpu_is_self(cpu));
1136     cpu->stop = false;
1137     cpu->stopped = true;
1138     if (exit) {
1139         cpu_exit(cpu);
1140     }
1141     qemu_cond_broadcast(&qemu_pause_cond);
1142 }
1143
1144 static void qemu_wait_io_event_common(CPUState *cpu)
1145 {
1146     atomic_mb_set(&cpu->thread_kicked, false);
1147     if (cpu->stop) {
1148         qemu_cpu_stop(cpu, false);
1149     }
1150     process_queued_cpu_work(cpu);
1151 }
1152
1153 static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
1154 {
1155     while (all_cpu_threads_idle()) {
1156         stop_tcg_kick_timer();
1157         qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1158     }
1159
1160     start_tcg_kick_timer();
1161
1162     qemu_wait_io_event_common(cpu);
1163 }
1164
1165 static void qemu_wait_io_event(CPUState *cpu)
1166 {
1167     while (cpu_thread_is_idle(cpu)) {
1168         qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1169     }
1170
1171 #ifdef _WIN32
1172     /* Eat dummy APC queued by qemu_cpu_kick_thread.  */
1173     if (!tcg_enabled()) {
1174         SleepEx(0, TRUE);
1175     }
1176 #endif
1177     qemu_wait_io_event_common(cpu);
1178 }
1179
1180 static void *qemu_kvm_cpu_thread_fn(void *arg)
1181 {
1182     CPUState *cpu = arg;
1183     int r;
1184
1185     rcu_register_thread();
1186
1187     qemu_mutex_lock_iothread();
1188     qemu_thread_get_self(cpu->thread);
1189     cpu->thread_id = qemu_get_thread_id();
1190     cpu->can_do_io = 1;
1191     current_cpu = cpu;
1192
1193     r = kvm_init_vcpu(cpu);
1194     if (r < 0) {
1195         error_report("kvm_init_vcpu failed: %s", strerror(-r));
1196         exit(1);
1197     }
1198
1199     kvm_init_cpu_signals(cpu);
1200
1201     /* signal CPU creation */
1202     cpu->created = true;
1203     qemu_cond_signal(&qemu_cpu_cond);
1204
1205     do {
1206         if (cpu_can_run(cpu)) {
1207             r = kvm_cpu_exec(cpu);
1208             if (r == EXCP_DEBUG) {
1209                 cpu_handle_guest_debug(cpu);
1210             }
1211         }
1212         qemu_wait_io_event(cpu);
1213     } while (!cpu->unplug || cpu_can_run(cpu));
1214
1215     qemu_kvm_destroy_vcpu(cpu);
1216     cpu->created = false;
1217     qemu_cond_signal(&qemu_cpu_cond);
1218     qemu_mutex_unlock_iothread();
1219     rcu_unregister_thread();
1220     return NULL;
1221 }
1222
1223 static void *qemu_dummy_cpu_thread_fn(void *arg)
1224 {
1225 #ifdef _WIN32
1226     error_report("qtest is not supported under Windows");
1227     exit(1);
1228 #else
1229     CPUState *cpu = arg;
1230     sigset_t waitset;
1231     int r;
1232
1233     rcu_register_thread();
1234
1235     qemu_mutex_lock_iothread();
1236     qemu_thread_get_self(cpu->thread);
1237     cpu->thread_id = qemu_get_thread_id();
1238     cpu->can_do_io = 1;
1239     current_cpu = cpu;
1240
1241     sigemptyset(&waitset);
1242     sigaddset(&waitset, SIG_IPI);
1243
1244     /* signal CPU creation */
1245     cpu->created = true;
1246     qemu_cond_signal(&qemu_cpu_cond);
1247
1248     do {
1249         qemu_mutex_unlock_iothread();
1250         do {
1251             int sig;
1252             r = sigwait(&waitset, &sig);
1253         } while (r == -1 && (errno == EAGAIN || errno == EINTR));
1254         if (r == -1) {
1255             perror("sigwait");
1256             exit(1);
1257         }
1258         qemu_mutex_lock_iothread();
1259         qemu_wait_io_event(cpu);
1260     } while (!cpu->unplug);
1261
1262     rcu_unregister_thread();
1263     return NULL;
1264 #endif
1265 }
1266
1267 static int64_t tcg_get_icount_limit(void)
1268 {
1269     int64_t deadline;
1270
1271     if (replay_mode != REPLAY_MODE_PLAY) {
1272         deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1273
1274         /* Maintain prior (possibly buggy) behaviour where if no deadline
1275          * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
1276          * INT32_MAX nanoseconds ahead, we still use INT32_MAX
1277          * nanoseconds.
1278          */
1279         if ((deadline < 0) || (deadline > INT32_MAX)) {
1280             deadline = INT32_MAX;
1281         }
1282
1283         return qemu_icount_round(deadline);
1284     } else {
1285         return replay_get_instructions();
1286     }
1287 }
1288
1289 static void handle_icount_deadline(void)
1290 {
1291     assert(qemu_in_vcpu_thread());
1292     if (use_icount) {
1293         int64_t deadline =
1294             qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1295
1296         if (deadline == 0) {
1297             /* Wake up other AioContexts.  */
1298             qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
1299             qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
1300         }
1301     }
1302 }
1303
1304 static void prepare_icount_for_run(CPUState *cpu)
1305 {
1306     if (use_icount) {
1307         int insns_left;
1308
1309         /* These should always be cleared by process_icount_data after
1310          * each vCPU execution. However u16.high can be raised
1311          * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
1312          */
1313         g_assert(cpu->icount_decr.u16.low == 0);
1314         g_assert(cpu->icount_extra == 0);
1315
1316         cpu->icount_budget = tcg_get_icount_limit();
1317         insns_left = MIN(0xffff, cpu->icount_budget);
1318         cpu->icount_decr.u16.low = insns_left;
1319         cpu->icount_extra = cpu->icount_budget - insns_left;
1320     }
1321 }
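/* Example of the budget split above (illustrative numbers): with a
 * deadline-derived budget of 200000 instructions, the 16-bit decrementer
 * gets MIN(0xffff, 200000) = 65535 and icount_extra keeps the remaining
 * 134465; the execution loop refills u16.low from icount_extra as it
 * drains.
 */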
1322
1323 static void process_icount_data(CPUState *cpu)
1324 {
1325     if (use_icount) {
1326         /* Account for executed instructions */
1327         cpu_update_icount(cpu);
1328
1329         /* Reset the counters */
1330         cpu->icount_decr.u16.low = 0;
1331         cpu->icount_extra = 0;
1332         cpu->icount_budget = 0;
1333
1334         replay_account_executed_instructions();
1335     }
1336 }
1337
1338
1339 static int tcg_cpu_exec(CPUState *cpu)
1340 {
1341     int ret;
1342 #ifdef CONFIG_PROFILER
1343     int64_t ti;
1344 #endif
1345
1346 #ifdef CONFIG_PROFILER
1347     ti = profile_getclock();
1348 #endif
1349     qemu_mutex_unlock_iothread();
1350     cpu_exec_start(cpu);
1351     ret = cpu_exec(cpu);
1352     cpu_exec_end(cpu);
1353     qemu_mutex_lock_iothread();
1354 #ifdef CONFIG_PROFILER
1355     tcg_time += profile_getclock() - ti;
1356 #endif
1357     return ret;
1358 }
1359
1360 /* Destroy any remaining vCPUs which have been unplugged and have
1361  * finished running
1362  */
1363 static void deal_with_unplugged_cpus(void)
1364 {
1365     CPUState *cpu;
1366
1367     CPU_FOREACH(cpu) {
1368         if (cpu->unplug && !cpu_can_run(cpu)) {
1369             qemu_tcg_destroy_vcpu(cpu);
1370             cpu->created = false;
1371             qemu_cond_signal(&qemu_cpu_cond);
1372             break;
1373         }
1374     }
1375 }
1376
1377 /* Single-threaded TCG
1378  *
1379  * In the single-threaded case each vCPU is simulated in turn. If
1380  * there is more than a single vCPU we create a simple timer to kick
1381  * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
1382  * This is done explicitly rather than relying on side-effects
1383  * elsewhere.
1384  */
1385
1386 static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
1387 {
1388     CPUState *cpu = arg;
1389
1390     rcu_register_thread();
1391     tcg_register_thread();
1392
1393     qemu_mutex_lock_iothread();
1394     qemu_thread_get_self(cpu->thread);
1395
1396     cpu->thread_id = qemu_get_thread_id();
1397     cpu->created = true;
1398     cpu->can_do_io = 1;
1399     qemu_cond_signal(&qemu_cpu_cond);
1400
1401     /* wait for initial kick-off after machine start */
1402     while (first_cpu->stopped) {
1403         qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
1404
1405         /* process any pending work */
1406         CPU_FOREACH(cpu) {
1407             current_cpu = cpu;
1408             qemu_wait_io_event_common(cpu);
1409         }
1410     }
1411
1412     start_tcg_kick_timer();
1413
1414     cpu = first_cpu;
1415
1416     /* process any pending work */
1417     cpu->exit_request = 1;
1418
1419     while (1) {
1420         /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
1421         qemu_account_warp_timer();
1422
1423         /* Run the timers here.  This is much more efficient than
1424          * waking up the I/O thread and waiting for completion.
1425          */
1426         handle_icount_deadline();
1427
1428         if (!cpu) {
1429             cpu = first_cpu;
1430         }
1431
1432         while (cpu && !cpu->queued_work_first && !cpu->exit_request) {
1433
1434             atomic_mb_set(&tcg_current_rr_cpu, cpu);
1435             current_cpu = cpu;
1436
1437             qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
1438                               (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
1439
1440             if (cpu_can_run(cpu)) {
1441                 int r;
1442
1443                 prepare_icount_for_run(cpu);
1444
1445                 r = tcg_cpu_exec(cpu);
1446
1447                 process_icount_data(cpu);
1448
1449                 if (r == EXCP_DEBUG) {
1450                     cpu_handle_guest_debug(cpu);
1451                     break;
1452                 } else if (r == EXCP_ATOMIC) {
1453                     qemu_mutex_unlock_iothread();
1454                     cpu_exec_step_atomic(cpu);
1455                     qemu_mutex_lock_iothread();
1456                     break;
1457                 }
1458             } else if (cpu->stop) {
1459                 if (cpu->unplug) {
1460                     cpu = CPU_NEXT(cpu);
1461                 }
1462                 break;
1463             }
1464
1465             cpu = CPU_NEXT(cpu);
1466         } /* while (cpu && !cpu->exit_request).. */
1467
1468         /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
1469         atomic_set(&tcg_current_rr_cpu, NULL);
1470
1471         if (cpu && cpu->exit_request) {
1472             atomic_mb_set(&cpu->exit_request, 0);
1473         }
1474
1475         qemu_tcg_rr_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
1476         deal_with_unplugged_cpus();
1477     }
1478
1479     rcu_unregister_thread();
1480     return NULL;
1481 }
1482
1483 static void *qemu_hax_cpu_thread_fn(void *arg)
1484 {
1485     CPUState *cpu = arg;
1486     int r;
1487
1488     rcu_register_thread();
1489     qemu_mutex_lock_iothread();
1490     qemu_thread_get_self(cpu->thread);
1491
1492     cpu->thread_id = qemu_get_thread_id();
1493     cpu->created = true;
1494     cpu->halted = 0;
1495     current_cpu = cpu;
1496
1497     hax_init_vcpu(cpu);
1498     qemu_cond_signal(&qemu_cpu_cond);
1499
1500     do {
1501         if (cpu_can_run(cpu)) {
1502             r = hax_smp_cpu_exec(cpu);
1503             if (r == EXCP_DEBUG) {
1504                 cpu_handle_guest_debug(cpu);
1505             }
1506         }
1507
1508         qemu_wait_io_event(cpu);
1509     } while (!cpu->unplug || cpu_can_run(cpu));
1510     rcu_unregister_thread();
1511     return NULL;
1512 }
1513
1514 /* The HVF-specific vCPU thread function. This one should only run when the host
1515  * CPU supports the VMX "unrestricted guest" feature. */
1516 static void *qemu_hvf_cpu_thread_fn(void *arg)
1517 {
1518     CPUState *cpu = arg;
1519
1520     int r;
1521
1522     assert(hvf_enabled());
1523
1524     rcu_register_thread();
1525
1526     qemu_mutex_lock_iothread();
1527     qemu_thread_get_self(cpu->thread);
1528
1529     cpu->thread_id = qemu_get_thread_id();
1530     cpu->can_do_io = 1;
1531     current_cpu = cpu;
1532
1533     hvf_init_vcpu(cpu);
1534
1535     /* signal CPU creation */
1536     cpu->created = true;
1537     qemu_cond_signal(&qemu_cpu_cond);
1538
1539     do {
1540         if (cpu_can_run(cpu)) {
1541             r = hvf_vcpu_exec(cpu);
1542             if (r == EXCP_DEBUG) {
1543                 cpu_handle_guest_debug(cpu);
1544             }
1545         }
1546         qemu_wait_io_event(cpu);
1547     } while (!cpu->unplug || cpu_can_run(cpu));
1548
1549     hvf_vcpu_destroy(cpu);
1550     cpu->created = false;
1551     qemu_cond_signal(&qemu_cpu_cond);
1552     qemu_mutex_unlock_iothread();
1553     rcu_unregister_thread();
1554     return NULL;
1555 }
1556
1557 static void *qemu_whpx_cpu_thread_fn(void *arg)
1558 {
1559     CPUState *cpu = arg;
1560     int r;
1561
1562     rcu_register_thread();
1563
1564     qemu_mutex_lock_iothread();
1565     qemu_thread_get_self(cpu->thread);
1566     cpu->thread_id = qemu_get_thread_id();
1567     current_cpu = cpu;
1568
1569     r = whpx_init_vcpu(cpu);
1570     if (r < 0) {
1571         fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-r));
1572         exit(1);
1573     }
1574
1575     /* signal CPU creation */
1576     cpu->created = true;
1577     qemu_cond_signal(&qemu_cpu_cond);
1578
1579     do {
1580         if (cpu_can_run(cpu)) {
1581             r = whpx_vcpu_exec(cpu);
1582             if (r == EXCP_DEBUG) {
1583                 cpu_handle_guest_debug(cpu);
1584             }
1585         }
1586         while (cpu_thread_is_idle(cpu)) {
1587             qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1588         }
1589         qemu_wait_io_event_common(cpu);
1590     } while (!cpu->unplug || cpu_can_run(cpu));
1591
1592     whpx_destroy_vcpu(cpu);
1593     cpu->created = false;
1594     qemu_cond_signal(&qemu_cpu_cond);
1595     qemu_mutex_unlock_iothread();
1596     rcu_unregister_thread();
1597     return NULL;
1598 }
1599
1600 #ifdef _WIN32
1601 static void CALLBACK dummy_apc_func(ULONG_PTR unused)
1602 {
1603 }
1604 #endif
1605
1606 /* Multi-threaded TCG
1607  *
1608  * In the multi-threaded case each vCPU has its own thread. The TLS
1609  * variable current_cpu can be used deep in the code to find the
1610  * current CPUState for a given thread.
1611  */
1612
1613 static void *qemu_tcg_cpu_thread_fn(void *arg)
1614 {
1615     CPUState *cpu = arg;
1616
1617     g_assert(!use_icount);
1618
1619     rcu_register_thread();
1620     tcg_register_thread();
1621
1622     qemu_mutex_lock_iothread();
1623     qemu_thread_get_self(cpu->thread);
1624
1625     cpu->thread_id = qemu_get_thread_id();
1626     cpu->created = true;
1627     cpu->can_do_io = 1;
1628     current_cpu = cpu;
1629     qemu_cond_signal(&qemu_cpu_cond);
1630
1631     /* process any pending work */
1632     cpu->exit_request = 1;
1633
1634     do {
1635         if (cpu_can_run(cpu)) {
1636             int r;
1637             r = tcg_cpu_exec(cpu);
1638             switch (r) {
1639             case EXCP_DEBUG:
1640                 cpu_handle_guest_debug(cpu);
1641                 break;
1642             case EXCP_HALTED:
1643                 /* during start-up the vCPU is reset and the thread is
1644                  * kicked several times. If we don't ensure we go back
1645                  * to sleep in the halted state we won't cleanly
1646                  * start up when the vCPU is enabled.
1647                  *
1648                  * cpu->halted should ensure we sleep in wait_io_event
1649                  */
1650                 g_assert(cpu->halted);
1651                 break;
1652             case EXCP_ATOMIC:
1653                 qemu_mutex_unlock_iothread();
1654                 cpu_exec_step_atomic(cpu);
1655                 qemu_mutex_lock_iothread();
1656             default:
1657                 /* Ignore everything else? */
1658                 break;
1659             }
1660         }
1661
1662         atomic_mb_set(&cpu->exit_request, 0);
1663         qemu_wait_io_event(cpu);
1664     } while (!cpu->unplug || cpu_can_run(cpu));
1665
1666     qemu_tcg_destroy_vcpu(cpu);
1667     cpu->created = false;
1668     qemu_cond_signal(&qemu_cpu_cond);
1669     qemu_mutex_unlock_iothread();
1670     rcu_unregister_thread();
1671     return NULL;
1672 }
1673
1674 static void qemu_cpu_kick_thread(CPUState *cpu)
1675 {
1676 #ifndef _WIN32
1677     int err;
1678
1679     if (cpu->thread_kicked) {
1680         return;
1681     }
1682     cpu->thread_kicked = true;
1683     err = pthread_kill(cpu->thread->thread, SIG_IPI);
1684     if (err) {
1685         fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
1686         exit(1);
1687     }
1688 #else /* _WIN32 */
1689     if (!qemu_cpu_is_self(cpu)) {
1690         if (whpx_enabled()) {
1691             whpx_vcpu_kick(cpu);
1692         } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
1693             fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
1694                     __func__, GetLastError());
1695             exit(1);
1696         }
1697     }
1698 #endif
1699 }
1700
1701 void qemu_cpu_kick(CPUState *cpu)
1702 {
1703     qemu_cond_broadcast(cpu->halt_cond);
1704     if (tcg_enabled()) {
1705         cpu_exit(cpu);
1706         /* NOP unless doing single-thread RR */
1707         qemu_cpu_kick_rr_cpu();
1708     } else {
1709         if (hax_enabled()) {
1710             /*
1711              * FIXME: race condition with the exit_request check in
1712              * hax_vcpu_hax_exec
1713              */
1714             cpu->exit_request = 1;
1715         }
1716         qemu_cpu_kick_thread(cpu);
1717     }
1718 }
1719
1720 void qemu_cpu_kick_self(void)
1721 {
1722     assert(current_cpu);
1723     qemu_cpu_kick_thread(current_cpu);
1724 }
1725
1726 bool qemu_cpu_is_self(CPUState *cpu)
1727 {
1728     return qemu_thread_is_self(cpu->thread);
1729 }
1730
1731 bool qemu_in_vcpu_thread(void)
1732 {
1733     return current_cpu && qemu_cpu_is_self(current_cpu);
1734 }
1735
1736 static __thread bool iothread_locked = false;
1737
1738 bool qemu_mutex_iothread_locked(void)
1739 {
1740     return iothread_locked;
1741 }
1742
1743 void qemu_mutex_lock_iothread(void)
1744 {
1745     g_assert(!qemu_mutex_iothread_locked());
1746     qemu_mutex_lock(&qemu_global_mutex);
1747     iothread_locked = true;
1748 }
1749
1750 void qemu_mutex_unlock_iothread(void)
1751 {
1752     g_assert(qemu_mutex_iothread_locked());
1753     iothread_locked = false;
1754     qemu_mutex_unlock(&qemu_global_mutex);
1755 }
1756
1757 static bool all_vcpus_paused(void)
1758 {
1759     CPUState *cpu;
1760
1761     CPU_FOREACH(cpu) {
1762         if (!cpu->stopped) {
1763             return false;
1764         }
1765     }
1766
1767     return true;
1768 }
1769
1770 void pause_all_vcpus(void)
1771 {
1772     CPUState *cpu;
1773
1774     qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
1775     CPU_FOREACH(cpu) {
1776         if (qemu_cpu_is_self(cpu)) {
1777             qemu_cpu_stop(cpu, true);
1778         } else {
1779             cpu->stop = true;
1780             qemu_cpu_kick(cpu);
1781         }
1782     }
1783
1784     while (!all_vcpus_paused()) {
1785         qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
1786         CPU_FOREACH(cpu) {
1787             qemu_cpu_kick(cpu);
1788         }
1789     }
1790 }
1791
1792 void cpu_resume(CPUState *cpu)
1793 {
1794     cpu->stop = false;
1795     cpu->stopped = false;
1796     qemu_cpu_kick(cpu);
1797 }
1798
1799 void resume_all_vcpus(void)
1800 {
1801     CPUState *cpu;
1802
1803     qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
1804     CPU_FOREACH(cpu) {
1805         cpu_resume(cpu);
1806     }
1807 }
1808
1809 void cpu_remove_sync(CPUState *cpu)
1810 {
1811     cpu->stop = true;
1812     cpu->unplug = true;
1813     qemu_cpu_kick(cpu);
1814     qemu_mutex_unlock_iothread();
1815     qemu_thread_join(cpu->thread);
1816     qemu_mutex_lock_iothread();
1817 }
1818
1819 /* Size of temporary buffers used for forming a thread name */
1820 #define VCPU_THREAD_NAME_SIZE 16
1821
1822 static void qemu_tcg_init_vcpu(CPUState *cpu)
1823 {
1824     char thread_name[VCPU_THREAD_NAME_SIZE];
1825     static QemuCond *single_tcg_halt_cond;
1826     static QemuThread *single_tcg_cpu_thread;
1827     static int tcg_region_inited;
1828
1829     /*
1830      * Initialize TCG regions--once. Now is a good time, because:
1831      * (1) TCG's init context, prologue and target globals have been set up.
1832      * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
1833      *     -accel flag is processed, so the check doesn't work then).
1834      */
1835     if (!tcg_region_inited) {
1836         tcg_region_inited = 1;
1837         tcg_region_init();
1838     }
1839
1840     if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
1841         cpu->thread = g_malloc0(sizeof(QemuThread));
1842         cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1843         qemu_cond_init(cpu->halt_cond);
1844
1845         if (qemu_tcg_mttcg_enabled()) {
1846             /* create a thread per vCPU with TCG (MTTCG) */
1847             parallel_cpus = true;
1848             snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
1849                      cpu->cpu_index);
1850
1851             qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
1852                                cpu, QEMU_THREAD_JOINABLE);
1853
1854         } else {
1855             /* share a single thread for all cpus with TCG */
1856             snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
1857             qemu_thread_create(cpu->thread, thread_name,
1858                                qemu_tcg_rr_cpu_thread_fn,
1859                                cpu, QEMU_THREAD_JOINABLE);
1860
1861             single_tcg_halt_cond = cpu->halt_cond;
1862             single_tcg_cpu_thread = cpu->thread;
1863         }
1864 #ifdef _WIN32
1865         cpu->hThread = qemu_thread_get_handle(cpu->thread);
1866 #endif
1867     } else {
1868         /* For non-MTTCG cases we share the thread */
1869         cpu->thread = single_tcg_cpu_thread;
1870         cpu->halt_cond = single_tcg_halt_cond;
1871         cpu->thread_id = first_cpu->thread_id;
1872         cpu->can_do_io = 1;
1873         cpu->created = true;
1874     }
1875 }
1876
1877 static void qemu_hax_start_vcpu(CPUState *cpu)
1878 {
1879     char thread_name[VCPU_THREAD_NAME_SIZE];
1880
1881     cpu->thread = g_malloc0(sizeof(QemuThread));
1882     cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1883     qemu_cond_init(cpu->halt_cond);
1884
1885     snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
1886              cpu->cpu_index);
1887     qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
1888                        cpu, QEMU_THREAD_JOINABLE);
1889 #ifdef _WIN32
1890     cpu->hThread = qemu_thread_get_handle(cpu->thread);
1891 #endif
1892 }
1893
1894 static void qemu_kvm_start_vcpu(CPUState *cpu)
1895 {
1896     char thread_name[VCPU_THREAD_NAME_SIZE];
1897
1898     cpu->thread = g_malloc0(sizeof(QemuThread));
1899     cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1900     qemu_cond_init(cpu->halt_cond);
1901     snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
1902              cpu->cpu_index);
1903     qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
1904                        cpu, QEMU_THREAD_JOINABLE);
1905 }
1906
1907 static void qemu_hvf_start_vcpu(CPUState *cpu)
1908 {
1909     char thread_name[VCPU_THREAD_NAME_SIZE];
1910
1911     /* HVF currently does not support TCG, and only runs in
1912      * unrestricted-guest mode. */
1913     assert(hvf_enabled());
1914
1915     cpu->thread = g_malloc0(sizeof(QemuThread));
1916     cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1917     qemu_cond_init(cpu->halt_cond);
1918
1919     snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
1920              cpu->cpu_index);
1921     qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
1922                        cpu, QEMU_THREAD_JOINABLE);
1923 }
1924
1925 static void qemu_whpx_start_vcpu(CPUState *cpu)
1926 {
1927     char thread_name[VCPU_THREAD_NAME_SIZE];
1928
1929     cpu->thread = g_malloc0(sizeof(QemuThread));
1930     cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1931     qemu_cond_init(cpu->halt_cond);
1932     snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX",
1933              cpu->cpu_index);
1934     qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn,
1935                        cpu, QEMU_THREAD_JOINABLE);
1936 #ifdef _WIN32
1937     cpu->hThread = qemu_thread_get_handle(cpu->thread);
1938 #endif
1939 }
1940
1941 static void qemu_dummy_start_vcpu(CPUState *cpu)
1942 {
1943     char thread_name[VCPU_THREAD_NAME_SIZE];
1944
1945     cpu->thread = g_malloc0(sizeof(QemuThread));
1946     cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1947     qemu_cond_init(cpu->halt_cond);
1948     snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
1949              cpu->cpu_index);
1950     qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
1951                        QEMU_THREAD_JOINABLE);
1952 }
1953
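/*
 * Create and start the vCPU thread for whichever accelerator is in
 * use, then wait until that thread signals that the vCPU is created.
 */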
1954 void qemu_init_vcpu(CPUState *cpu)
1955 {
1956     cpu->nr_cores = smp_cores;
1957     cpu->nr_threads = smp_threads;
1958     cpu->stopped = true;
1959
1960     if (!cpu->as) {
1961         /* If the target cpu hasn't set up any address spaces itself,
1962          * give it the default one.
1963          */
1964         cpu->num_ases = 1;
1965         cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
1966     }
1967
1968     if (kvm_enabled()) {
1969         qemu_kvm_start_vcpu(cpu);
1970     } else if (hax_enabled()) {
1971         qemu_hax_start_vcpu(cpu);
1972     } else if (hvf_enabled()) {
1973         qemu_hvf_start_vcpu(cpu);
1974     } else if (tcg_enabled()) {
1975         qemu_tcg_init_vcpu(cpu);
1976     } else if (whpx_enabled()) {
1977         qemu_whpx_start_vcpu(cpu);
1978     } else {
1979         qemu_dummy_start_vcpu(cpu);
1980     }
1981
1982     while (!cpu->created) {
1983         qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1984     }
1985 }
1986
1987 void cpu_stop_current(void)
1988 {
1989     if (current_cpu) {
1990         qemu_cpu_stop(current_cpu, true);
1991     }
1992 }
1993
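/*
 * Stop the VM with the given run state.  When called from a vCPU
 * thread, only queue the vmstop request and park the calling vCPU; the
 * actual transition is completed outside the vCPU thread.
 */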
1994 int vm_stop(RunState state)
1995 {
1996     if (qemu_in_vcpu_thread()) {
1997         qemu_system_vmstop_request_prepare();
1998         qemu_system_vmstop_request(state);
1999         /*
2000          * FIXME: should not return to device code in case
2001          * vm_stop() has been requested.
2002          */
2003         cpu_stop_current();
2004         return 0;
2005     }
2006
2007     return do_vm_stop(state, true);
2008 }
2009
2010 /**
2011  * Prepare for (re)starting the VM.
2012  * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
2013  * running or in case of an error condition), 0 otherwise.
2014  */
2015 int vm_prepare_start(void)
2016 {
2017     RunState requested;
2018     int res = 0;
2019
2020     qemu_vmstop_requested(&requested);
2021     if (runstate_is_running() && requested == RUN_STATE__MAX) {
2022         return -1;
2023     }
2024
2025     /* Ensure that a STOP/RESUME pair of events is emitted if a
2026      * vmstop request was pending.  The BLOCK_IO_ERROR event, for
2027      * example, is documented to always be followed by the STOP
2028      * event.
2029      */
2030     if (runstate_is_running()) {
2031         qapi_event_send_stop(&error_abort);
2032         res = -1;
2033     } else {
2034         replay_enable_events();
2035         cpu_enable_ticks();
2036         runstate_set(RUN_STATE_RUNNING);
2037         vm_state_notify(1, RUN_STATE_RUNNING);
2038     }
2039
2040     /* We are sending this now, but the CPUs will be resumed shortly afterwards */
2041     qapi_event_send_resume(&error_abort);
2042     return res;
2043 }
2044
2045 void vm_start(void)
2046 {
2047     if (!vm_prepare_start()) {
2048         resume_all_vcpus();
2049     }
2050 }
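/*
 * Typical pairing (an illustrative sketch, not code from this file):
 * callers that need the guest quiesced stop the VM, do their work on
 * stable guest state, then restart it.
 *
 *     vm_stop(RUN_STATE_PAUSED);
 *     ... operate on stable guest state ...
 *     vm_start();
 */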
2051
2052 /* Does a state transition even if the VM is already stopped;
2053    the current state is forgotten forever. */
2054 int vm_stop_force_state(RunState state)
2055 {
2056     if (runstate_is_running()) {
2057         return vm_stop(state);
2058     } else {
2059         runstate_set(state);
2060
2061         bdrv_drain_all();
2062         /* Make sure to return an error if the flush in a previous vm_stop()
2063          * failed. */
2064         return bdrv_flush_all();
2065     }
2066 }
2067
2068 void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
2069 {
2070     /* XXX: implement xxx_cpu_list for targets that still lack it */
2071 #if defined(cpu_list)
2072     cpu_list(f, cpu_fprintf);
2073 #endif
2074 }
2075
2076 CpuInfoList *qmp_query_cpus(Error **errp)
2077 {
2078     MachineState *ms = MACHINE(qdev_get_machine());
2079     MachineClass *mc = MACHINE_GET_CLASS(ms);
2080     CpuInfoList *head = NULL, *cur_item = NULL;
2081     CPUState *cpu;
2082
2083     CPU_FOREACH(cpu) {
2084         CpuInfoList *info;
2085 #if defined(TARGET_I386)
2086         X86CPU *x86_cpu = X86_CPU(cpu);
2087         CPUX86State *env = &x86_cpu->env;
2088 #elif defined(TARGET_PPC)
2089         PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
2090         CPUPPCState *env = &ppc_cpu->env;
2091 #elif defined(TARGET_SPARC)
2092         SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
2093         CPUSPARCState *env = &sparc_cpu->env;
2094 #elif defined(TARGET_RISCV)
2095         RISCVCPU *riscv_cpu = RISCV_CPU(cpu);
2096         CPURISCVState *env = &riscv_cpu->env;
2097 #elif defined(TARGET_MIPS)
2098         MIPSCPU *mips_cpu = MIPS_CPU(cpu);
2099         CPUMIPSState *env = &mips_cpu->env;
2100 #elif defined(TARGET_TRICORE)
2101         TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
2102         CPUTriCoreState *env = &tricore_cpu->env;
2103 #elif defined(TARGET_S390X)
2104         S390CPU *s390_cpu = S390_CPU(cpu);
2105         CPUS390XState *env = &s390_cpu->env;
2106 #endif
2107
2108         cpu_synchronize_state(cpu);
2109
2110         info = g_malloc0(sizeof(*info));
2111         info->value = g_malloc0(sizeof(*info->value));
2112         info->value->CPU = cpu->cpu_index;
2113         info->value->current = (cpu == first_cpu);
2114         info->value->halted = cpu->halted;
2115         info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
2116         info->value->thread_id = cpu->thread_id;
2117 #if defined(TARGET_I386)
2118         info->value->arch = CPU_INFO_ARCH_X86;
2119         info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
2120 #elif defined(TARGET_PPC)
2121         info->value->arch = CPU_INFO_ARCH_PPC;
2122         info->value->u.ppc.nip = env->nip;
2123 #elif defined(TARGET_SPARC)
2124         info->value->arch = CPU_INFO_ARCH_SPARC;
2125         info->value->u.q_sparc.pc = env->pc;
2126         info->value->u.q_sparc.npc = env->npc;
2127 #elif defined(TARGET_MIPS)
2128         info->value->arch = CPU_INFO_ARCH_MIPS;
2129         info->value->u.q_mips.PC = env->active_tc.PC;
2130 #elif defined(TARGET_TRICORE)
2131         info->value->arch = CPU_INFO_ARCH_TRICORE;
2132         info->value->u.tricore.PC = env->PC;
2133 #elif defined(TARGET_S390X)
2134         info->value->arch = CPU_INFO_ARCH_S390;
2135         info->value->u.s390.cpu_state = env->cpu_state;
2136 #elif defined(TARGET_RISCV)
2137         info->value->arch = CPU_INFO_ARCH_RISCV;
2138         info->value->u.riscv.pc = env->pc;
2139 #else
2140         info->value->arch = CPU_INFO_ARCH_OTHER;
2141 #endif
2142         info->value->has_props = !!mc->cpu_index_to_instance_props;
2143         if (info->value->has_props) {
2144             CpuInstanceProperties *props;
2145             props = g_malloc0(sizeof(*props));
2146             *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
2147             info->value->props = props;
2148         }
2149
2150         /* XXX: waiting for the qapi to support GSList */
2151         if (!cur_item) {
2152             head = cur_item = info;
2153         } else {
2154             cur_item->next = info;
2155             cur_item = info;
2156         }
2157     }
2158
2159     return head;
2160 }
2161
2162 /*
2163  * fast means: we NEVER interrupt vCPU threads to retrieve
2164  * information from KVM.
2165  */
2166 CpuInfoFastList *qmp_query_cpus_fast(Error **errp)
2167 {
2168     MachineState *ms = MACHINE(qdev_get_machine());
2169     MachineClass *mc = MACHINE_GET_CLASS(ms);
2170     CpuInfoFastList *head = NULL, *cur_item = NULL;
2171     CPUState *cpu;
2172 #if defined(TARGET_S390X)
2173     S390CPU *s390_cpu;
2174     CPUS390XState *env;
2175 #endif
2176
2177     CPU_FOREACH(cpu) {
2178         CpuInfoFastList *info = g_malloc0(sizeof(*info));
2179         info->value = g_malloc0(sizeof(*info->value));
2180
2181         info->value->cpu_index = cpu->cpu_index;
2182         info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
2183         info->value->thread_id = cpu->thread_id;
2184
2185         info->value->has_props = !!mc->cpu_index_to_instance_props;
2186         if (info->value->has_props) {
2187             CpuInstanceProperties *props;
2188             props = g_malloc0(sizeof(*props));
2189             *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
2190             info->value->props = props;
2191         }
2192
2193 #if defined(TARGET_S390X)
2194         s390_cpu = S390_CPU(cpu);
2195         env = &s390_cpu->env;
2196         info->value->arch = CPU_INFO_ARCH_S390;
2197         info->value->u.s390.cpu_state = env->cpu_state;
2198 #endif
2199         if (!cur_item) {
2200             head = cur_item = info;
2201         } else {
2202             cur_item->next = info;
2203             cur_item = info;
2204         }
2205     }
2206
2207     return head;
2208 }
2209
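/*
 * QMP 'memsave': write @size bytes of guest virtual memory starting at
 * @addr, as seen by the selected vCPU, to @filename.
 */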
2210 void qmp_memsave(int64_t addr, int64_t size, const char *filename,
2211                  bool has_cpu, int64_t cpu_index, Error **errp)
2212 {
2213     FILE *f;
2214     uint32_t l;
2215     CPUState *cpu;
2216     uint8_t buf[1024];
2217     int64_t orig_addr = addr, orig_size = size;
2218
2219     if (!has_cpu) {
2220         cpu_index = 0;
2221     }
2222
2223     cpu = qemu_get_cpu(cpu_index);
2224     if (cpu == NULL) {
2225         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
2226                    "a CPU number");
2227         return;
2228     }
2229
2230     f = fopen(filename, "wb");
2231     if (!f) {
2232         error_setg_file_open(errp, errno, filename);
2233         return;
2234     }
2235
2236     while (size != 0) {
2237         l = sizeof(buf);
2238         if (l > size)
2239             l = size;
2240         if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
2241             error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
2242                              " specified", orig_addr, orig_size);
2243             goto exit;
2244         }
2245         if (fwrite(buf, 1, l, f) != l) {
2246             error_setg(errp, QERR_IO_ERROR);
2247             goto exit;
2248         }
2249         addr += l;
2250         size -= l;
2251     }
2252
2253 exit:
2254     fclose(f);
2255 }
2256
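/*
 * QMP 'pmemsave': like memsave, but reads guest physical memory and
 * needs no vCPU context.
 */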
2257 void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
2258                   Error **errp)
2259 {
2260     FILE *f;
2261     uint32_t l;
2262     uint8_t buf[1024];
2263
2264     f = fopen(filename, "wb");
2265     if (!f) {
2266         error_setg_file_open(errp, errno, filename);
2267         return;
2268     }
2269
2270     while (size != 0) {
2271         l = sizeof(buf);
2272         if (l > size)
2273             l = size;
2274         cpu_physical_memory_read(addr, buf, l);
2275         if (fwrite(buf, 1, l, f) != l) {
2276             error_setg(errp, QERR_IO_ERROR);
2277             goto exit;
2278         }
2279         addr += l;
2280         size -= l;
2281     }
2282
2283 exit:
2284     fclose(f);
2285 }
2286
2287 void qmp_inject_nmi(Error **errp)
2288 {
2289     nmi_monitor_handle(monitor_get_cpu_index(), errp);
2290 }
2291
2292 void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
2293 {
2294     if (!use_icount) {
2295         return;
2296     }
2297
2298     cpu_fprintf(f, "Host - Guest clock  %"PRIi64" ms\n",
2299                 (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
2300     if (icount_align_option) {
2301         cpu_fprintf(f, "Max guest delay     %"PRIi64" ms\n", -max_delay/SCALE_MS);
2302         cpu_fprintf(f, "Max guest advance   %"PRIi64" ms\n", max_advance/SCALE_MS);
2303     } else {
2304         cpu_fprintf(f, "Max guest delay     NA\n");
2305         cpu_fprintf(f, "Max guest advance   NA\n");
2306     }
2307 }