*/
#include "qemu/osdep.h"
+#include "qemu-common.h"
#include "qemu/config-file.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
+#include "qemu/qemu-print.h"
#include "sysemu/sysemu.h"
+#include "sysemu/tcg.h"
#include "sysemu/block-backend.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "qemu/option.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
+#include "qemu/guest-random.h"
#include "tcg.h"
#include "hw/nmi.h"
#include "sysemu/replay.h"
                error_setg(errp, "No MTTCG when icount is enabled");
            } else {
#ifndef TARGET_SUPPORTS_MTTCG
-                error_report("Guest not yet converted to MTTCG - "
-                             "you may get unexpected results");
+                warn_report("Guest not yet converted to MTTCG - "
+                            "you may get unexpected results");
#endif
                if (!check_tcg_memory_orders_compatible()) {
-                    error_report("Guest expects a stronger memory ordering "
-                                 "than the host provides");
+                    warn_report("Guest expects a stronger memory ordering "
+                                "than the host provides");
                    error_printf("This may cause strange/hard to debug errors\n");
                }
                mttcg_enabled = true;
*/
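+/* icount_decr is now reached via cpu_neg(cpu) rather than CPUState itself */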
static int64_t cpu_get_icount_executed(CPUState *cpu)
{
-    return cpu->icount_budget - (cpu->icount_decr.u16.low + cpu->icount_extra);
+    return (cpu->icount_budget -
+            (cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra));
}
/*
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
-        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
+        cpu_dump_state(cpu, stderr, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
    process_queued_cpu_work(cpu);
}
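+/*
+ * In single-threaded round-robin TCG mode one thread drives every vCPU,
+ * so wait on first_cpu's halt condition and then run the pending work of
+ * all vCPUs rather than just one.
+ */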
-static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
+static void qemu_tcg_rr_wait_io_event(void)
{
+    CPUState *cpu;
+
    while (all_cpu_threads_idle()) {
        stop_tcg_kick_timer();
-        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
    }
    start_tcg_kick_timer();
-    qemu_wait_io_event_common(cpu);
+    CPU_FOREACH(cpu) {
+        qemu_wait_io_event_common(cpu);
+    }
}
static void qemu_wait_io_event(CPUState *cpu)
    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
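+    /* finish seeding the guest RNG with the seed taken in qemu_init_vcpu() */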
+    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    do {
        if (cpu_can_run(cpu)) {
    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
+    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    do {
        qemu_mutex_unlock_iothread();
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug);
+    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
#endif
         * each vCPU execution. However u16.high can be raised
         * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
         */
-        g_assert(cpu->icount_decr.u16.low == 0);
+        g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
        g_assert(cpu->icount_extra == 0);
        cpu->icount_budget = tcg_get_icount_limit();
        insns_left = MIN(0xffff, cpu->icount_budget);
-        cpu->icount_decr.u16.low = insns_left;
+        cpu_neg(cpu)->icount_decr.u16.low = insns_left;
        cpu->icount_extra = cpu->icount_budget - insns_left;
        replay_mutex_lock();
        cpu_update_icount(cpu);
        /* Reset the counters */
-        cpu->icount_decr.u16.low = 0;
+        cpu_neg(cpu)->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        cpu->icount_budget = 0;
    cpu->created = true;
    cpu->can_do_io = 1;
    qemu_cond_signal(&qemu_cpu_cond);
+    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
            atomic_mb_set(&cpu->exit_request, 0);
        }
-        qemu_tcg_rr_wait_io_event(cpu ? cpu : first_cpu);
+        if (use_icount && all_cpu_threads_idle()) {
+            /*
+             * When all cpus are sleeping (e.g in WFI), to avoid a deadlock
+             * in the main_loop, wake it up in order to start the warp timer.
+             */
+            qemu_notify_event();
+        }
+
+        qemu_tcg_rr_wait_io_event();
        deal_with_unplugged_cpus();
    }
    hax_init_vcpu(cpu);
    qemu_cond_signal(&qemu_cpu_cond);
+    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    do {
        if (cpu_can_run(cpu)) {
    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
+    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    do {
        if (cpu_can_run(cpu)) {
    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
+    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    do {
        if (cpu_can_run(cpu)) {
    cpu->can_do_io = 1;
    current_cpu = cpu;
    qemu_cond_signal(&qemu_cpu_cond);
+    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* process any pending work */
    cpu->exit_request = 1;
    }
    cpu->thread_kicked = true;
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
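+    /* ESRCH only means the target vCPU thread has already exited */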
-    if (err) {
+    if (err && err != ESRCH) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
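+    /* grab a guest RNG seed here; the vCPU thread finishes seeding in part2 */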
+    cpu->random_seed = qemu_guest_random_seed_thread_part1();
    if (!cpu->as) {
        /* If the target cpu hasn't set up any address spaces itself,
void cpu_stop_current(void)
{
    if (current_cpu) {
-        qemu_cpu_stop(current_cpu, true);
+        current_cpu->stop = true;
+        cpu_exit(current_cpu);
    }
}
    }
}
-void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
+void list_cpus(const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
-    cpu_list(f, cpu_fprintf);
+    cpu_list();
#endif
}
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
}
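+/* qemu_printf() writes to the current monitor if there is one, else stdout */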
-void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
+void dump_drift_info(void)
{
    if (!use_icount) {
        return;
    }
-    cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
+    qemu_printf("Host - Guest clock %"PRIi64" ms\n",
                (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
    if (icount_align_option) {
-        cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS);
-        cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n", max_advance/SCALE_MS);
+        qemu_printf("Max guest delay %"PRIi64" ms\n",
+                    -max_delay / SCALE_MS);
+        qemu_printf("Max guest advance %"PRIi64" ms\n",
+                    max_advance / SCALE_MS);
    } else {
-        cpu_fprintf(f, "Max guest delay NA\n");
-        cpu_fprintf(f, "Max guest advance NA\n");
+        qemu_printf("Max guest delay NA\n");
+        qemu_printf("Max guest advance NA\n");
    }
}