From: Claudio Fontana
Date: Mon, 29 Jun 2020 09:35:02 +0000 (+0200)
Subject: softmmu: move softmmu only files from root
X-Git-Url: https://repo.jachan.dev/qemu.git/commitdiff_plain/c7f419f584

softmmu: move softmmu only files from root

move arch_init, balloon, cpus, ioport, memory, memory_mapping, qtest.

They are all specific to CONFIG_SOFTMMU.

Signed-off-by: Claudio Fontana
Reviewed-by: Alex Bennée
Reviewed-by: Laurent Vivier
Reviewed-by: Thomas Huth
Message-Id: <20200629093504.3228-2-cfontana@suse.de>
Signed-off-by: Paolo Bonzini
---

diff --git a/MAINTAINERS b/MAINTAINERS
index 51f6ddf814..28f33123ec 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -115,7 +115,7 @@ Overall TCG CPUs
 M: Richard Henderson
 R: Paolo Bonzini
 S: Maintained
-F: cpus.c
+F: softmmu/cpus.c
 F: cpus-common.c
 F: exec.c
 F: accel/tcg/
@@ -1724,7 +1724,7 @@ M: David Hildenbrand
 S: Maintained
 F: hw/virtio/virtio-balloon*.c
 F: include/hw/virtio/virtio-balloon.h
-F: balloon.c
+F: softmmu/balloon.c
 F: include/sysemu/balloon.h
 
 virtio-9p
@@ -2203,12 +2203,12 @@ Memory API
 M: Paolo Bonzini
 S: Supported
 F: include/exec/ioport.h
-F: ioport.c
 F: include/exec/memop.h
 F: include/exec/memory.h
 F: include/exec/ram_addr.h
 F: include/exec/ramblock.h
-F: memory.c
+F: softmmu/ioport.c
+F: softmmu/memory.c
 F: include/exec/memory-internal.h
 F: exec.c
 F: scripts/coccinelle/memory-region-housekeeping.cocci
@@ -2240,13 +2240,13 @@ F: ui/cocoa.m
 Main loop
 M: Paolo Bonzini
 S: Maintained
-F: cpus.c
 F: include/qemu/main-loop.h
 F: include/sysemu/runstate.h
 F: util/main-loop.c
 F: util/qemu-timer.c
 F: softmmu/vl.c
 F: softmmu/main.c
+F: softmmu/cpus.c
 F: qapi/run-state.json
 
 Human Monitor (HMP)
@@ -2401,7 +2401,7 @@ M: Thomas Huth
 M: Laurent Vivier
 R: Paolo Bonzini
 S: Maintained
-F: qtest.c
+F: softmmu/qtest.c
 F: accel/qtest.c
 F: tests/qtest/
 X: tests/qtest/bios-tables-test-allowed-diff.h
diff --git a/Makefile.target b/Makefile.target
index 02bd9d7117..ffa2657269 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -152,16 +152,13 @@ endif #CONFIG_BSD_USER
 #########################################################
 # System emulator target
 ifdef CONFIG_SOFTMMU
-obj-y += arch_init.o cpus.o gdbstub.o balloon.o ioport.o
-obj-y += qtest.o
+obj-y += softmmu/
+obj-y += gdbstub.o
 obj-y += dump/
 obj-y += hw/
 obj-y += monitor/
 obj-y += qapi/
-obj-y += memory.o
-obj-y += memory_mapping.o
 obj-y += migration/ram.o
-obj-y += softmmu/
 LIBS := $(libs_softmmu) $(LIBS)
 
 # Hardware support
diff --git a/arch_init.c b/arch_init.c
deleted file mode 100644
index 8afea4748b..0000000000
--- a/arch_init.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * QEMU System Emulator
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/arch_init.h"
-#include "hw/pci/pci.h"
-#include "hw/audio/soundhw.h"
-#include "qapi/error.h"
-#include "qemu/config-file.h"
-#include "qemu/error-report.h"
-#include "hw/acpi/acpi.h"
-#include "qemu/help_option.h"
-
-#ifdef TARGET_SPARC
-int graphic_width = 1024;
-int graphic_height = 768;
-int graphic_depth = 8;
-#elif defined(TARGET_M68K)
-int graphic_width = 800;
-int graphic_height = 600;
-int graphic_depth = 8;
-#else
-int graphic_width = 800;
-int graphic_height = 600;
-int graphic_depth = 32;
-#endif
-
-
-#if defined(TARGET_ALPHA)
-#define QEMU_ARCH QEMU_ARCH_ALPHA
-#elif defined(TARGET_ARM)
-#define QEMU_ARCH QEMU_ARCH_ARM
-#elif defined(TARGET_CRIS)
-#define QEMU_ARCH QEMU_ARCH_CRIS
-#elif defined(TARGET_HPPA)
-#define QEMU_ARCH QEMU_ARCH_HPPA
-#elif defined(TARGET_I386)
-#define QEMU_ARCH QEMU_ARCH_I386
-#elif defined(TARGET_LM32)
-#define QEMU_ARCH QEMU_ARCH_LM32
-#elif defined(TARGET_M68K)
-#define QEMU_ARCH QEMU_ARCH_M68K
-#elif defined(TARGET_MICROBLAZE)
-#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
-#elif defined(TARGET_MIPS)
-#define QEMU_ARCH QEMU_ARCH_MIPS
-#elif defined(TARGET_MOXIE)
-#define QEMU_ARCH QEMU_ARCH_MOXIE
-#elif defined(TARGET_NIOS2)
-#define QEMU_ARCH QEMU_ARCH_NIOS2
-#elif defined(TARGET_OPENRISC)
-#define QEMU_ARCH QEMU_ARCH_OPENRISC
-#elif defined(TARGET_PPC)
-#define QEMU_ARCH QEMU_ARCH_PPC
-#elif defined(TARGET_RISCV)
-#define QEMU_ARCH QEMU_ARCH_RISCV
-#elif defined(TARGET_RX)
-#define QEMU_ARCH QEMU_ARCH_RX
-#elif defined(TARGET_S390X)
-#define QEMU_ARCH QEMU_ARCH_S390X
-#elif defined(TARGET_SH4)
-#define QEMU_ARCH QEMU_ARCH_SH4
-#elif defined(TARGET_SPARC)
-#define QEMU_ARCH QEMU_ARCH_SPARC
-#elif defined(TARGET_TRICORE)
-#define QEMU_ARCH QEMU_ARCH_TRICORE
-#elif defined(TARGET_UNICORE32)
-#define QEMU_ARCH QEMU_ARCH_UNICORE32
-#elif defined(TARGET_XTENSA)
-#define QEMU_ARCH QEMU_ARCH_XTENSA
-#endif
-
-const uint32_t arch_type = QEMU_ARCH;
-
-int kvm_available(void)
-{
-#ifdef CONFIG_KVM
-    return 1;
-#else
-    return 0;
-#endif
-}
-
-int xen_available(void)
-{
-#ifdef CONFIG_XEN
-    return 1;
-#else
-    return 0;
-#endif
-}
diff --git a/balloon.c b/balloon.c
deleted file mode 100644
index 354408c6ea..0000000000
--- a/balloon.c
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Generic Balloon handlers and management
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- * Copyright (C) 2011 Red Hat, Inc.
- * Copyright (C) 2011 Amit Shah
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/atomic.h"
-#include "sysemu/kvm.h"
-#include "sysemu/balloon.h"
-#include "trace-root.h"
-#include "qapi/error.h"
-#include "qapi/qapi-commands-misc.h"
-#include "qapi/qmp/qerror.h"
-
-static QEMUBalloonEvent *balloon_event_fn;
-static QEMUBalloonStatus *balloon_stat_fn;
-static void *balloon_opaque;
-
-static bool have_balloon(Error **errp)
-{
-    if (kvm_enabled() && !kvm_has_sync_mmu()) {
-        error_set(errp, ERROR_CLASS_KVM_MISSING_CAP,
-                  "Using KVM without synchronous MMU, balloon unavailable");
-        return false;
-    }
-    if (!balloon_event_fn) {
-        error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE,
-                  "No balloon device has been activated");
-        return false;
-    }
-    return true;
-}
-
-int qemu_add_balloon_handler(QEMUBalloonEvent *event_func,
-                             QEMUBalloonStatus *stat_func, void *opaque)
-{
-    if (balloon_event_fn || balloon_stat_fn || balloon_opaque) {
-        /* We're already registered one balloon handler.  How many can
-         * a guest really have?
-         */
-        return -1;
-    }
-    balloon_event_fn = event_func;
-    balloon_stat_fn = stat_func;
-    balloon_opaque = opaque;
-    return 0;
-}
-
-void qemu_remove_balloon_handler(void *opaque)
-{
-    if (balloon_opaque != opaque) {
-        return;
-    }
-    balloon_event_fn = NULL;
-    balloon_stat_fn = NULL;
-    balloon_opaque = NULL;
-}
-
-BalloonInfo *qmp_query_balloon(Error **errp)
-{
-    BalloonInfo *info;
-
-    if (!have_balloon(errp)) {
-        return NULL;
-    }
-
-    info = g_malloc0(sizeof(*info));
-    balloon_stat_fn(balloon_opaque, info);
-    return info;
-}
-
-void qmp_balloon(int64_t target, Error **errp)
-{
-    if (!have_balloon(errp)) {
-        return;
-    }
-
-    if (target <= 0) {
-        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "target", "a size");
-        return;
-    }
-
-    trace_balloon_event(balloon_opaque, target);
-    balloon_event_fn(balloon_opaque, target);
-}
diff --git a/cpus.c b/cpus.c
deleted file mode 100644
index d94456ed29..0000000000
--- a/cpus.c
+++ /dev/null
@@ -1,2317 +0,0 @@
-/*
- * QEMU System Emulator
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "qemu/config-file.h"
-#include "qemu/cutils.h"
-#include "migration/vmstate.h"
-#include "monitor/monitor.h"
-#include "qapi/error.h"
-#include "qapi/qapi-commands-misc.h"
-#include "qapi/qapi-events-run-state.h"
-#include "qapi/qmp/qerror.h"
-#include "qemu/error-report.h"
-#include "qemu/qemu-print.h"
-#include "sysemu/tcg.h"
-#include "sysemu/block-backend.h"
-#include "exec/gdbstub.h"
-#include "sysemu/dma.h"
-#include "sysemu/hw_accel.h"
-#include "sysemu/kvm.h"
-#include "sysemu/hax.h"
-#include "sysemu/hvf.h"
-#include "sysemu/whpx.h"
-#include "exec/exec-all.h"
-
-#include "qemu/thread.h"
-#include "qemu/plugin.h"
-#include "sysemu/cpus.h"
-#include "sysemu/qtest.h"
-#include "qemu/main-loop.h"
-#include "qemu/option.h"
-#include "qemu/bitmap.h"
-#include "qemu/seqlock.h"
-#include "qemu/guest-random.h"
-#include "tcg/tcg.h"
-#include "hw/nmi.h"
-#include "sysemu/replay.h"
-#include "sysemu/runstate.h"
-#include "hw/boards.h"
-#include "hw/hw.h"
-
-#ifdef CONFIG_LINUX
-
-#include <sys/prctl.h>
-
-#ifndef PR_MCE_KILL
-#define PR_MCE_KILL 33
-#endif
-
-#ifndef PR_MCE_KILL_SET
-#define PR_MCE_KILL_SET 1
-#endif
-
-#ifndef PR_MCE_KILL_EARLY
-#define PR_MCE_KILL_EARLY 1
-#endif
-
-#endif /* CONFIG_LINUX */
-
-static QemuMutex qemu_global_mutex;
-
-int64_t max_delay;
-int64_t max_advance;
-
-/* vcpu throttling controls */
-static QEMUTimer *throttle_timer;
-static unsigned int throttle_percentage;
-
-#define CPU_THROTTLE_PCT_MIN 1
-#define CPU_THROTTLE_PCT_MAX 99
-#define CPU_THROTTLE_TIMESLICE_NS 10000000
-
-bool cpu_is_stopped(CPUState *cpu)
-{
-    return cpu->stopped || !runstate_is_running();
-}
-
-static inline bool cpu_work_list_empty(CPUState *cpu)
-{
-    bool ret;
-
-    qemu_mutex_lock(&cpu->work_mutex);
-    ret = QSIMPLEQ_EMPTY(&cpu->work_list);
-    qemu_mutex_unlock(&cpu->work_mutex);
-    return ret;
-}
-
-static bool cpu_thread_is_idle(CPUState *cpu)
-{
-    if (cpu->stop || !cpu_work_list_empty(cpu)) {
-        return false;
-    }
-    if (cpu_is_stopped(cpu)) {
-        return true;
-    }
-    if (!cpu->halted || cpu_has_work(cpu) ||
-        kvm_halt_in_kernel()) {
-        return false;
-    }
-    return true;
-}
-
-static bool all_cpu_threads_idle(void)
-{
-    CPUState *cpu;
-
-    CPU_FOREACH(cpu) {
-        if (!cpu_thread_is_idle(cpu)) {
-            return false;
-        }
-    }
-    return true;
-}
-
-/***********************************************************/
-/* guest cycle counter */
-
-/* Protected by TimersState seqlock */
-
-static bool icount_sleep = true;
-/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
-#define MAX_ICOUNT_SHIFT 10
-
-typedef struct TimersState {
-    /* Protected by BQL. */
-    int64_t cpu_ticks_prev;
-    int64_t cpu_ticks_offset;
-
-    /* Protect fields that can be respectively read outside the
-     * BQL, and written from multiple threads.
-     */
-    QemuSeqLock vm_clock_seqlock;
-    QemuSpin vm_clock_lock;
-
-    int16_t cpu_ticks_enabled;
-
-    /* Conversion factor from emulated instructions to virtual clock ticks. */
-    int16_t icount_time_shift;
-
-    /* Compensate for varying guest execution speed.
*/ - int64_t qemu_icount_bias; - - int64_t vm_clock_warp_start; - int64_t cpu_clock_offset; - - /* Only written by TCG thread */ - int64_t qemu_icount; - - /* for adjusting icount */ - QEMUTimer *icount_rt_timer; - QEMUTimer *icount_vm_timer; - QEMUTimer *icount_warp_timer; -} TimersState; - -static TimersState timers_state; -bool mttcg_enabled; - - -/* The current number of executed instructions is based on what we - * originally budgeted minus the current state of the decrementing - * icount counters in extra/u16.low. - */ -static int64_t cpu_get_icount_executed(CPUState *cpu) -{ - return (cpu->icount_budget - - (cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra)); -} - -/* - * Update the global shared timer_state.qemu_icount to take into - * account executed instructions. This is done by the TCG vCPU - * thread so the main-loop can see time has moved forward. - */ -static void cpu_update_icount_locked(CPUState *cpu) -{ - int64_t executed = cpu_get_icount_executed(cpu); - cpu->icount_budget -= executed; - - atomic_set_i64(&timers_state.qemu_icount, - timers_state.qemu_icount + executed); -} - -/* - * Update the global shared timer_state.qemu_icount to take into - * account executed instructions. This is done by the TCG vCPU - * thread so the main-loop can see time has moved forward. - */ -void cpu_update_icount(CPUState *cpu) -{ - seqlock_write_lock(&timers_state.vm_clock_seqlock, - &timers_state.vm_clock_lock); - cpu_update_icount_locked(cpu); - seqlock_write_unlock(&timers_state.vm_clock_seqlock, - &timers_state.vm_clock_lock); -} - -static int64_t cpu_get_icount_raw_locked(void) -{ - CPUState *cpu = current_cpu; - - if (cpu && cpu->running) { - if (!cpu->can_do_io) { - error_report("Bad icount read"); - exit(1); - } - /* Take into account what has run */ - cpu_update_icount_locked(cpu); - } - /* The read is protected by the seqlock, but needs atomic64 to avoid UB */ - return atomic_read_i64(&timers_state.qemu_icount); -} - -static int64_t cpu_get_icount_locked(void) -{ - int64_t icount = cpu_get_icount_raw_locked(); - return atomic_read_i64(&timers_state.qemu_icount_bias) + - cpu_icount_to_ns(icount); -} - -int64_t cpu_get_icount_raw(void) -{ - int64_t icount; - unsigned start; - - do { - start = seqlock_read_begin(&timers_state.vm_clock_seqlock); - icount = cpu_get_icount_raw_locked(); - } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start)); - - return icount; -} - -/* Return the virtual CPU time, based on the instruction counter. */ -int64_t cpu_get_icount(void) -{ - int64_t icount; - unsigned start; - - do { - start = seqlock_read_begin(&timers_state.vm_clock_seqlock); - icount = cpu_get_icount_locked(); - } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start)); - - return icount; -} - -int64_t cpu_icount_to_ns(int64_t icount) -{ - return icount << atomic_read(&timers_state.icount_time_shift); -} - -static int64_t cpu_get_ticks_locked(void) -{ - int64_t ticks = timers_state.cpu_ticks_offset; - if (timers_state.cpu_ticks_enabled) { - ticks += cpu_get_host_ticks(); - } - - if (timers_state.cpu_ticks_prev > ticks) { - /* Non increasing ticks may happen if the host uses software suspend. */ - timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks; - ticks = timers_state.cpu_ticks_prev; - } - - timers_state.cpu_ticks_prev = ticks; - return ticks; -} - -/* return the time elapsed in VM between vm_start and vm_stop. Unless - * icount is active, cpu_get_ticks() uses units of the host CPU cycle - * counter. 
- */ -int64_t cpu_get_ticks(void) -{ - int64_t ticks; - - if (use_icount) { - return cpu_get_icount(); - } - - qemu_spin_lock(&timers_state.vm_clock_lock); - ticks = cpu_get_ticks_locked(); - qemu_spin_unlock(&timers_state.vm_clock_lock); - return ticks; -} - -static int64_t cpu_get_clock_locked(void) -{ - int64_t time; - - time = timers_state.cpu_clock_offset; - if (timers_state.cpu_ticks_enabled) { - time += get_clock(); - } - - return time; -} - -/* Return the monotonic time elapsed in VM, i.e., - * the time between vm_start and vm_stop - */ -int64_t cpu_get_clock(void) -{ - int64_t ti; - unsigned start; - - do { - start = seqlock_read_begin(&timers_state.vm_clock_seqlock); - ti = cpu_get_clock_locked(); - } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start)); - - return ti; -} - -/* enable cpu_get_ticks() - * Caller must hold BQL which serves as mutex for vm_clock_seqlock. - */ -void cpu_enable_ticks(void) -{ - seqlock_write_lock(&timers_state.vm_clock_seqlock, - &timers_state.vm_clock_lock); - if (!timers_state.cpu_ticks_enabled) { - timers_state.cpu_ticks_offset -= cpu_get_host_ticks(); - timers_state.cpu_clock_offset -= get_clock(); - timers_state.cpu_ticks_enabled = 1; - } - seqlock_write_unlock(&timers_state.vm_clock_seqlock, - &timers_state.vm_clock_lock); -} - -/* disable cpu_get_ticks() : the clock is stopped. You must not call - * cpu_get_ticks() after that. - * Caller must hold BQL which serves as mutex for vm_clock_seqlock. - */ -void cpu_disable_ticks(void) -{ - seqlock_write_lock(&timers_state.vm_clock_seqlock, - &timers_state.vm_clock_lock); - if (timers_state.cpu_ticks_enabled) { - timers_state.cpu_ticks_offset += cpu_get_host_ticks(); - timers_state.cpu_clock_offset = cpu_get_clock_locked(); - timers_state.cpu_ticks_enabled = 0; - } - seqlock_write_unlock(&timers_state.vm_clock_seqlock, - &timers_state.vm_clock_lock); -} - -/* Correlation between real and virtual time is always going to be - fairly approximate, so ignore small variation. - When the guest is idle real and virtual time will be aligned in - the IO wait loop. */ -#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10) - -static void icount_adjust(void) -{ - int64_t cur_time; - int64_t cur_icount; - int64_t delta; - - /* Protected by TimersState mutex. */ - static int64_t last_delta; - - /* If the VM is not running, then do nothing. */ - if (!runstate_is_running()) { - return; - } - - seqlock_write_lock(&timers_state.vm_clock_seqlock, - &timers_state.vm_clock_lock); - cur_time = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT, - cpu_get_clock_locked()); - cur_icount = cpu_get_icount_locked(); - - delta = cur_icount - cur_time; - /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */ - if (delta > 0 - && last_delta + ICOUNT_WOBBLE < delta * 2 - && timers_state.icount_time_shift > 0) { - /* The guest is getting too far ahead. Slow time down. */ - atomic_set(&timers_state.icount_time_shift, - timers_state.icount_time_shift - 1); - } - if (delta < 0 - && last_delta - ICOUNT_WOBBLE > delta * 2 - && timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) { - /* The guest is getting too far behind. Speed time up. 
*/ - atomic_set(&timers_state.icount_time_shift, - timers_state.icount_time_shift + 1); - } - last_delta = delta; - atomic_set_i64(&timers_state.qemu_icount_bias, - cur_icount - (timers_state.qemu_icount - << timers_state.icount_time_shift)); - seqlock_write_unlock(&timers_state.vm_clock_seqlock, - &timers_state.vm_clock_lock); -} - -static void icount_adjust_rt(void *opaque) -{ - timer_mod(timers_state.icount_rt_timer, - qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000); - icount_adjust(); -} - -static void icount_adjust_vm(void *opaque) -{ - timer_mod(timers_state.icount_vm_timer, - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - NANOSECONDS_PER_SECOND / 10); - icount_adjust(); -} - -static int64_t qemu_icount_round(int64_t count) -{ - int shift = atomic_read(&timers_state.icount_time_shift); - return (count + (1 << shift) - 1) >> shift; -} - -static void icount_warp_rt(void) -{ - unsigned seq; - int64_t warp_start; - - /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start - * changes from -1 to another value, so the race here is okay. - */ - do { - seq = seqlock_read_begin(&timers_state.vm_clock_seqlock); - warp_start = timers_state.vm_clock_warp_start; - } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq)); - - if (warp_start == -1) { - return; - } - - seqlock_write_lock(&timers_state.vm_clock_seqlock, - &timers_state.vm_clock_lock); - if (runstate_is_running()) { - int64_t clock = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT, - cpu_get_clock_locked()); - int64_t warp_delta; - - warp_delta = clock - timers_state.vm_clock_warp_start; - if (use_icount == 2) { - /* - * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too - * far ahead of real time. - */ - int64_t cur_icount = cpu_get_icount_locked(); - int64_t delta = clock - cur_icount; - warp_delta = MIN(warp_delta, delta); - } - atomic_set_i64(&timers_state.qemu_icount_bias, - timers_state.qemu_icount_bias + warp_delta); - } - timers_state.vm_clock_warp_start = -1; - seqlock_write_unlock(&timers_state.vm_clock_seqlock, - &timers_state.vm_clock_lock); - - if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) { - qemu_clock_notify(QEMU_CLOCK_VIRTUAL); - } -} - -static void icount_timer_cb(void *opaque) -{ - /* No need for a checkpoint because the timer already synchronizes - * with CHECKPOINT_CLOCK_VIRTUAL_RT. - */ - icount_warp_rt(); -} - -void qtest_clock_warp(int64_t dest) -{ - int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - AioContext *aio_context; - assert(qtest_enabled()); - aio_context = qemu_get_aio_context(); - while (clock < dest) { - int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL, - QEMU_TIMER_ATTR_ALL); - int64_t warp = qemu_soonest_timeout(dest - clock, deadline); - - seqlock_write_lock(&timers_state.vm_clock_seqlock, - &timers_state.vm_clock_lock); - atomic_set_i64(&timers_state.qemu_icount_bias, - timers_state.qemu_icount_bias + warp); - seqlock_write_unlock(&timers_state.vm_clock_seqlock, - &timers_state.vm_clock_lock); - - qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL); - timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]); - clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - } - qemu_clock_notify(QEMU_CLOCK_VIRTUAL); -} - -void qemu_start_warp_timer(void) -{ - int64_t clock; - int64_t deadline; - - if (!use_icount) { - return; - } - - /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers - * do not fire, so computing the deadline does not make sense. 
- */ - if (!runstate_is_running()) { - return; - } - - if (replay_mode != REPLAY_MODE_PLAY) { - if (!all_cpu_threads_idle()) { - return; - } - - if (qtest_enabled()) { - /* When testing, qtest commands advance icount. */ - return; - } - - replay_checkpoint(CHECKPOINT_CLOCK_WARP_START); - } else { - /* warp clock deterministically in record/replay mode */ - if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) { - /* vCPU is sleeping and warp can't be started. - It is probably a race condition: notification sent - to vCPU was processed in advance and vCPU went to sleep. - Therefore we have to wake it up for doing someting. */ - if (replay_has_checkpoint()) { - qemu_clock_notify(QEMU_CLOCK_VIRTUAL); - } - return; - } - } - - /* We want to use the earliest deadline from ALL vm_clocks */ - clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT); - deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL, - ~QEMU_TIMER_ATTR_EXTERNAL); - if (deadline < 0) { - static bool notified; - if (!icount_sleep && !notified) { - warn_report("icount sleep disabled and no active timers"); - notified = true; - } - return; - } - - if (deadline > 0) { - /* - * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to - * sleep. Otherwise, the CPU might be waiting for a future timer - * interrupt to wake it up, but the interrupt never comes because - * the vCPU isn't running any insns and thus doesn't advance the - * QEMU_CLOCK_VIRTUAL. - */ - if (!icount_sleep) { - /* - * We never let VCPUs sleep in no sleep icount mode. - * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance - * to the next QEMU_CLOCK_VIRTUAL event and notify it. - * It is useful when we want a deterministic execution time, - * isolated from host latencies. - */ - seqlock_write_lock(&timers_state.vm_clock_seqlock, - &timers_state.vm_clock_lock); - atomic_set_i64(&timers_state.qemu_icount_bias, - timers_state.qemu_icount_bias + deadline); - seqlock_write_unlock(&timers_state.vm_clock_seqlock, - &timers_state.vm_clock_lock); - qemu_clock_notify(QEMU_CLOCK_VIRTUAL); - } else { - /* - * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some - * "real" time, (related to the time left until the next event) has - * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this. - * This avoids that the warps are visible externally; for example, - * you will not be sending network packets continuously instead of - * every 100ms. - */ - seqlock_write_lock(&timers_state.vm_clock_seqlock, - &timers_state.vm_clock_lock); - if (timers_state.vm_clock_warp_start == -1 - || timers_state.vm_clock_warp_start > clock) { - timers_state.vm_clock_warp_start = clock; - } - seqlock_write_unlock(&timers_state.vm_clock_seqlock, - &timers_state.vm_clock_lock); - timer_mod_anticipate(timers_state.icount_warp_timer, - clock + deadline); - } - } else if (deadline == 0) { - qemu_clock_notify(QEMU_CLOCK_VIRTUAL); - } -} - -static void qemu_account_warp_timer(void) -{ - if (!use_icount || !icount_sleep) { - return; - } - - /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers - * do not fire, so computing the deadline does not make sense. 
- */ - if (!runstate_is_running()) { - return; - } - - /* warp clock deterministically in record/replay mode */ - if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) { - return; - } - - timer_del(timers_state.icount_warp_timer); - icount_warp_rt(); -} - -static bool icount_state_needed(void *opaque) -{ - return use_icount; -} - -static bool warp_timer_state_needed(void *opaque) -{ - TimersState *s = opaque; - return s->icount_warp_timer != NULL; -} - -static bool adjust_timers_state_needed(void *opaque) -{ - TimersState *s = opaque; - return s->icount_rt_timer != NULL; -} - -static bool shift_state_needed(void *opaque) -{ - return use_icount == 2; -} - -/* - * Subsection for warp timer migration is optional, because may not be created - */ -static const VMStateDescription icount_vmstate_warp_timer = { - .name = "timer/icount/warp_timer", - .version_id = 1, - .minimum_version_id = 1, - .needed = warp_timer_state_needed, - .fields = (VMStateField[]) { - VMSTATE_INT64(vm_clock_warp_start, TimersState), - VMSTATE_TIMER_PTR(icount_warp_timer, TimersState), - VMSTATE_END_OF_LIST() - } -}; - -static const VMStateDescription icount_vmstate_adjust_timers = { - .name = "timer/icount/timers", - .version_id = 1, - .minimum_version_id = 1, - .needed = adjust_timers_state_needed, - .fields = (VMStateField[]) { - VMSTATE_TIMER_PTR(icount_rt_timer, TimersState), - VMSTATE_TIMER_PTR(icount_vm_timer, TimersState), - VMSTATE_END_OF_LIST() - } -}; - -static const VMStateDescription icount_vmstate_shift = { - .name = "timer/icount/shift", - .version_id = 1, - .minimum_version_id = 1, - .needed = shift_state_needed, - .fields = (VMStateField[]) { - VMSTATE_INT16(icount_time_shift, TimersState), - VMSTATE_END_OF_LIST() - } -}; - -/* - * This is a subsection for icount migration. - */ -static const VMStateDescription icount_vmstate_timers = { - .name = "timer/icount", - .version_id = 1, - .minimum_version_id = 1, - .needed = icount_state_needed, - .fields = (VMStateField[]) { - VMSTATE_INT64(qemu_icount_bias, TimersState), - VMSTATE_INT64(qemu_icount, TimersState), - VMSTATE_END_OF_LIST() - }, - .subsections = (const VMStateDescription*[]) { - &icount_vmstate_warp_timer, - &icount_vmstate_adjust_timers, - &icount_vmstate_shift, - NULL - } -}; - -static const VMStateDescription vmstate_timers = { - .name = "timer", - .version_id = 2, - .minimum_version_id = 1, - .fields = (VMStateField[]) { - VMSTATE_INT64(cpu_ticks_offset, TimersState), - VMSTATE_UNUSED(8), - VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2), - VMSTATE_END_OF_LIST() - }, - .subsections = (const VMStateDescription*[]) { - &icount_vmstate_timers, - NULL - } -}; - -static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque) -{ - double pct; - double throttle_ratio; - int64_t sleeptime_ns, endtime_ns; - - if (!cpu_throttle_get_percentage()) { - return; - } - - pct = (double)cpu_throttle_get_percentage()/100; - throttle_ratio = pct / (1 - pct); - /* Add 1ns to fix double's rounding error (like 0.9999999...) 
*/ - sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 1); - endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns; - while (sleeptime_ns > 0 && !cpu->stop) { - if (sleeptime_ns > SCALE_MS) { - qemu_cond_timedwait(cpu->halt_cond, &qemu_global_mutex, - sleeptime_ns / SCALE_MS); - } else { - qemu_mutex_unlock_iothread(); - g_usleep(sleeptime_ns / SCALE_US); - qemu_mutex_lock_iothread(); - } - sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME); - } - atomic_set(&cpu->throttle_thread_scheduled, 0); -} - -static void cpu_throttle_timer_tick(void *opaque) -{ - CPUState *cpu; - double pct; - - /* Stop the timer if needed */ - if (!cpu_throttle_get_percentage()) { - return; - } - CPU_FOREACH(cpu) { - if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) { - async_run_on_cpu(cpu, cpu_throttle_thread, - RUN_ON_CPU_NULL); - } - } - - pct = (double)cpu_throttle_get_percentage()/100; - timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) + - CPU_THROTTLE_TIMESLICE_NS / (1-pct)); -} - -void cpu_throttle_set(int new_throttle_pct) -{ - /* Ensure throttle percentage is within valid range */ - new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX); - new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN); - - atomic_set(&throttle_percentage, new_throttle_pct); - - timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) + - CPU_THROTTLE_TIMESLICE_NS); -} - -void cpu_throttle_stop(void) -{ - atomic_set(&throttle_percentage, 0); -} - -bool cpu_throttle_active(void) -{ - return (cpu_throttle_get_percentage() != 0); -} - -int cpu_throttle_get_percentage(void) -{ - return atomic_read(&throttle_percentage); -} - -void cpu_ticks_init(void) -{ - seqlock_init(&timers_state.vm_clock_seqlock); - qemu_spin_init(&timers_state.vm_clock_lock); - vmstate_register(NULL, 0, &vmstate_timers, &timers_state); - throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT, - cpu_throttle_timer_tick, NULL); -} - -void configure_icount(QemuOpts *opts, Error **errp) -{ - const char *option = qemu_opt_get(opts, "shift"); - bool sleep = qemu_opt_get_bool(opts, "sleep", true); - bool align = qemu_opt_get_bool(opts, "align", false); - long time_shift = -1; - - if (!option) { - if (qemu_opt_get(opts, "align") != NULL) { - error_setg(errp, "Please specify shift option when using align"); - } - return; - } - - if (align && !sleep) { - error_setg(errp, "align=on and sleep=off are incompatible"); - return; - } - - if (strcmp(option, "auto") != 0) { - if (qemu_strtol(option, NULL, 0, &time_shift) < 0 - || time_shift < 0 || time_shift > MAX_ICOUNT_SHIFT) { - error_setg(errp, "icount: Invalid shift value"); - return; - } - } else if (icount_align_option) { - error_setg(errp, "shift=auto and align=on are incompatible"); - return; - } else if (!icount_sleep) { - error_setg(errp, "shift=auto and sleep=off are incompatible"); - return; - } - - icount_sleep = sleep; - if (icount_sleep) { - timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT, - icount_timer_cb, NULL); - } - - icount_align_option = align; - - if (time_shift >= 0) { - timers_state.icount_time_shift = time_shift; - use_icount = 1; - return; - } - - use_icount = 2; - - /* 125MIPS seems a reasonable initial guess at the guest speed. - It will be corrected fairly quickly anyway. */ - timers_state.icount_time_shift = 3; - - /* Have both realtime and virtual time triggers for speed adjustment. 
- The realtime trigger catches emulated time passing too slowly, - the virtual time trigger catches emulated time passing too fast. - Realtime triggers occur even when idle, so use them less frequently - than VM triggers. */ - timers_state.vm_clock_warp_start = -1; - timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT, - icount_adjust_rt, NULL); - timer_mod(timers_state.icount_rt_timer, - qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000); - timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, - icount_adjust_vm, NULL); - timer_mod(timers_state.icount_vm_timer, - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - NANOSECONDS_PER_SECOND / 10); -} - -/***********************************************************/ -/* TCG vCPU kick timer - * - * The kick timer is responsible for moving single threaded vCPU - * emulation on to the next vCPU. If more than one vCPU is running a - * timer event with force a cpu->exit so the next vCPU can get - * scheduled. - * - * The timer is removed if all vCPUs are idle and restarted again once - * idleness is complete. - */ - -static QEMUTimer *tcg_kick_vcpu_timer; -static CPUState *tcg_current_rr_cpu; - -#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10) - -static inline int64_t qemu_tcg_next_kick(void) -{ - return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD; -} - -/* Kick the currently round-robin scheduled vCPU to next */ -static void qemu_cpu_kick_rr_next_cpu(void) -{ - CPUState *cpu; - do { - cpu = atomic_mb_read(&tcg_current_rr_cpu); - if (cpu) { - cpu_exit(cpu); - } - } while (cpu != atomic_mb_read(&tcg_current_rr_cpu)); -} - -/* Kick all RR vCPUs */ -static void qemu_cpu_kick_rr_cpus(void) -{ - CPUState *cpu; - - CPU_FOREACH(cpu) { - cpu_exit(cpu); - }; -} - -static void do_nothing(CPUState *cpu, run_on_cpu_data unused) -{ -} - -void qemu_timer_notify_cb(void *opaque, QEMUClockType type) -{ - if (!use_icount || type != QEMU_CLOCK_VIRTUAL) { - qemu_notify_event(); - return; - } - - if (qemu_in_vcpu_thread()) { - /* A CPU is currently running; kick it back out to the - * tcg_cpu_exec() loop so it will recalculate its - * icount deadline immediately. - */ - qemu_cpu_kick(current_cpu); - } else if (first_cpu) { - /* qemu_cpu_kick is not enough to kick a halted CPU out of - * qemu_tcg_wait_io_event. async_run_on_cpu, instead, - * causes cpu_thread_is_idle to return false. This way, - * handle_icount_deadline can run. - * If we have no CPUs at all for some reason, we don't - * need to do anything. - */ - async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL); - } -} - -static void kick_tcg_thread(void *opaque) -{ - timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick()); - qemu_cpu_kick_rr_next_cpu(); -} - -static void start_tcg_kick_timer(void) -{ - assert(!mttcg_enabled); - if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) { - tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, - kick_tcg_thread, NULL); - } - if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) { - timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick()); - } -} - -static void stop_tcg_kick_timer(void) -{ - assert(!mttcg_enabled); - if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) { - timer_del(tcg_kick_vcpu_timer); - } -} - -/***********************************************************/ -void hw_error(const char *fmt, ...) 
-{ - va_list ap; - CPUState *cpu; - - va_start(ap, fmt); - fprintf(stderr, "qemu: hardware error: "); - vfprintf(stderr, fmt, ap); - fprintf(stderr, "\n"); - CPU_FOREACH(cpu) { - fprintf(stderr, "CPU #%d:\n", cpu->cpu_index); - cpu_dump_state(cpu, stderr, CPU_DUMP_FPU); - } - va_end(ap); - abort(); -} - -void cpu_synchronize_all_states(void) -{ - CPUState *cpu; - - CPU_FOREACH(cpu) { - cpu_synchronize_state(cpu); - } -} - -void cpu_synchronize_all_post_reset(void) -{ - CPUState *cpu; - - CPU_FOREACH(cpu) { - cpu_synchronize_post_reset(cpu); - } -} - -void cpu_synchronize_all_post_init(void) -{ - CPUState *cpu; - - CPU_FOREACH(cpu) { - cpu_synchronize_post_init(cpu); - } -} - -void cpu_synchronize_all_pre_loadvm(void) -{ - CPUState *cpu; - - CPU_FOREACH(cpu) { - cpu_synchronize_pre_loadvm(cpu); - } -} - -static int do_vm_stop(RunState state, bool send_stop) -{ - int ret = 0; - - if (runstate_is_running()) { - runstate_set(state); - cpu_disable_ticks(); - pause_all_vcpus(); - vm_state_notify(0, state); - if (send_stop) { - qapi_event_send_stop(); - } - } - - bdrv_drain_all(); - ret = bdrv_flush_all(); - - return ret; -} - -/* Special vm_stop() variant for terminating the process. Historically clients - * did not expect a QMP STOP event and so we need to retain compatibility. - */ -int vm_shutdown(void) -{ - return do_vm_stop(RUN_STATE_SHUTDOWN, false); -} - -static bool cpu_can_run(CPUState *cpu) -{ - if (cpu->stop) { - return false; - } - if (cpu_is_stopped(cpu)) { - return false; - } - return true; -} - -static void cpu_handle_guest_debug(CPUState *cpu) -{ - gdb_set_stop_cpu(cpu); - qemu_system_debug_request(); - cpu->stopped = true; -} - -#ifdef CONFIG_LINUX -static void sigbus_reraise(void) -{ - sigset_t set; - struct sigaction action; - - memset(&action, 0, sizeof(action)); - action.sa_handler = SIG_DFL; - if (!sigaction(SIGBUS, &action, NULL)) { - raise(SIGBUS); - sigemptyset(&set); - sigaddset(&set, SIGBUS); - pthread_sigmask(SIG_UNBLOCK, &set, NULL); - } - perror("Failed to re-raise SIGBUS!\n"); - abort(); -} - -static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx) -{ - if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) { - sigbus_reraise(); - } - - if (current_cpu) { - /* Called asynchronously in VCPU thread. */ - if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) { - sigbus_reraise(); - } - } else { - /* Called synchronously (via signalfd) in main thread. 
*/ - if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) { - sigbus_reraise(); - } - } -} - -static void qemu_init_sigbus(void) -{ - struct sigaction action; - - memset(&action, 0, sizeof(action)); - action.sa_flags = SA_SIGINFO; - action.sa_sigaction = sigbus_handler; - sigaction(SIGBUS, &action, NULL); - - prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0); -} -#else /* !CONFIG_LINUX */ -static void qemu_init_sigbus(void) -{ -} -#endif /* !CONFIG_LINUX */ - -static QemuThread io_thread; - -/* cpu creation */ -static QemuCond qemu_cpu_cond; -/* system init */ -static QemuCond qemu_pause_cond; - -void qemu_init_cpu_loop(void) -{ - qemu_init_sigbus(); - qemu_cond_init(&qemu_cpu_cond); - qemu_cond_init(&qemu_pause_cond); - qemu_mutex_init(&qemu_global_mutex); - - qemu_thread_get_self(&io_thread); -} - -void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data) -{ - do_run_on_cpu(cpu, func, data, &qemu_global_mutex); -} - -static void qemu_kvm_destroy_vcpu(CPUState *cpu) -{ - if (kvm_destroy_vcpu(cpu) < 0) { - error_report("kvm_destroy_vcpu failed"); - exit(EXIT_FAILURE); - } -} - -static void qemu_tcg_destroy_vcpu(CPUState *cpu) -{ -} - -static void qemu_cpu_stop(CPUState *cpu, bool exit) -{ - g_assert(qemu_cpu_is_self(cpu)); - cpu->stop = false; - cpu->stopped = true; - if (exit) { - cpu_exit(cpu); - } - qemu_cond_broadcast(&qemu_pause_cond); -} - -static void qemu_wait_io_event_common(CPUState *cpu) -{ - atomic_mb_set(&cpu->thread_kicked, false); - if (cpu->stop) { - qemu_cpu_stop(cpu, false); - } - process_queued_cpu_work(cpu); -} - -static void qemu_tcg_rr_wait_io_event(void) -{ - CPUState *cpu; - - while (all_cpu_threads_idle()) { - stop_tcg_kick_timer(); - qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex); - } - - start_tcg_kick_timer(); - - CPU_FOREACH(cpu) { - qemu_wait_io_event_common(cpu); - } -} - -static void qemu_wait_io_event(CPUState *cpu) -{ - bool slept = false; - - while (cpu_thread_is_idle(cpu)) { - if (!slept) { - slept = true; - qemu_plugin_vcpu_idle_cb(cpu); - } - qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex); - } - if (slept) { - qemu_plugin_vcpu_resume_cb(cpu); - } - -#ifdef _WIN32 - /* Eat dummy APC queued by qemu_cpu_kick_thread. 
*/ - if (!tcg_enabled()) { - SleepEx(0, TRUE); - } -#endif - qemu_wait_io_event_common(cpu); -} - -static void *qemu_kvm_cpu_thread_fn(void *arg) -{ - CPUState *cpu = arg; - int r; - - rcu_register_thread(); - - qemu_mutex_lock_iothread(); - qemu_thread_get_self(cpu->thread); - cpu->thread_id = qemu_get_thread_id(); - cpu->can_do_io = 1; - current_cpu = cpu; - - r = kvm_init_vcpu(cpu); - if (r < 0) { - error_report("kvm_init_vcpu failed: %s", strerror(-r)); - exit(1); - } - - kvm_init_cpu_signals(cpu); - - /* signal CPU creation */ - cpu->created = true; - qemu_cond_signal(&qemu_cpu_cond); - qemu_guest_random_seed_thread_part2(cpu->random_seed); - - do { - if (cpu_can_run(cpu)) { - r = kvm_cpu_exec(cpu); - if (r == EXCP_DEBUG) { - cpu_handle_guest_debug(cpu); - } - } - qemu_wait_io_event(cpu); - } while (!cpu->unplug || cpu_can_run(cpu)); - - qemu_kvm_destroy_vcpu(cpu); - cpu->created = false; - qemu_cond_signal(&qemu_cpu_cond); - qemu_mutex_unlock_iothread(); - rcu_unregister_thread(); - return NULL; -} - -static void *qemu_dummy_cpu_thread_fn(void *arg) -{ -#ifdef _WIN32 - error_report("qtest is not supported under Windows"); - exit(1); -#else - CPUState *cpu = arg; - sigset_t waitset; - int r; - - rcu_register_thread(); - - qemu_mutex_lock_iothread(); - qemu_thread_get_self(cpu->thread); - cpu->thread_id = qemu_get_thread_id(); - cpu->can_do_io = 1; - current_cpu = cpu; - - sigemptyset(&waitset); - sigaddset(&waitset, SIG_IPI); - - /* signal CPU creation */ - cpu->created = true; - qemu_cond_signal(&qemu_cpu_cond); - qemu_guest_random_seed_thread_part2(cpu->random_seed); - - do { - qemu_mutex_unlock_iothread(); - do { - int sig; - r = sigwait(&waitset, &sig); - } while (r == -1 && (errno == EAGAIN || errno == EINTR)); - if (r == -1) { - perror("sigwait"); - exit(1); - } - qemu_mutex_lock_iothread(); - qemu_wait_io_event(cpu); - } while (!cpu->unplug); - - qemu_mutex_unlock_iothread(); - rcu_unregister_thread(); - return NULL; -#endif -} - -static int64_t tcg_get_icount_limit(void) -{ - int64_t deadline; - - if (replay_mode != REPLAY_MODE_PLAY) { - /* - * Include all the timers, because they may need an attention. - * Too long CPU execution may create unnecessary delay in UI. - */ - deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL, - QEMU_TIMER_ATTR_ALL); - /* Check realtime timers, because they help with input processing */ - deadline = qemu_soonest_timeout(deadline, - qemu_clock_deadline_ns_all(QEMU_CLOCK_REALTIME, - QEMU_TIMER_ATTR_ALL)); - - /* Maintain prior (possibly buggy) behaviour where if no deadline - * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than - * INT32_MAX nanoseconds ahead, we still use INT32_MAX - * nanoseconds. - */ - if ((deadline < 0) || (deadline > INT32_MAX)) { - deadline = INT32_MAX; - } - - return qemu_icount_round(deadline); - } else { - return replay_get_instructions(); - } -} - -static void notify_aio_contexts(void) -{ - /* Wake up other AioContexts. */ - qemu_clock_notify(QEMU_CLOCK_VIRTUAL); - qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL); -} - -static void handle_icount_deadline(void) -{ - assert(qemu_in_vcpu_thread()); - if (use_icount) { - int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL, - QEMU_TIMER_ATTR_ALL); - - if (deadline == 0) { - notify_aio_contexts(); - } - } -} - -static void prepare_icount_for_run(CPUState *cpu) -{ - if (use_icount) { - int insns_left; - - /* These should always be cleared by process_icount_data after - * each vCPU execution. 
However u16.high can be raised - * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt - */ - g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0); - g_assert(cpu->icount_extra == 0); - - cpu->icount_budget = tcg_get_icount_limit(); - insns_left = MIN(0xffff, cpu->icount_budget); - cpu_neg(cpu)->icount_decr.u16.low = insns_left; - cpu->icount_extra = cpu->icount_budget - insns_left; - - replay_mutex_lock(); - - if (cpu->icount_budget == 0 && replay_has_checkpoint()) { - notify_aio_contexts(); - } - } -} - -static void process_icount_data(CPUState *cpu) -{ - if (use_icount) { - /* Account for executed instructions */ - cpu_update_icount(cpu); - - /* Reset the counters */ - cpu_neg(cpu)->icount_decr.u16.low = 0; - cpu->icount_extra = 0; - cpu->icount_budget = 0; - - replay_account_executed_instructions(); - - replay_mutex_unlock(); - } -} - - -static int tcg_cpu_exec(CPUState *cpu) -{ - int ret; -#ifdef CONFIG_PROFILER - int64_t ti; -#endif - - assert(tcg_enabled()); -#ifdef CONFIG_PROFILER - ti = profile_getclock(); -#endif - cpu_exec_start(cpu); - ret = cpu_exec(cpu); - cpu_exec_end(cpu); -#ifdef CONFIG_PROFILER - atomic_set(&tcg_ctx->prof.cpu_exec_time, - tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti); -#endif - return ret; -} - -/* Destroy any remaining vCPUs which have been unplugged and have - * finished running - */ -static void deal_with_unplugged_cpus(void) -{ - CPUState *cpu; - - CPU_FOREACH(cpu) { - if (cpu->unplug && !cpu_can_run(cpu)) { - qemu_tcg_destroy_vcpu(cpu); - cpu->created = false; - qemu_cond_signal(&qemu_cpu_cond); - break; - } - } -} - -/* Single-threaded TCG - * - * In the single-threaded case each vCPU is simulated in turn. If - * there is more than a single vCPU we create a simple timer to kick - * the vCPU and ensure we don't get stuck in a tight loop in one vCPU. - * This is done explicitly rather than relying on side-effects - * elsewhere. - */ - -static void *qemu_tcg_rr_cpu_thread_fn(void *arg) -{ - CPUState *cpu = arg; - - assert(tcg_enabled()); - rcu_register_thread(); - tcg_register_thread(); - - qemu_mutex_lock_iothread(); - qemu_thread_get_self(cpu->thread); - - cpu->thread_id = qemu_get_thread_id(); - cpu->created = true; - cpu->can_do_io = 1; - qemu_cond_signal(&qemu_cpu_cond); - qemu_guest_random_seed_thread_part2(cpu->random_seed); - - /* wait for initial kick-off after machine start */ - while (first_cpu->stopped) { - qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex); - - /* process any pending work */ - CPU_FOREACH(cpu) { - current_cpu = cpu; - qemu_wait_io_event_common(cpu); - } - } - - start_tcg_kick_timer(); - - cpu = first_cpu; - - /* process any pending work */ - cpu->exit_request = 1; - - while (1) { - qemu_mutex_unlock_iothread(); - replay_mutex_lock(); - qemu_mutex_lock_iothread(); - /* Account partial waits to QEMU_CLOCK_VIRTUAL. */ - qemu_account_warp_timer(); - - /* Run the timers here. This is much more efficient than - * waking up the I/O thread and waiting for completion. 
- */ - handle_icount_deadline(); - - replay_mutex_unlock(); - - if (!cpu) { - cpu = first_cpu; - } - - while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) { - - atomic_mb_set(&tcg_current_rr_cpu, cpu); - current_cpu = cpu; - - qemu_clock_enable(QEMU_CLOCK_VIRTUAL, - (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0); - - if (cpu_can_run(cpu)) { - int r; - - qemu_mutex_unlock_iothread(); - prepare_icount_for_run(cpu); - - r = tcg_cpu_exec(cpu); - - process_icount_data(cpu); - qemu_mutex_lock_iothread(); - - if (r == EXCP_DEBUG) { - cpu_handle_guest_debug(cpu); - break; - } else if (r == EXCP_ATOMIC) { - qemu_mutex_unlock_iothread(); - cpu_exec_step_atomic(cpu); - qemu_mutex_lock_iothread(); - break; - } - } else if (cpu->stop) { - if (cpu->unplug) { - cpu = CPU_NEXT(cpu); - } - break; - } - - cpu = CPU_NEXT(cpu); - } /* while (cpu && !cpu->exit_request).. */ - - /* Does not need atomic_mb_set because a spurious wakeup is okay. */ - atomic_set(&tcg_current_rr_cpu, NULL); - - if (cpu && cpu->exit_request) { - atomic_mb_set(&cpu->exit_request, 0); - } - - if (use_icount && all_cpu_threads_idle()) { - /* - * When all cpus are sleeping (e.g in WFI), to avoid a deadlock - * in the main_loop, wake it up in order to start the warp timer. - */ - qemu_notify_event(); - } - - qemu_tcg_rr_wait_io_event(); - deal_with_unplugged_cpus(); - } - - rcu_unregister_thread(); - return NULL; -} - -static void *qemu_hax_cpu_thread_fn(void *arg) -{ - CPUState *cpu = arg; - int r; - - rcu_register_thread(); - qemu_mutex_lock_iothread(); - qemu_thread_get_self(cpu->thread); - - cpu->thread_id = qemu_get_thread_id(); - cpu->created = true; - current_cpu = cpu; - - hax_init_vcpu(cpu); - qemu_cond_signal(&qemu_cpu_cond); - qemu_guest_random_seed_thread_part2(cpu->random_seed); - - do { - if (cpu_can_run(cpu)) { - r = hax_smp_cpu_exec(cpu); - if (r == EXCP_DEBUG) { - cpu_handle_guest_debug(cpu); - } - } - - qemu_wait_io_event(cpu); - } while (!cpu->unplug || cpu_can_run(cpu)); - rcu_unregister_thread(); - return NULL; -} - -/* The HVF-specific vCPU thread function. This one should only run when the host - * CPU supports the VMX "unrestricted guest" feature. 
*/ -static void *qemu_hvf_cpu_thread_fn(void *arg) -{ - CPUState *cpu = arg; - - int r; - - assert(hvf_enabled()); - - rcu_register_thread(); - - qemu_mutex_lock_iothread(); - qemu_thread_get_self(cpu->thread); - - cpu->thread_id = qemu_get_thread_id(); - cpu->can_do_io = 1; - current_cpu = cpu; - - hvf_init_vcpu(cpu); - - /* signal CPU creation */ - cpu->created = true; - qemu_cond_signal(&qemu_cpu_cond); - qemu_guest_random_seed_thread_part2(cpu->random_seed); - - do { - if (cpu_can_run(cpu)) { - r = hvf_vcpu_exec(cpu); - if (r == EXCP_DEBUG) { - cpu_handle_guest_debug(cpu); - } - } - qemu_wait_io_event(cpu); - } while (!cpu->unplug || cpu_can_run(cpu)); - - hvf_vcpu_destroy(cpu); - cpu->created = false; - qemu_cond_signal(&qemu_cpu_cond); - qemu_mutex_unlock_iothread(); - rcu_unregister_thread(); - return NULL; -} - -static void *qemu_whpx_cpu_thread_fn(void *arg) -{ - CPUState *cpu = arg; - int r; - - rcu_register_thread(); - - qemu_mutex_lock_iothread(); - qemu_thread_get_self(cpu->thread); - cpu->thread_id = qemu_get_thread_id(); - current_cpu = cpu; - - r = whpx_init_vcpu(cpu); - if (r < 0) { - fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-r)); - exit(1); - } - - /* signal CPU creation */ - cpu->created = true; - qemu_cond_signal(&qemu_cpu_cond); - qemu_guest_random_seed_thread_part2(cpu->random_seed); - - do { - if (cpu_can_run(cpu)) { - r = whpx_vcpu_exec(cpu); - if (r == EXCP_DEBUG) { - cpu_handle_guest_debug(cpu); - } - } - while (cpu_thread_is_idle(cpu)) { - qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex); - } - qemu_wait_io_event_common(cpu); - } while (!cpu->unplug || cpu_can_run(cpu)); - - whpx_destroy_vcpu(cpu); - cpu->created = false; - qemu_cond_signal(&qemu_cpu_cond); - qemu_mutex_unlock_iothread(); - rcu_unregister_thread(); - return NULL; -} - -#ifdef _WIN32 -static void CALLBACK dummy_apc_func(ULONG_PTR unused) -{ -} -#endif - -/* Multi-threaded TCG - * - * In the multi-threaded case each vCPU has its own thread. The TLS - * variable current_cpu can be used deep in the code to find the - * current CPUState for a given thread. - */ - -static void *qemu_tcg_cpu_thread_fn(void *arg) -{ - CPUState *cpu = arg; - - assert(tcg_enabled()); - g_assert(!use_icount); - - rcu_register_thread(); - tcg_register_thread(); - - qemu_mutex_lock_iothread(); - qemu_thread_get_self(cpu->thread); - - cpu->thread_id = qemu_get_thread_id(); - cpu->created = true; - cpu->can_do_io = 1; - current_cpu = cpu; - qemu_cond_signal(&qemu_cpu_cond); - qemu_guest_random_seed_thread_part2(cpu->random_seed); - - /* process any pending work */ - cpu->exit_request = 1; - - do { - if (cpu_can_run(cpu)) { - int r; - qemu_mutex_unlock_iothread(); - r = tcg_cpu_exec(cpu); - qemu_mutex_lock_iothread(); - switch (r) { - case EXCP_DEBUG: - cpu_handle_guest_debug(cpu); - break; - case EXCP_HALTED: - /* during start-up the vCPU is reset and the thread is - * kicked several times. If we don't ensure we go back - * to sleep in the halted state we won't cleanly - * start-up when the vCPU is enabled. - * - * cpu->halted should ensure we sleep in wait_io_event - */ - g_assert(cpu->halted); - break; - case EXCP_ATOMIC: - qemu_mutex_unlock_iothread(); - cpu_exec_step_atomic(cpu); - qemu_mutex_lock_iothread(); - default: - /* Ignore everything else? 
*/ - break; - } - } - - atomic_mb_set(&cpu->exit_request, 0); - qemu_wait_io_event(cpu); - } while (!cpu->unplug || cpu_can_run(cpu)); - - qemu_tcg_destroy_vcpu(cpu); - cpu->created = false; - qemu_cond_signal(&qemu_cpu_cond); - qemu_mutex_unlock_iothread(); - rcu_unregister_thread(); - return NULL; -} - -static void qemu_cpu_kick_thread(CPUState *cpu) -{ -#ifndef _WIN32 - int err; - - if (cpu->thread_kicked) { - return; - } - cpu->thread_kicked = true; - err = pthread_kill(cpu->thread->thread, SIG_IPI); - if (err && err != ESRCH) { - fprintf(stderr, "qemu:%s: %s", __func__, strerror(err)); - exit(1); - } -#else /* _WIN32 */ - if (!qemu_cpu_is_self(cpu)) { - if (whpx_enabled()) { - whpx_vcpu_kick(cpu); - } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) { - fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n", - __func__, GetLastError()); - exit(1); - } - } -#endif -} - -void qemu_cpu_kick(CPUState *cpu) -{ - qemu_cond_broadcast(cpu->halt_cond); - if (tcg_enabled()) { - if (qemu_tcg_mttcg_enabled()) { - cpu_exit(cpu); - } else { - qemu_cpu_kick_rr_cpus(); - } - } else { - if (hax_enabled()) { - /* - * FIXME: race condition with the exit_request check in - * hax_vcpu_hax_exec - */ - cpu->exit_request = 1; - } - qemu_cpu_kick_thread(cpu); - } -} - -void qemu_cpu_kick_self(void) -{ - assert(current_cpu); - qemu_cpu_kick_thread(current_cpu); -} - -bool qemu_cpu_is_self(CPUState *cpu) -{ - return qemu_thread_is_self(cpu->thread); -} - -bool qemu_in_vcpu_thread(void) -{ - return current_cpu && qemu_cpu_is_self(current_cpu); -} - -static __thread bool iothread_locked = false; - -bool qemu_mutex_iothread_locked(void) -{ - return iothread_locked; -} - -/* - * The BQL is taken from so many places that it is worth profiling the - * callers directly, instead of funneling them all through a single function. 
- */ -void qemu_mutex_lock_iothread_impl(const char *file, int line) -{ - QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func); - - g_assert(!qemu_mutex_iothread_locked()); - bql_lock(&qemu_global_mutex, file, line); - iothread_locked = true; -} - -void qemu_mutex_unlock_iothread(void) -{ - g_assert(qemu_mutex_iothread_locked()); - iothread_locked = false; - qemu_mutex_unlock(&qemu_global_mutex); -} - -void qemu_cond_wait_iothread(QemuCond *cond) -{ - qemu_cond_wait(cond, &qemu_global_mutex); -} - -static bool all_vcpus_paused(void) -{ - CPUState *cpu; - - CPU_FOREACH(cpu) { - if (!cpu->stopped) { - return false; - } - } - - return true; -} - -void pause_all_vcpus(void) -{ - CPUState *cpu; - - qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false); - CPU_FOREACH(cpu) { - if (qemu_cpu_is_self(cpu)) { - qemu_cpu_stop(cpu, true); - } else { - cpu->stop = true; - qemu_cpu_kick(cpu); - } - } - - /* We need to drop the replay_lock so any vCPU threads woken up - * can finish their replay tasks - */ - replay_mutex_unlock(); - - while (!all_vcpus_paused()) { - qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex); - CPU_FOREACH(cpu) { - qemu_cpu_kick(cpu); - } - } - - qemu_mutex_unlock_iothread(); - replay_mutex_lock(); - qemu_mutex_lock_iothread(); -} - -void cpu_resume(CPUState *cpu) -{ - cpu->stop = false; - cpu->stopped = false; - qemu_cpu_kick(cpu); -} - -void resume_all_vcpus(void) -{ - CPUState *cpu; - - if (!runstate_is_running()) { - return; - } - - qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true); - CPU_FOREACH(cpu) { - cpu_resume(cpu); - } -} - -void cpu_remove_sync(CPUState *cpu) -{ - cpu->stop = true; - cpu->unplug = true; - qemu_cpu_kick(cpu); - qemu_mutex_unlock_iothread(); - qemu_thread_join(cpu->thread); - qemu_mutex_lock_iothread(); -} - -/* For temporary buffers for forming a name */ -#define VCPU_THREAD_NAME_SIZE 16 - -static void qemu_tcg_init_vcpu(CPUState *cpu) -{ - char thread_name[VCPU_THREAD_NAME_SIZE]; - static QemuCond *single_tcg_halt_cond; - static QemuThread *single_tcg_cpu_thread; - static int tcg_region_inited; - - assert(tcg_enabled()); - /* - * Initialize TCG regions--once. Now is a good time, because: - * (1) TCG's init context, prologue and target globals have been set up. - * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the - * -accel flag is processed, so the check doesn't work then). 
- */ - if (!tcg_region_inited) { - tcg_region_inited = 1; - tcg_region_init(); - } - - if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) { - cpu->thread = g_malloc0(sizeof(QemuThread)); - cpu->halt_cond = g_malloc0(sizeof(QemuCond)); - qemu_cond_init(cpu->halt_cond); - - if (qemu_tcg_mttcg_enabled()) { - /* create a thread per vCPU with TCG (MTTCG) */ - parallel_cpus = true; - snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG", - cpu->cpu_index); - - qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn, - cpu, QEMU_THREAD_JOINABLE); - - } else { - /* share a single thread for all cpus with TCG */ - snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG"); - qemu_thread_create(cpu->thread, thread_name, - qemu_tcg_rr_cpu_thread_fn, - cpu, QEMU_THREAD_JOINABLE); - - single_tcg_halt_cond = cpu->halt_cond; - single_tcg_cpu_thread = cpu->thread; - } -#ifdef _WIN32 - cpu->hThread = qemu_thread_get_handle(cpu->thread); -#endif - } else { - /* For non-MTTCG cases we share the thread */ - cpu->thread = single_tcg_cpu_thread; - cpu->halt_cond = single_tcg_halt_cond; - cpu->thread_id = first_cpu->thread_id; - cpu->can_do_io = 1; - cpu->created = true; - } -} - -static void qemu_hax_start_vcpu(CPUState *cpu) -{ - char thread_name[VCPU_THREAD_NAME_SIZE]; - - cpu->thread = g_malloc0(sizeof(QemuThread)); - cpu->halt_cond = g_malloc0(sizeof(QemuCond)); - qemu_cond_init(cpu->halt_cond); - - snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX", - cpu->cpu_index); - qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn, - cpu, QEMU_THREAD_JOINABLE); -#ifdef _WIN32 - cpu->hThread = qemu_thread_get_handle(cpu->thread); -#endif -} - -static void qemu_kvm_start_vcpu(CPUState *cpu) -{ - char thread_name[VCPU_THREAD_NAME_SIZE]; - - cpu->thread = g_malloc0(sizeof(QemuThread)); - cpu->halt_cond = g_malloc0(sizeof(QemuCond)); - qemu_cond_init(cpu->halt_cond); - snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM", - cpu->cpu_index); - qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn, - cpu, QEMU_THREAD_JOINABLE); -} - -static void qemu_hvf_start_vcpu(CPUState *cpu) -{ - char thread_name[VCPU_THREAD_NAME_SIZE]; - - /* HVF currently does not support TCG, and only runs in - * unrestricted-guest mode. 
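The qemu_*_start_vcpu() helpers here all repeat one shape: allocate a QemuThread and a halt condition variable, format a name that fits the 16-byte VCPU_THREAD_NAME_SIZE buffer, and spawn a joinable thread running the accelerator's loop. Factored out as a sketch (this generic helper is not QEMU code; only the calls it makes are):

static void generic_start_vcpu(CPUState *cpu, const char *tag,
                               void *(*fn)(void *))
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/%s",
             cpu->cpu_index, tag);                      /* e.g. "CPU 0/KVM" */
    qemu_thread_create(cpu->thread, thread_name, fn, cpu,
                       QEMU_THREAD_JOINABLE);
#ifdef _WIN32
    cpu->hThread = qemu_thread_get_handle(cpu->thread); /* only some accelerators use this */
#endif
}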
*/ - assert(hvf_enabled()); - - cpu->thread = g_malloc0(sizeof(QemuThread)); - cpu->halt_cond = g_malloc0(sizeof(QemuCond)); - qemu_cond_init(cpu->halt_cond); - - snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF", - cpu->cpu_index); - qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn, - cpu, QEMU_THREAD_JOINABLE); -} - -static void qemu_whpx_start_vcpu(CPUState *cpu) -{ - char thread_name[VCPU_THREAD_NAME_SIZE]; - - cpu->thread = g_malloc0(sizeof(QemuThread)); - cpu->halt_cond = g_malloc0(sizeof(QemuCond)); - qemu_cond_init(cpu->halt_cond); - snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX", - cpu->cpu_index); - qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn, - cpu, QEMU_THREAD_JOINABLE); -#ifdef _WIN32 - cpu->hThread = qemu_thread_get_handle(cpu->thread); -#endif -} - -static void qemu_dummy_start_vcpu(CPUState *cpu) -{ - char thread_name[VCPU_THREAD_NAME_SIZE]; - - cpu->thread = g_malloc0(sizeof(QemuThread)); - cpu->halt_cond = g_malloc0(sizeof(QemuCond)); - qemu_cond_init(cpu->halt_cond); - snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY", - cpu->cpu_index); - qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu, - QEMU_THREAD_JOINABLE); -} - -void qemu_init_vcpu(CPUState *cpu) -{ - MachineState *ms = MACHINE(qdev_get_machine()); - - cpu->nr_cores = ms->smp.cores; - cpu->nr_threads = ms->smp.threads; - cpu->stopped = true; - cpu->random_seed = qemu_guest_random_seed_thread_part1(); - - if (!cpu->as) { - /* If the target cpu hasn't set up any address spaces itself, - * give it the default one. - */ - cpu->num_ases = 1; - cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory); - } - - if (kvm_enabled()) { - qemu_kvm_start_vcpu(cpu); - } else if (hax_enabled()) { - qemu_hax_start_vcpu(cpu); - } else if (hvf_enabled()) { - qemu_hvf_start_vcpu(cpu); - } else if (tcg_enabled()) { - qemu_tcg_init_vcpu(cpu); - } else if (whpx_enabled()) { - qemu_whpx_start_vcpu(cpu); - } else { - qemu_dummy_start_vcpu(cpu); - } - - while (!cpu->created) { - qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex); - } -} - -void cpu_stop_current(void) -{ - if (current_cpu) { - current_cpu->stop = true; - cpu_exit(current_cpu); - } -} - -int vm_stop(RunState state) -{ - if (qemu_in_vcpu_thread()) { - qemu_system_vmstop_request_prepare(); - qemu_system_vmstop_request(state); - /* - * FIXME: should not return to device code in case - * vm_stop() has been requested. - */ - cpu_stop_current(); - return 0; - } - - return do_vm_stop(state, true); -} - -/** - * Prepare for (re)starting the VM. - * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already - * running or in case of an error condition), 0 otherwise. - */ -int vm_prepare_start(void) -{ - RunState requested; - - qemu_vmstop_requested(&requested); - if (runstate_is_running() && requested == RUN_STATE__MAX) { - return -1; - } - - /* Ensure that a STOP/RESUME pair of events is emitted if a - * vmstop request was pending. The BLOCK_IO_ERROR event, for - * example, according to documentation is always followed by - * the STOP event. 
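qemu_init_vcpu() above ends with a textbook creation handshake: the spawning thread sleeps on qemu_cpu_cond until the new vCPU thread sets cpu->created and signals. The bare pattern in standalone pthreads (names are illustrative):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t created_cond = PTHREAD_COND_INITIALIZER;
static bool created;

static void *vcpu_fn(void *arg)
{
    pthread_mutex_lock(&init_lock);
    created = true;                         /* like cpu->created = true */
    pthread_cond_signal(&created_cond);     /* like qemu_cond_signal(&qemu_cpu_cond) */
    pthread_mutex_unlock(&init_lock);
    /* ... execution loop ... */
    return NULL;
}

static void spawn_and_wait(void)
{
    pthread_t th;

    pthread_create(&th, NULL, vcpu_fn, NULL);
    pthread_mutex_lock(&init_lock);
    while (!created) {                      /* the loop guards against spurious wakeups */
        pthread_cond_wait(&created_cond, &init_lock);
    }
    pthread_mutex_unlock(&init_lock);
}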
- */ - if (runstate_is_running()) { - qapi_event_send_stop(); - qapi_event_send_resume(); - return -1; - } - - /* We are sending this now, but the CPUs will be resumed shortly later */ - qapi_event_send_resume(); - - cpu_enable_ticks(); - runstate_set(RUN_STATE_RUNNING); - vm_state_notify(1, RUN_STATE_RUNNING); - return 0; -} - -void vm_start(void) -{ - if (!vm_prepare_start()) { - resume_all_vcpus(); - } -} - -/* does a state transition even if the VM is already stopped, - current state is forgotten forever */ -int vm_stop_force_state(RunState state) -{ - if (runstate_is_running()) { - return vm_stop(state); - } else { - runstate_set(state); - - bdrv_drain_all(); - /* Make sure to return an error if the flush in a previous vm_stop() - * failed. */ - return bdrv_flush_all(); - } -} - -void list_cpus(const char *optarg) -{ - /* XXX: implement xxx_cpu_list for targets that still miss it */ -#if defined(cpu_list) - cpu_list(); -#endif -} - -void qmp_memsave(int64_t addr, int64_t size, const char *filename, - bool has_cpu, int64_t cpu_index, Error **errp) -{ - FILE *f; - uint32_t l; - CPUState *cpu; - uint8_t buf[1024]; - int64_t orig_addr = addr, orig_size = size; - - if (!has_cpu) { - cpu_index = 0; - } - - cpu = qemu_get_cpu(cpu_index); - if (cpu == NULL) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index", - "a CPU number"); - return; - } - - f = fopen(filename, "wb"); - if (!f) { - error_setg_file_open(errp, errno, filename); - return; - } - - while (size != 0) { - l = sizeof(buf); - if (l > size) - l = size; - if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) { - error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64 - " specified", orig_addr, orig_size); - goto exit; - } - if (fwrite(buf, 1, l, f) != l) { - error_setg(errp, QERR_IO_ERROR); - goto exit; - } - addr += l; - size -= l; - } - -exit: - fclose(f); -} - -void qmp_pmemsave(int64_t addr, int64_t size, const char *filename, - Error **errp) -{ - FILE *f; - uint32_t l; - uint8_t buf[1024]; - - f = fopen(filename, "wb"); - if (!f) { - error_setg_file_open(errp, errno, filename); - return; - } - - while (size != 0) { - l = sizeof(buf); - if (l > size) - l = size; - cpu_physical_memory_read(addr, buf, l); - if (fwrite(buf, 1, l, f) != l) { - error_setg(errp, QERR_IO_ERROR); - goto exit; - } - addr += l; - size -= l; - } - -exit: - fclose(f); -} - -void qmp_inject_nmi(Error **errp) -{ - nmi_monitor_handle(monitor_get_cpu_index(), errp); -} - -void dump_drift_info(void) -{ - if (!use_icount) { - return; - } - - qemu_printf("Host - Guest clock %"PRIi64" ms\n", - (cpu_get_clock() - cpu_get_icount())/SCALE_MS); - if (icount_align_option) { - qemu_printf("Max guest delay %"PRIi64" ms\n", - -max_delay / SCALE_MS); - qemu_printf("Max guest advance %"PRIi64" ms\n", - max_advance / SCALE_MS); - } else { - qemu_printf("Max guest delay NA\n"); - qemu_printf("Max guest advance NA\n"); - } -} diff --git a/ioport.c b/ioport.c deleted file mode 100644 index 04e360e79a..0000000000 --- a/ioport.c +++ /dev/null @@ -1,299 +0,0 @@ -/* - * QEMU System Emulator - * - * Copyright (c) 2003-2008 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to 
the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -/* - * splitted out ioport related stuffs from vl.c. - */ - -#include "qemu/osdep.h" -#include "cpu.h" -#include "exec/ioport.h" -#include "trace-root.h" -#include "exec/memory.h" -#include "exec/address-spaces.h" - -typedef struct MemoryRegionPortioList { - MemoryRegion mr; - void *portio_opaque; - MemoryRegionPortio ports[]; -} MemoryRegionPortioList; - -static uint64_t unassigned_io_read(void *opaque, hwaddr addr, unsigned size) -{ - return -1ULL; -} - -static void unassigned_io_write(void *opaque, hwaddr addr, uint64_t val, - unsigned size) -{ -} - -const MemoryRegionOps unassigned_io_ops = { - .read = unassigned_io_read, - .write = unassigned_io_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -void cpu_outb(uint32_t addr, uint8_t val) -{ - trace_cpu_out(addr, 'b', val); - address_space_write(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, - &val, 1); -} - -void cpu_outw(uint32_t addr, uint16_t val) -{ - uint8_t buf[2]; - - trace_cpu_out(addr, 'w', val); - stw_p(buf, val); - address_space_write(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, - buf, 2); -} - -void cpu_outl(uint32_t addr, uint32_t val) -{ - uint8_t buf[4]; - - trace_cpu_out(addr, 'l', val); - stl_p(buf, val); - address_space_write(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, - buf, 4); -} - -uint8_t cpu_inb(uint32_t addr) -{ - uint8_t val; - - address_space_read(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, - &val, 1); - trace_cpu_in(addr, 'b', val); - return val; -} - -uint16_t cpu_inw(uint32_t addr) -{ - uint8_t buf[2]; - uint16_t val; - - address_space_read(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, buf, 2); - val = lduw_p(buf); - trace_cpu_in(addr, 'w', val); - return val; -} - -uint32_t cpu_inl(uint32_t addr) -{ - uint8_t buf[4]; - uint32_t val; - - address_space_read(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, buf, 4); - val = ldl_p(buf); - trace_cpu_in(addr, 'l', val); - return val; -} - -void portio_list_init(PortioList *piolist, - Object *owner, - const MemoryRegionPortio *callbacks, - void *opaque, const char *name) -{ - unsigned n = 0; - - while (callbacks[n].size) { - ++n; - } - - piolist->ports = callbacks; - piolist->nr = 0; - piolist->regions = g_new0(MemoryRegion *, n); - piolist->address_space = NULL; - piolist->opaque = opaque; - piolist->owner = owner; - piolist->name = name; - piolist->flush_coalesced_mmio = false; -} - -void portio_list_set_flush_coalesced(PortioList *piolist) -{ - piolist->flush_coalesced_mmio = true; -} - -void portio_list_destroy(PortioList *piolist) -{ - MemoryRegionPortioList *mrpio; - unsigned i; - - for (i = 0; i < piolist->nr; ++i) { - mrpio = container_of(piolist->regions[i], MemoryRegionPortioList, mr); - object_unparent(OBJECT(&mrpio->mr)); - g_free(mrpio); - } - g_free(piolist->regions); -} - -static const MemoryRegionPortio *find_portio(MemoryRegionPortioList *mrpio, - uint64_t offset, unsigned 
size, - bool write) -{ - const MemoryRegionPortio *mrp; - - for (mrp = mrpio->ports; mrp->size; ++mrp) { - if (offset >= mrp->offset && offset < mrp->offset + mrp->len && - size == mrp->size && - (write ? (bool)mrp->write : (bool)mrp->read)) { - return mrp; - } - } - return NULL; -} - -static uint64_t portio_read(void *opaque, hwaddr addr, unsigned size) -{ - MemoryRegionPortioList *mrpio = opaque; - const MemoryRegionPortio *mrp = find_portio(mrpio, addr, size, false); - uint64_t data; - - data = ((uint64_t)1 << (size * 8)) - 1; - if (mrp) { - data = mrp->read(mrpio->portio_opaque, mrp->base + addr); - } else if (size == 2) { - mrp = find_portio(mrpio, addr, 1, false); - if (mrp) { - data = mrp->read(mrpio->portio_opaque, mrp->base + addr); - if (addr + 1 < mrp->offset + mrp->len) { - data |= mrp->read(mrpio->portio_opaque, mrp->base + addr + 1) << 8; - } else { - data |= 0xff00; - } - } - } - return data; -} - -static void portio_write(void *opaque, hwaddr addr, uint64_t data, - unsigned size) -{ - MemoryRegionPortioList *mrpio = opaque; - const MemoryRegionPortio *mrp = find_portio(mrpio, addr, size, true); - - if (mrp) { - mrp->write(mrpio->portio_opaque, mrp->base + addr, data); - } else if (size == 2) { - mrp = find_portio(mrpio, addr, 1, true); - if (mrp) { - mrp->write(mrpio->portio_opaque, mrp->base + addr, data & 0xff); - if (addr + 1 < mrp->offset + mrp->len) { - mrp->write(mrpio->portio_opaque, mrp->base + addr + 1, data >> 8); - } - } - } -} - -static const MemoryRegionOps portio_ops = { - .read = portio_read, - .write = portio_write, - .endianness = DEVICE_LITTLE_ENDIAN, - .valid.unaligned = true, - .impl.unaligned = true, -}; - -static void portio_list_add_1(PortioList *piolist, - const MemoryRegionPortio *pio_init, - unsigned count, unsigned start, - unsigned off_low, unsigned off_high) -{ - MemoryRegionPortioList *mrpio; - unsigned i; - - /* Copy the sub-list and null-terminate it. */ - mrpio = g_malloc0(sizeof(MemoryRegionPortioList) + - sizeof(MemoryRegionPortio) * (count + 1)); - mrpio->portio_opaque = piolist->opaque; - memcpy(mrpio->ports, pio_init, sizeof(MemoryRegionPortio) * count); - memset(mrpio->ports + count, 0, sizeof(MemoryRegionPortio)); - - /* Adjust the offsets to all be zero-based for the region. */ - for (i = 0; i < count; ++i) { - mrpio->ports[i].offset -= off_low; - mrpio->ports[i].base = start + off_low; - } - - memory_region_init_io(&mrpio->mr, piolist->owner, &portio_ops, mrpio, - piolist->name, off_high - off_low); - if (piolist->flush_coalesced_mmio) { - memory_region_set_flush_coalesced(&mrpio->mr); - } - memory_region_add_subregion(piolist->address_space, - start + off_low, &mrpio->mr); - piolist->regions[piolist->nr] = &mrpio->mr; - ++piolist->nr; -} - -void portio_list_add(PortioList *piolist, - MemoryRegion *address_space, - uint32_t start) -{ - const MemoryRegionPortio *pio, *pio_start = piolist->ports; - unsigned int off_low, off_high, off_last, count; - - piolist->address_space = address_space; - - /* Handle the first entry specially. */ - off_last = off_low = pio_start->offset; - off_high = off_low + pio_start->len + pio_start->size - 1; - count = 1; - - for (pio = pio_start + 1; pio->size != 0; pio++, count++) { - /* All entries must be sorted by offset. */ - assert(pio->offset >= off_last); - off_last = pio->offset; - - /* If we see a hole, break the region. */ - if (off_last > off_high) { - portio_list_add_1(piolist, pio_start, count, start, off_low, - off_high); - /* ... and start collecting anew. 
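portio_list_add() here walks the sorted callback table once, widening the open window [off_low, off_high) while entries abut or overlap, and flushing a sub-region whenever a hole appears. The same single-pass clustering, stripped of the MemoryRegion machinery and of the access-size padding (the real code also extends off_high by size - 1; names here are illustrative):

#include <assert.h>
#include <stdio.h>

struct range { unsigned offset, len; };

/* Group sorted ranges into contiguous clusters, breaking at holes. */
static void cluster_ranges(const struct range *r, unsigned n)
{
    unsigned lo = r[0].offset;
    unsigned hi = r[0].offset + r[0].len;

    for (unsigned i = 1; i < n; i++) {
        assert(r[i].offset >= r[i - 1].offset);     /* input must be sorted */
        if (r[i].offset > hi) {                     /* hole: flush and restart */
            printf("cluster [%u, %u)\n", lo, hi);
            lo = r[i].offset;
            hi = r[i].offset + r[i].len;
        } else if (r[i].offset + r[i].len > hi) {
            hi = r[i].offset + r[i].len;            /* extend the open cluster */
        }
    }
    printf("cluster [%u, %u)\n", lo, hi);           /* there is always an open cluster */
}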
*/ - pio_start = pio; - off_low = off_last; - off_high = off_low + pio->len + pio_start->size - 1; - count = 0; - } else if (off_last + pio->len > off_high) { - off_high = off_last + pio->len + pio_start->size - 1; - } - } - - /* There will always be an open sub-list. */ - portio_list_add_1(piolist, pio_start, count, start, off_low, off_high); -} - -void portio_list_del(PortioList *piolist) -{ - MemoryRegionPortioList *mrpio; - unsigned i; - - for (i = 0; i < piolist->nr; ++i) { - mrpio = container_of(piolist->regions[i], MemoryRegionPortioList, mr); - memory_region_del_subregion(piolist->address_space, &mrpio->mr); - } -} diff --git a/memory.c b/memory.c deleted file mode 100644 index 9200b20130..0000000000 --- a/memory.c +++ /dev/null @@ -1,3250 +0,0 @@ -/* - * Physical memory management - * - * Copyright 2011 Red Hat, Inc. and/or its affiliates - * - * Authors: - * Avi Kivity - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. - */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "cpu.h" -#include "exec/memory.h" -#include "exec/address-spaces.h" -#include "qapi/visitor.h" -#include "qemu/bitops.h" -#include "qemu/error-report.h" -#include "qemu/main-loop.h" -#include "qemu/qemu-print.h" -#include "qom/object.h" -#include "trace-root.h" - -#include "exec/memory-internal.h" -#include "exec/ram_addr.h" -#include "sysemu/kvm.h" -#include "sysemu/runstate.h" -#include "sysemu/tcg.h" -#include "sysemu/accel.h" -#include "hw/boards.h" -#include "migration/vmstate.h" - -//#define DEBUG_UNASSIGNED - -static unsigned memory_region_transaction_depth; -static bool memory_region_update_pending; -static bool ioeventfd_update_pending; -bool global_dirty_log; - -static QTAILQ_HEAD(, MemoryListener) memory_listeners - = QTAILQ_HEAD_INITIALIZER(memory_listeners); - -static QTAILQ_HEAD(, AddressSpace) address_spaces - = QTAILQ_HEAD_INITIALIZER(address_spaces); - -static GHashTable *flat_views; - -typedef struct AddrRange AddrRange; - -/* - * Note that signed integers are needed for negative offsetting in aliases - * (large MemoryRegion::alias_offset). - */ -struct AddrRange { - Int128 start; - Int128 size; -}; - -static AddrRange addrrange_make(Int128 start, Int128 size) -{ - return (AddrRange) { start, size }; -} - -static bool addrrange_equal(AddrRange r1, AddrRange r2) -{ - return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size); -} - -static Int128 addrrange_end(AddrRange r) -{ - return int128_add(r.start, r.size); -} - -static AddrRange addrrange_shift(AddrRange range, Int128 delta) -{ - int128_addto(&range.start, delta); - return range; -} - -static bool addrrange_contains(AddrRange range, Int128 addr) -{ - return int128_ge(addr, range.start) - && int128_lt(addr, addrrange_end(range)); -} - -static bool addrrange_intersects(AddrRange r1, AddrRange r2) -{ - return addrrange_contains(r1, r2.start) - || addrrange_contains(r2, r1.start); -} - -static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2) -{ - Int128 start = int128_max(r1.start, r2.start); - Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2)); - return addrrange_make(start, int128_sub(end, start)); -} - -enum ListenerDirection { Forward, Reverse }; - -#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...) 
\ - do { \ - MemoryListener *_listener; \ - \ - switch (_direction) { \ - case Forward: \ - QTAILQ_FOREACH(_listener, &memory_listeners, link) { \ - if (_listener->_callback) { \ - _listener->_callback(_listener, ##_args); \ - } \ - } \ - break; \ - case Reverse: \ - QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \ - if (_listener->_callback) { \ - _listener->_callback(_listener, ##_args); \ - } \ - } \ - break; \ - default: \ - abort(); \ - } \ - } while (0) - -#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \ - do { \ - MemoryListener *_listener; \ - \ - switch (_direction) { \ - case Forward: \ - QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) { \ - if (_listener->_callback) { \ - _listener->_callback(_listener, _section, ##_args); \ - } \ - } \ - break; \ - case Reverse: \ - QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \ - if (_listener->_callback) { \ - _listener->_callback(_listener, _section, ##_args); \ - } \ - } \ - break; \ - default: \ - abort(); \ - } \ - } while (0) - -/* No need to ref/unref .mr, the FlatRange keeps it alive. */ -#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \ - do { \ - MemoryRegionSection mrs = section_from_flat_range(fr, \ - address_space_to_flatview(as)); \ - MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args); \ - } while(0) - -struct CoalescedMemoryRange { - AddrRange addr; - QTAILQ_ENTRY(CoalescedMemoryRange) link; -}; - -struct MemoryRegionIoeventfd { - AddrRange addr; - bool match_data; - uint64_t data; - EventNotifier *e; -}; - -static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a, - MemoryRegionIoeventfd *b) -{ - if (int128_lt(a->addr.start, b->addr.start)) { - return true; - } else if (int128_gt(a->addr.start, b->addr.start)) { - return false; - } else if (int128_lt(a->addr.size, b->addr.size)) { - return true; - } else if (int128_gt(a->addr.size, b->addr.size)) { - return false; - } else if (a->match_data < b->match_data) { - return true; - } else if (a->match_data > b->match_data) { - return false; - } else if (a->match_data) { - if (a->data < b->data) { - return true; - } else if (a->data > b->data) { - return false; - } - } - if (a->e < b->e) { - return true; - } else if (a->e > b->e) { - return false; - } - return false; -} - -static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a, - MemoryRegionIoeventfd *b) -{ - return !memory_region_ioeventfd_before(a, b) - && !memory_region_ioeventfd_before(b, a); -} - -/* Range of memory in the global map. Addresses are absolute. 
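memory_region_ioeventfd_before() above is a lexicographic less-than over (start, size, match_data, data, e), and memory_region_ioeventfd_equal() derives equality from it in the standard way: two keys are equal exactly when neither sorts before the other. The same idiom on a small illustrative struct:

#include <stdbool.h>
#include <stdint.h>

struct key { uint64_t start, size; int fd; };

static bool key_before(const struct key *a, const struct key *b)
{
    if (a->start != b->start) {
        return a->start < b->start;     /* primary field */
    }
    if (a->size != b->size) {
        return a->size < b->size;       /* first tie-break */
    }
    return a->fd < b->fd;               /* final tie-break */
}

static bool key_equal(const struct key *a, const struct key *b)
{
    /* equality derived from the ordering, as memory_region_ioeventfd_equal() does */
    return !key_before(a, b) && !key_before(b, a);
}

Deriving equality this way guarantees the two predicates can never disagree, which matters because the ioeventfd arrays are kept sorted by the same comparator.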
*/ -struct FlatRange { - MemoryRegion *mr; - hwaddr offset_in_region; - AddrRange addr; - uint8_t dirty_log_mask; - bool romd_mode; - bool readonly; - bool nonvolatile; -}; - -#define FOR_EACH_FLAT_RANGE(var, view) \ - for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var) - -static inline MemoryRegionSection -section_from_flat_range(FlatRange *fr, FlatView *fv) -{ - return (MemoryRegionSection) { - .mr = fr->mr, - .fv = fv, - .offset_within_region = fr->offset_in_region, - .size = fr->addr.size, - .offset_within_address_space = int128_get64(fr->addr.start), - .readonly = fr->readonly, - .nonvolatile = fr->nonvolatile, - }; -} - -static bool flatrange_equal(FlatRange *a, FlatRange *b) -{ - return a->mr == b->mr - && addrrange_equal(a->addr, b->addr) - && a->offset_in_region == b->offset_in_region - && a->romd_mode == b->romd_mode - && a->readonly == b->readonly - && a->nonvolatile == b->nonvolatile; -} - -static FlatView *flatview_new(MemoryRegion *mr_root) -{ - FlatView *view; - - view = g_new0(FlatView, 1); - view->ref = 1; - view->root = mr_root; - memory_region_ref(mr_root); - trace_flatview_new(view, mr_root); - - return view; -} - -/* Insert a range into a given position. Caller is responsible for maintaining - * sorting order. - */ -static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range) -{ - if (view->nr == view->nr_allocated) { - view->nr_allocated = MAX(2 * view->nr, 10); - view->ranges = g_realloc(view->ranges, - view->nr_allocated * sizeof(*view->ranges)); - } - memmove(view->ranges + pos + 1, view->ranges + pos, - (view->nr - pos) * sizeof(FlatRange)); - view->ranges[pos] = *range; - memory_region_ref(range->mr); - ++view->nr; -} - -static void flatview_destroy(FlatView *view) -{ - int i; - - trace_flatview_destroy(view, view->root); - if (view->dispatch) { - address_space_dispatch_free(view->dispatch); - } - for (i = 0; i < view->nr; i++) { - memory_region_unref(view->ranges[i].mr); - } - g_free(view->ranges); - memory_region_unref(view->root); - g_free(view); -} - -static bool flatview_ref(FlatView *view) -{ - return atomic_fetch_inc_nonzero(&view->ref) > 0; -} - -void flatview_unref(FlatView *view) -{ - if (atomic_fetch_dec(&view->ref) == 1) { - trace_flatview_destroy_rcu(view, view->root); - assert(view->root); - call_rcu(view, flatview_destroy, rcu); - } -} - -static bool can_merge(FlatRange *r1, FlatRange *r2) -{ - return int128_eq(addrrange_end(r1->addr), r2->addr.start) - && r1->mr == r2->mr - && int128_eq(int128_add(int128_make64(r1->offset_in_region), - r1->addr.size), - int128_make64(r2->offset_in_region)) - && r1->dirty_log_mask == r2->dirty_log_mask - && r1->romd_mode == r2->romd_mode - && r1->readonly == r2->readonly - && r1->nonvolatile == r2->nonvolatile; -} - -/* Attempt to simplify a view by merging adjacent ranges */ -static void flatview_simplify(FlatView *view) -{ - unsigned i, j, k; - - i = 0; - while (i < view->nr) { - j = i + 1; - while (j < view->nr - && can_merge(&view->ranges[j-1], &view->ranges[j])) { - int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size); - ++j; - } - ++i; - for (k = i; k < j; k++) { - memory_region_unref(view->ranges[k].mr); - } - memmove(&view->ranges[i], &view->ranges[j], - (view->nr - j) * sizeof(view->ranges[j])); - view->nr -= j - i; - } -} - -static bool memory_region_big_endian(MemoryRegion *mr) -{ -#ifdef TARGET_WORDS_BIGENDIAN - return mr->ops->endianness != DEVICE_LITTLE_ENDIAN; -#else - return mr->ops->endianness == DEVICE_BIG_ENDIAN; -#endif -} - -static void 
adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op) -{ - if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) { - switch (op & MO_SIZE) { - case MO_8: - break; - case MO_16: - *data = bswap16(*data); - break; - case MO_32: - *data = bswap32(*data); - break; - case MO_64: - *data = bswap64(*data); - break; - default: - g_assert_not_reached(); - } - } -} - -static inline void memory_region_shift_read_access(uint64_t *value, - signed shift, - uint64_t mask, - uint64_t tmp) -{ - if (shift >= 0) { - *value |= (tmp & mask) << shift; - } else { - *value |= (tmp & mask) >> -shift; - } -} - -static inline uint64_t memory_region_shift_write_access(uint64_t *value, - signed shift, - uint64_t mask) -{ - uint64_t tmp; - - if (shift >= 0) { - tmp = (*value >> shift) & mask; - } else { - tmp = (*value << -shift) & mask; - } - - return tmp; -} - -static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset) -{ - MemoryRegion *root; - hwaddr abs_addr = offset; - - abs_addr += mr->addr; - for (root = mr; root->container; ) { - root = root->container; - abs_addr += root->addr; - } - - return abs_addr; -} - -static int get_cpu_index(void) -{ - if (current_cpu) { - return current_cpu->cpu_index; - } - return -1; -} - -static MemTxResult memory_region_read_accessor(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - signed shift, - uint64_t mask, - MemTxAttrs attrs) -{ - uint64_t tmp; - - tmp = mr->ops->read(mr->opaque, addr, size); - if (mr->subpage) { - trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size); - } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) { - hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); - trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size); - } - memory_region_shift_read_access(value, shift, mask, tmp); - return MEMTX_OK; -} - -static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - signed shift, - uint64_t mask, - MemTxAttrs attrs) -{ - uint64_t tmp = 0; - MemTxResult r; - - r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs); - if (mr->subpage) { - trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size); - } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) { - hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); - trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size); - } - memory_region_shift_read_access(value, shift, mask, tmp); - return r; -} - -static MemTxResult memory_region_write_accessor(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - signed shift, - uint64_t mask, - MemTxAttrs attrs) -{ - uint64_t tmp = memory_region_shift_write_access(value, shift, mask); - - if (mr->subpage) { - trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size); - } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) { - hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); - trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size); - } - mr->ops->write(mr->opaque, addr, tmp, size); - return MEMTX_OK; -} - -static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - signed shift, - uint64_t mask, - MemTxAttrs attrs) -{ - uint64_t tmp = memory_region_shift_write_access(value, shift, mask); - - if (mr->subpage) { - trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size); - } 
else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) { - hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); - trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size); - } - return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs); -} - -static MemTxResult access_with_adjusted_size(hwaddr addr, - uint64_t *value, - unsigned size, - unsigned access_size_min, - unsigned access_size_max, - MemTxResult (*access_fn) - (MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - signed shift, - uint64_t mask, - MemTxAttrs attrs), - MemoryRegion *mr, - MemTxAttrs attrs) -{ - uint64_t access_mask; - unsigned access_size; - unsigned i; - MemTxResult r = MEMTX_OK; - - if (!access_size_min) { - access_size_min = 1; - } - if (!access_size_max) { - access_size_max = 4; - } - - /* FIXME: support unaligned access? */ - access_size = MAX(MIN(size, access_size_max), access_size_min); - access_mask = MAKE_64BIT_MASK(0, access_size * 8); - if (memory_region_big_endian(mr)) { - for (i = 0; i < size; i += access_size) { - r |= access_fn(mr, addr + i, value, access_size, - (size - access_size - i) * 8, access_mask, attrs); - } - } else { - for (i = 0; i < size; i += access_size) { - r |= access_fn(mr, addr + i, value, access_size, i * 8, - access_mask, attrs); - } - } - return r; -} - -static AddressSpace *memory_region_to_address_space(MemoryRegion *mr) -{ - AddressSpace *as; - - while (mr->container) { - mr = mr->container; - } - QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { - if (mr == as->root) { - return as; - } - } - return NULL; -} - -/* Render a memory region into the global view. Ranges in @view obscure - * ranges in @mr. - */ -static void render_memory_region(FlatView *view, - MemoryRegion *mr, - Int128 base, - AddrRange clip, - bool readonly, - bool nonvolatile) -{ - MemoryRegion *subregion; - unsigned i; - hwaddr offset_in_region; - Int128 remain; - Int128 now; - FlatRange fr; - AddrRange tmp; - - if (!mr->enabled) { - return; - } - - int128_addto(&base, int128_make64(mr->addr)); - readonly |= mr->readonly; - nonvolatile |= mr->nonvolatile; - - tmp = addrrange_make(base, mr->size); - - if (!addrrange_intersects(tmp, clip)) { - return; - } - - clip = addrrange_intersection(tmp, clip); - - if (mr->alias) { - int128_subfrom(&base, int128_make64(mr->alias->addr)); - int128_subfrom(&base, int128_make64(mr->alias_offset)); - render_memory_region(view, mr->alias, base, clip, - readonly, nonvolatile); - return; - } - - /* Render subregions in priority order. */ - QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) { - render_memory_region(view, subregion, base, clip, - readonly, nonvolatile); - } - - if (!mr->terminates) { - return; - } - - offset_in_region = int128_get64(int128_sub(clip.start, base)); - base = clip.start; - remain = clip.size; - - fr.mr = mr; - fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr); - fr.romd_mode = mr->romd_mode; - fr.readonly = readonly; - fr.nonvolatile = nonvolatile; - - /* Render the region itself into any gaps left by the current view. 
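render_memory_region() above clips the region's absolute range against the caller's clip window before recursing into aliases and subregions. The arithmetic is ordinary interval intersection, done in 128-bit math so that start + size cannot wrap; a stripped-down version using the GCC/Clang unsigned __int128 extension in place of QEMU's Int128:

typedef unsigned __int128 u128;

struct range128 { u128 start, size; };

static bool range_intersects(struct range128 a, struct range128 b)
{
    return a.start < b.start + b.size && b.start < a.start + a.size;
}

static struct range128 range_intersection(struct range128 a, struct range128 b)
{
    u128 start = a.start > b.start ? a.start : b.start;     /* max of the starts */
    u128 end_a = a.start + a.size, end_b = b.start + b.size;
    u128 end = end_a < end_b ? end_a : end_b;               /* min of the ends */

    return (struct range128){ start, end - start };         /* valid only if they intersect */
}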
*/ - for (i = 0; i < view->nr && int128_nz(remain); ++i) { - if (int128_ge(base, addrrange_end(view->ranges[i].addr))) { - continue; - } - if (int128_lt(base, view->ranges[i].addr.start)) { - now = int128_min(remain, - int128_sub(view->ranges[i].addr.start, base)); - fr.offset_in_region = offset_in_region; - fr.addr = addrrange_make(base, now); - flatview_insert(view, i, &fr); - ++i; - int128_addto(&base, now); - offset_in_region += int128_get64(now); - int128_subfrom(&remain, now); - } - now = int128_sub(int128_min(int128_add(base, remain), - addrrange_end(view->ranges[i].addr)), - base); - int128_addto(&base, now); - offset_in_region += int128_get64(now); - int128_subfrom(&remain, now); - } - if (int128_nz(remain)) { - fr.offset_in_region = offset_in_region; - fr.addr = addrrange_make(base, remain); - flatview_insert(view, i, &fr); - } -} - -static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr) -{ - while (mr->enabled) { - if (mr->alias) { - if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) { - /* The alias is included in its entirety. Use it as - * the "real" root, so that we can share more FlatViews. - */ - mr = mr->alias; - continue; - } - } else if (!mr->terminates) { - unsigned int found = 0; - MemoryRegion *child, *next = NULL; - QTAILQ_FOREACH(child, &mr->subregions, subregions_link) { - if (child->enabled) { - if (++found > 1) { - next = NULL; - break; - } - if (!child->addr && int128_ge(mr->size, child->size)) { - /* A child is included in its entirety. If it's the only - * enabled one, use it in the hope of finding an alias down the - * way. This will also let us share FlatViews. - */ - next = child; - } - } - } - if (found == 0) { - return NULL; - } - if (next) { - mr = next; - continue; - } - } - - return mr; - } - - return NULL; -} - -/* Render a memory topology into a list of disjoint absolute ranges. */ -static FlatView *generate_memory_topology(MemoryRegion *mr) -{ - int i; - FlatView *view; - - view = flatview_new(mr); - - if (mr) { - render_memory_region(view, mr, int128_zero(), - addrrange_make(int128_zero(), int128_2_64()), - false, false); - } - flatview_simplify(view); - - view->dispatch = address_space_dispatch_new(view); - for (i = 0; i < view->nr; i++) { - MemoryRegionSection mrs = - section_from_flat_range(&view->ranges[i], view); - flatview_add_to_dispatch(view, &mrs); - } - address_space_dispatch_compact(view->dispatch); - g_hash_table_replace(flat_views, mr, view); - - return view; -} - -static void address_space_add_del_ioeventfds(AddressSpace *as, - MemoryRegionIoeventfd *fds_new, - unsigned fds_new_nb, - MemoryRegionIoeventfd *fds_old, - unsigned fds_old_nb) -{ - unsigned iold, inew; - MemoryRegionIoeventfd *fd; - MemoryRegionSection section; - - /* Generate a symmetric difference of the old and new fd sets, adding - * and deleting as necessary. 
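The comment above describes the classic two-pointer sweep over two sorted arrays: whichever cursor points at the element that orders first names an entry present on only that side, so it is deleted (old) or added (new); equal elements advance both cursors. In miniature, over sorted ints, with callbacks standing in for the eventfd_del/eventfd_add listener calls:

static void sym_diff(const int *a_old, unsigned n_old,
                     const int *a_new, unsigned n_new,
                     void (*del)(int), void (*add)(int))
{
    unsigned i = 0, j = 0;

    while (i < n_old || j < n_new) {
        if (i < n_old && (j == n_new || a_old[i] < a_new[j])) {
            del(a_old[i++]);        /* only in the old set */
        } else if (j < n_new && (i == n_old || a_new[j] < a_old[i])) {
            add(a_new[j++]);        /* only in the new set */
        } else {
            i++;                    /* in both: nothing changes */
            j++;
        }
    }
}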
- */ - - iold = inew = 0; - while (iold < fds_old_nb || inew < fds_new_nb) { - if (iold < fds_old_nb - && (inew == fds_new_nb - || memory_region_ioeventfd_before(&fds_old[iold], - &fds_new[inew]))) { - fd = &fds_old[iold]; - section = (MemoryRegionSection) { - .fv = address_space_to_flatview(as), - .offset_within_address_space = int128_get64(fd->addr.start), - .size = fd->addr.size, - }; - MEMORY_LISTENER_CALL(as, eventfd_del, Forward, §ion, - fd->match_data, fd->data, fd->e); - ++iold; - } else if (inew < fds_new_nb - && (iold == fds_old_nb - || memory_region_ioeventfd_before(&fds_new[inew], - &fds_old[iold]))) { - fd = &fds_new[inew]; - section = (MemoryRegionSection) { - .fv = address_space_to_flatview(as), - .offset_within_address_space = int128_get64(fd->addr.start), - .size = fd->addr.size, - }; - MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, §ion, - fd->match_data, fd->data, fd->e); - ++inew; - } else { - ++iold; - ++inew; - } - } -} - -FlatView *address_space_get_flatview(AddressSpace *as) -{ - FlatView *view; - - RCU_READ_LOCK_GUARD(); - do { - view = address_space_to_flatview(as); - /* If somebody has replaced as->current_map concurrently, - * flatview_ref returns false. - */ - } while (!flatview_ref(view)); - return view; -} - -static void address_space_update_ioeventfds(AddressSpace *as) -{ - FlatView *view; - FlatRange *fr; - unsigned ioeventfd_nb = 0; - unsigned ioeventfd_max; - MemoryRegionIoeventfd *ioeventfds; - AddrRange tmp; - unsigned i; - - /* - * It is likely that the number of ioeventfds hasn't changed much, so use - * the previous size as the starting value, with some headroom to avoid - * gratuitous reallocations. - */ - ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4); - ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max); - - view = address_space_get_flatview(as); - FOR_EACH_FLAT_RANGE(fr, view) { - for (i = 0; i < fr->mr->ioeventfd_nb; ++i) { - tmp = addrrange_shift(fr->mr->ioeventfds[i].addr, - int128_sub(fr->addr.start, - int128_make64(fr->offset_in_region))); - if (addrrange_intersects(fr->addr, tmp)) { - ++ioeventfd_nb; - if (ioeventfd_nb > ioeventfd_max) { - ioeventfd_max = MAX(ioeventfd_max * 2, 4); - ioeventfds = g_realloc(ioeventfds, - ioeventfd_max * sizeof(*ioeventfds)); - } - ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i]; - ioeventfds[ioeventfd_nb-1].addr = tmp; - } - } - } - - address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb, - as->ioeventfds, as->ioeventfd_nb); - - g_free(as->ioeventfds); - as->ioeventfds = ioeventfds; - as->ioeventfd_nb = ioeventfd_nb; - flatview_unref(view); -} - -/* - * Notify the memory listeners about the coalesced IO change events of - * range `cmr'. Only the part that has intersection of the specified - * FlatRange will be sent. 
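address_space_get_flatview() above leans on flatview_ref() failing once the count has reached zero: atomic_fetch_inc_nonzero() refuses to resurrect a view that is already being torn down, so the reader simply reloads as->current_map and retries. The underlying primitive is a compare-and-swap loop; a standalone C11 sketch:

#include <stdatomic.h>
#include <stdbool.h>

/* Increment *ref only while it is nonzero; return false otherwise.
 * Mirrors the atomic_fetch_inc_nonzero() used by flatview_ref().
 */
static bool ref_if_alive(atomic_uint *ref)
{
    unsigned old = atomic_load(ref);

    while (old != 0) {
        if (atomic_compare_exchange_weak(ref, &old, old + 1)) {
            return true;            /* we now hold a valid reference */
        }
        /* CAS failure reloaded `old'; loop and re-check for zero */
    }
    return false;                   /* object is already dying; caller must retry */
}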
- */ -static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as, - CoalescedMemoryRange *cmr, bool add) -{ - AddrRange tmp; - - tmp = addrrange_shift(cmr->addr, - int128_sub(fr->addr.start, - int128_make64(fr->offset_in_region))); - if (!addrrange_intersects(tmp, fr->addr)) { - return; - } - tmp = addrrange_intersection(tmp, fr->addr); - - if (add) { - MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add, - int128_get64(tmp.start), - int128_get64(tmp.size)); - } else { - MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del, - int128_get64(tmp.start), - int128_get64(tmp.size)); - } -} - -static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as) -{ - CoalescedMemoryRange *cmr; - - QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) { - flat_range_coalesced_io_notify(fr, as, cmr, false); - } -} - -static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as) -{ - MemoryRegion *mr = fr->mr; - CoalescedMemoryRange *cmr; - - if (QTAILQ_EMPTY(&mr->coalesced)) { - return; - } - - QTAILQ_FOREACH(cmr, &mr->coalesced, link) { - flat_range_coalesced_io_notify(fr, as, cmr, true); - } -} - -static void address_space_update_topology_pass(AddressSpace *as, - const FlatView *old_view, - const FlatView *new_view, - bool adding) -{ - unsigned iold, inew; - FlatRange *frold, *frnew; - - /* Generate a symmetric difference of the old and new memory maps. - * Kill ranges in the old map, and instantiate ranges in the new map. - */ - iold = inew = 0; - while (iold < old_view->nr || inew < new_view->nr) { - if (iold < old_view->nr) { - frold = &old_view->ranges[iold]; - } else { - frold = NULL; - } - if (inew < new_view->nr) { - frnew = &new_view->ranges[inew]; - } else { - frnew = NULL; - } - - if (frold - && (!frnew - || int128_lt(frold->addr.start, frnew->addr.start) - || (int128_eq(frold->addr.start, frnew->addr.start) - && !flatrange_equal(frold, frnew)))) { - /* In old but not in new, or in both but attributes changed. */ - - if (!adding) { - flat_range_coalesced_io_del(frold, as); - MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del); - } - - ++iold; - } else if (frold && frnew && flatrange_equal(frold, frnew)) { - /* In both and unchanged (except logging may have changed) */ - - if (adding) { - MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop); - if (frnew->dirty_log_mask & ~frold->dirty_log_mask) { - MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start, - frold->dirty_log_mask, - frnew->dirty_log_mask); - } - if (frold->dirty_log_mask & ~frnew->dirty_log_mask) { - MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop, - frold->dirty_log_mask, - frnew->dirty_log_mask); - } - } - - ++iold; - ++inew; - } else { - /* In new */ - - if (adding) { - MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add); - flat_range_coalesced_io_add(frnew, as); - } - - ++inew; - } - } -} - -static void flatviews_init(void) -{ - static FlatView *empty_view; - - if (flat_views) { - return; - } - - flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, - (GDestroyNotify) flatview_unref); - if (!empty_view) { - empty_view = generate_memory_topology(NULL); - /* We keep it alive forever in the global variable. 
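flatviews_init() above (and flatviews_reset() just after it) keep one canonical FlatView per distinct physical root in a GHashTable, so every address space whose root collapses to the same region shares a single view. The caching pattern on its own, assuming GLib as QEMU does (View and generate_view() are placeholders for FlatView and generate_memory_topology()):

#include <glib.h>

typedef struct { int nr; } View;

static GHashTable *view_cache;

static View *generate_view(void *root)
{
    (void)root;
    return g_new0(View, 1);     /* stands in for the expensive topology render */
}

static View *get_view(void *root)
{
    View *v;

    if (!view_cache) {
        view_cache = g_hash_table_new(g_direct_hash, g_direct_equal);
    }
    v = g_hash_table_lookup(view_cache, root);
    if (!v) {
        v = generate_view(root);                    /* built once per distinct root */
        g_hash_table_insert(view_cache, root, v);
    }
    return v;                                       /* shared by every user of this root */
}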
*/ - flatview_ref(empty_view); - } else { - g_hash_table_replace(flat_views, NULL, empty_view); - flatview_ref(empty_view); - } -} - -static void flatviews_reset(void) -{ - AddressSpace *as; - - if (flat_views) { - g_hash_table_unref(flat_views); - flat_views = NULL; - } - flatviews_init(); - - /* Render unique FVs */ - QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { - MemoryRegion *physmr = memory_region_get_flatview_root(as->root); - - if (g_hash_table_lookup(flat_views, physmr)) { - continue; - } - - generate_memory_topology(physmr); - } -} - -static void address_space_set_flatview(AddressSpace *as) -{ - FlatView *old_view = address_space_to_flatview(as); - MemoryRegion *physmr = memory_region_get_flatview_root(as->root); - FlatView *new_view = g_hash_table_lookup(flat_views, physmr); - - assert(new_view); - - if (old_view == new_view) { - return; - } - - if (old_view) { - flatview_ref(old_view); - } - - flatview_ref(new_view); - - if (!QTAILQ_EMPTY(&as->listeners)) { - FlatView tmpview = { .nr = 0 }, *old_view2 = old_view; - - if (!old_view2) { - old_view2 = &tmpview; - } - address_space_update_topology_pass(as, old_view2, new_view, false); - address_space_update_topology_pass(as, old_view2, new_view, true); - } - - /* Writes are protected by the BQL. */ - atomic_rcu_set(&as->current_map, new_view); - if (old_view) { - flatview_unref(old_view); - } - - /* Note that all the old MemoryRegions are still alive up to this - * point. This relieves most MemoryListeners from the need to - * ref/unref the MemoryRegions they get---unless they use them - * outside the iothread mutex, in which case precise reference - * counting is necessary. - */ - if (old_view) { - flatview_unref(old_view); - } -} - -static void address_space_update_topology(AddressSpace *as) -{ - MemoryRegion *physmr = memory_region_get_flatview_root(as->root); - - flatviews_init(); - if (!g_hash_table_lookup(flat_views, physmr)) { - generate_memory_topology(physmr); - } - address_space_set_flatview(as); -} - -void memory_region_transaction_begin(void) -{ - qemu_flush_coalesced_mmio_buffer(); - ++memory_region_transaction_depth; -} - -void memory_region_transaction_commit(void) -{ - AddressSpace *as; - - assert(memory_region_transaction_depth); - assert(qemu_mutex_iothread_locked()); - - --memory_region_transaction_depth; - if (!memory_region_transaction_depth) { - if (memory_region_update_pending) { - flatviews_reset(); - - MEMORY_LISTENER_CALL_GLOBAL(begin, Forward); - - QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { - address_space_set_flatview(as); - address_space_update_ioeventfds(as); - } - memory_region_update_pending = false; - ioeventfd_update_pending = false; - MEMORY_LISTENER_CALL_GLOBAL(commit, Forward); - } else if (ioeventfd_update_pending) { - QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { - address_space_update_ioeventfds(as); - } - ioeventfd_update_pending = false; - } - } -} - -static void memory_region_destructor_none(MemoryRegion *mr) -{ -} - -static void memory_region_destructor_ram(MemoryRegion *mr) -{ - qemu_ram_free(mr->ram_block); -} - -static bool memory_region_need_escape(char c) -{ - return c == '/' || c == '[' || c == '\\' || c == ']'; -} - -static char *memory_region_escape_name(const char *name) -{ - const char *p; - char *escaped, *q; - uint8_t c; - size_t bytes = 0; - - for (p = name; *p; p++) { - bytes += memory_region_need_escape(*p) ? 
4 : 1; - } - if (bytes == p - name) { - return g_memdup(name, bytes + 1); - } - - escaped = g_malloc(bytes + 1); - for (p = name, q = escaped; *p; p++) { - c = *p; - if (unlikely(memory_region_need_escape(c))) { - *q++ = '\\'; - *q++ = 'x'; - *q++ = "0123456789abcdef"[c >> 4]; - c = "0123456789abcdef"[c & 15]; - } - *q++ = c; - } - *q = 0; - return escaped; -} - -static void memory_region_do_init(MemoryRegion *mr, - Object *owner, - const char *name, - uint64_t size) -{ - mr->size = int128_make64(size); - if (size == UINT64_MAX) { - mr->size = int128_2_64(); - } - mr->name = g_strdup(name); - mr->owner = owner; - mr->ram_block = NULL; - - if (name) { - char *escaped_name = memory_region_escape_name(name); - char *name_array = g_strdup_printf("%s[*]", escaped_name); - - if (!owner) { - owner = container_get(qdev_get_machine(), "/unattached"); - } - - object_property_add_child(owner, name_array, OBJECT(mr)); - object_unref(OBJECT(mr)); - g_free(name_array); - g_free(escaped_name); - } -} - -void memory_region_init(MemoryRegion *mr, - Object *owner, - const char *name, - uint64_t size) -{ - object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION); - memory_region_do_init(mr, owner, name, size); -} - -static void memory_region_get_container(Object *obj, Visitor *v, - const char *name, void *opaque, - Error **errp) -{ - MemoryRegion *mr = MEMORY_REGION(obj); - char *path = (char *)""; - - if (mr->container) { - path = object_get_canonical_path(OBJECT(mr->container)); - } - visit_type_str(v, name, &path, errp); - if (mr->container) { - g_free(path); - } -} - -static Object *memory_region_resolve_container(Object *obj, void *opaque, - const char *part) -{ - MemoryRegion *mr = MEMORY_REGION(obj); - - return OBJECT(mr->container); -} - -static void memory_region_get_priority(Object *obj, Visitor *v, - const char *name, void *opaque, - Error **errp) -{ - MemoryRegion *mr = MEMORY_REGION(obj); - int32_t value = mr->priority; - - visit_type_int32(v, name, &value, errp); -} - -static void memory_region_get_size(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) -{ - MemoryRegion *mr = MEMORY_REGION(obj); - uint64_t value = memory_region_size(mr); - - visit_type_uint64(v, name, &value, errp); -} - -static void memory_region_initfn(Object *obj) -{ - MemoryRegion *mr = MEMORY_REGION(obj); - ObjectProperty *op; - - mr->ops = &unassigned_mem_ops; - mr->enabled = true; - mr->romd_mode = true; - mr->global_locking = true; - mr->destructor = memory_region_destructor_none; - QTAILQ_INIT(&mr->subregions); - QTAILQ_INIT(&mr->coalesced); - - op = object_property_add(OBJECT(mr), "container", - "link<" TYPE_MEMORY_REGION ">", - memory_region_get_container, - NULL, /* memory_region_set_container */ - NULL, NULL); - op->resolve = memory_region_resolve_container; - - object_property_add_uint64_ptr(OBJECT(mr), "addr", - &mr->addr, OBJ_PROP_FLAG_READ); - object_property_add(OBJECT(mr), "priority", "uint32", - memory_region_get_priority, - NULL, /* memory_region_set_priority */ - NULL, NULL); - object_property_add(OBJECT(mr), "size", "uint64", - memory_region_get_size, - NULL, /* memory_region_set_size, */ - NULL, NULL); -} - -static void iommu_memory_region_initfn(Object *obj) -{ - MemoryRegion *mr = MEMORY_REGION(obj); - - mr->is_iommu = true; -} - -static uint64_t unassigned_mem_read(void *opaque, hwaddr addr, - unsigned size) -{ -#ifdef DEBUG_UNASSIGNED - printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); -#endif - return 0; -} - -static void unassigned_mem_write(void *opaque, hwaddr addr, - 
uint64_t val, unsigned size) -{ -#ifdef DEBUG_UNASSIGNED - printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val); -#endif -} - -static bool unassigned_mem_accepts(void *opaque, hwaddr addr, - unsigned size, bool is_write, - MemTxAttrs attrs) -{ - return false; -} - -const MemoryRegionOps unassigned_mem_ops = { - .valid.accepts = unassigned_mem_accepts, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static uint64_t memory_region_ram_device_read(void *opaque, - hwaddr addr, unsigned size) -{ - MemoryRegion *mr = opaque; - uint64_t data = (uint64_t)~0; - - switch (size) { - case 1: - data = *(uint8_t *)(mr->ram_block->host + addr); - break; - case 2: - data = *(uint16_t *)(mr->ram_block->host + addr); - break; - case 4: - data = *(uint32_t *)(mr->ram_block->host + addr); - break; - case 8: - data = *(uint64_t *)(mr->ram_block->host + addr); - break; - } - - trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size); - - return data; -} - -static void memory_region_ram_device_write(void *opaque, hwaddr addr, - uint64_t data, unsigned size) -{ - MemoryRegion *mr = opaque; - - trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size); - - switch (size) { - case 1: - *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data; - break; - case 2: - *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data; - break; - case 4: - *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data; - break; - case 8: - *(uint64_t *)(mr->ram_block->host + addr) = data; - break; - } -} - -static const MemoryRegionOps ram_device_mem_ops = { - .read = memory_region_ram_device_read, - .write = memory_region_ram_device_write, - .endianness = DEVICE_HOST_ENDIAN, - .valid = { - .min_access_size = 1, - .max_access_size = 8, - .unaligned = true, - }, - .impl = { - .min_access_size = 1, - .max_access_size = 8, - .unaligned = true, - }, -}; - -bool memory_region_access_valid(MemoryRegion *mr, - hwaddr addr, - unsigned size, - bool is_write, - MemTxAttrs attrs) -{ - if (mr->ops->valid.accepts - && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) { - return false; - } - - if (!mr->ops->valid.unaligned && (addr & (size - 1))) { - return false; - } - - /* Treat zero as compatibility all valid */ - if (!mr->ops->valid.max_access_size) { - return true; - } - - if (size > mr->ops->valid.max_access_size - || size < mr->ops->valid.min_access_size) { - return false; - } - return true; -} - -static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr, - hwaddr addr, - uint64_t *pval, - unsigned size, - MemTxAttrs attrs) -{ - *pval = 0; - - if (mr->ops->read) { - return access_with_adjusted_size(addr, pval, size, - mr->ops->impl.min_access_size, - mr->ops->impl.max_access_size, - memory_region_read_accessor, - mr, attrs); - } else { - return access_with_adjusted_size(addr, pval, size, - mr->ops->impl.min_access_size, - mr->ops->impl.max_access_size, - memory_region_read_with_attrs_accessor, - mr, attrs); - } -} - -MemTxResult memory_region_dispatch_read(MemoryRegion *mr, - hwaddr addr, - uint64_t *pval, - MemOp op, - MemTxAttrs attrs) -{ - unsigned size = memop_size(op); - MemTxResult r; - - if (!memory_region_access_valid(mr, addr, size, false, attrs)) { - *pval = unassigned_mem_read(mr, addr, size); - return MEMTX_DECODE_ERROR; - } - - r = memory_region_dispatch_read1(mr, addr, pval, size, attrs); - adjust_endianness(mr, pval, op); - return r; -} - -/* Return true if an eventfd was signalled */ -static bool memory_region_dispatch_write_eventfds(MemoryRegion 
*mr, - hwaddr addr, - uint64_t data, - unsigned size, - MemTxAttrs attrs) -{ - MemoryRegionIoeventfd ioeventfd = { - .addr = addrrange_make(int128_make64(addr), int128_make64(size)), - .data = data, - }; - unsigned i; - - for (i = 0; i < mr->ioeventfd_nb; i++) { - ioeventfd.match_data = mr->ioeventfds[i].match_data; - ioeventfd.e = mr->ioeventfds[i].e; - - if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) { - event_notifier_set(ioeventfd.e); - return true; - } - } - - return false; -} - -MemTxResult memory_region_dispatch_write(MemoryRegion *mr, - hwaddr addr, - uint64_t data, - MemOp op, - MemTxAttrs attrs) -{ - unsigned size = memop_size(op); - - if (!memory_region_access_valid(mr, addr, size, true, attrs)) { - unassigned_mem_write(mr, addr, data, size); - return MEMTX_DECODE_ERROR; - } - - adjust_endianness(mr, &data, op); - - if ((!kvm_eventfds_enabled()) && - memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) { - return MEMTX_OK; - } - - if (mr->ops->write) { - return access_with_adjusted_size(addr, &data, size, - mr->ops->impl.min_access_size, - mr->ops->impl.max_access_size, - memory_region_write_accessor, mr, - attrs); - } else { - return - access_with_adjusted_size(addr, &data, size, - mr->ops->impl.min_access_size, - mr->ops->impl.max_access_size, - memory_region_write_with_attrs_accessor, - mr, attrs); - } -} - -void memory_region_init_io(MemoryRegion *mr, - Object *owner, - const MemoryRegionOps *ops, - void *opaque, - const char *name, - uint64_t size) -{ - memory_region_init(mr, owner, name, size); - mr->ops = ops ? ops : &unassigned_mem_ops; - mr->opaque = opaque; - mr->terminates = true; -} - -void memory_region_init_ram_nomigrate(MemoryRegion *mr, - Object *owner, - const char *name, - uint64_t size, - Error **errp) -{ - memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp); -} - -void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr, - Object *owner, - const char *name, - uint64_t size, - bool share, - Error **errp) -{ - Error *err = NULL; - memory_region_init(mr, owner, name, size); - mr->ram = true; - mr->terminates = true; - mr->destructor = memory_region_destructor_ram; - mr->ram_block = qemu_ram_alloc(size, share, mr, &err); - mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; - if (err) { - mr->size = int128_zero(); - object_unparent(OBJECT(mr)); - error_propagate(errp, err); - } -} - -void memory_region_init_resizeable_ram(MemoryRegion *mr, - Object *owner, - const char *name, - uint64_t size, - uint64_t max_size, - void (*resized)(const char*, - uint64_t length, - void *host), - Error **errp) -{ - Error *err = NULL; - memory_region_init(mr, owner, name, size); - mr->ram = true; - mr->terminates = true; - mr->destructor = memory_region_destructor_ram; - mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized, - mr, &err); - mr->dirty_log_mask = tcg_enabled() ? 
(1 << DIRTY_MEMORY_CODE) : 0; - if (err) { - mr->size = int128_zero(); - object_unparent(OBJECT(mr)); - error_propagate(errp, err); - } -} - -#ifdef CONFIG_POSIX -void memory_region_init_ram_from_file(MemoryRegion *mr, - struct Object *owner, - const char *name, - uint64_t size, - uint64_t align, - uint32_t ram_flags, - const char *path, - Error **errp) -{ - Error *err = NULL; - memory_region_init(mr, owner, name, size); - mr->ram = true; - mr->terminates = true; - mr->destructor = memory_region_destructor_ram; - mr->align = align; - mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err); - mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; - if (err) { - mr->size = int128_zero(); - object_unparent(OBJECT(mr)); - error_propagate(errp, err); - } -} - -void memory_region_init_ram_from_fd(MemoryRegion *mr, - struct Object *owner, - const char *name, - uint64_t size, - bool share, - int fd, - Error **errp) -{ - Error *err = NULL; - memory_region_init(mr, owner, name, size); - mr->ram = true; - mr->terminates = true; - mr->destructor = memory_region_destructor_ram; - mr->ram_block = qemu_ram_alloc_from_fd(size, mr, - share ? RAM_SHARED : 0, - fd, &err); - mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; - if (err) { - mr->size = int128_zero(); - object_unparent(OBJECT(mr)); - error_propagate(errp, err); - } -} -#endif - -void memory_region_init_ram_ptr(MemoryRegion *mr, - Object *owner, - const char *name, - uint64_t size, - void *ptr) -{ - memory_region_init(mr, owner, name, size); - mr->ram = true; - mr->terminates = true; - mr->destructor = memory_region_destructor_ram; - mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; - - /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */ - assert(ptr != NULL); - mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal); -} - -void memory_region_init_ram_device_ptr(MemoryRegion *mr, - Object *owner, - const char *name, - uint64_t size, - void *ptr) -{ - memory_region_init(mr, owner, name, size); - mr->ram = true; - mr->terminates = true; - mr->ram_device = true; - mr->ops = &ram_device_mem_ops; - mr->opaque = mr; - mr->destructor = memory_region_destructor_ram; - mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; - /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. 
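All of these init variants share one contract: on allocation failure the region's size is zeroed, the object is unparented, and the error is propagated; the pointer-backed variants instead assert and cannot fail, because the backing storage already exists. A hypothetical usage sketch from board code (the device, name, size and address are made up; get_system_memory() and KiB are the usual QEMU helpers):

static void demo_map_ram(DeviceState *dev)
{
    void *buf = g_malloc0(64 * KiB);
    MemoryRegion *mr = g_new0(MemoryRegion, 1);

    /* Cannot fail: note the assert(ptr != NULL) just below. */
    memory_region_init_ram_ptr(mr, OBJECT(dev), "demo-ram", 64 * KiB, buf);
    memory_region_add_subregion(get_system_memory(), 0x10000000, mr);
}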
*/
-    assert(ptr != NULL);
-    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
-}
-
-void memory_region_init_alias(MemoryRegion *mr,
-                              Object *owner,
-                              const char *name,
-                              MemoryRegion *orig,
-                              hwaddr offset,
-                              uint64_t size)
-{
-    memory_region_init(mr, owner, name, size);
-    mr->alias = orig;
-    mr->alias_offset = offset;
-}
-
-void memory_region_init_rom_nomigrate(MemoryRegion *mr,
-                                      struct Object *owner,
-                                      const char *name,
-                                      uint64_t size,
-                                      Error **errp)
-{
-    memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
-    mr->readonly = true;
-}
-
-void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
-                                             Object *owner,
-                                             const MemoryRegionOps *ops,
-                                             void *opaque,
-                                             const char *name,
-                                             uint64_t size,
-                                             Error **errp)
-{
-    Error *err = NULL;
-    assert(ops);
-    memory_region_init(mr, owner, name, size);
-    mr->ops = ops;
-    mr->opaque = opaque;
-    mr->terminates = true;
-    mr->rom_device = true;
-    mr->destructor = memory_region_destructor_ram;
-    mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
-    if (err) {
-        mr->size = int128_zero();
-        object_unparent(OBJECT(mr));
-        error_propagate(errp, err);
-    }
-}
-
-void memory_region_init_iommu(void *_iommu_mr,
-                              size_t instance_size,
-                              const char *mrtypename,
-                              Object *owner,
-                              const char *name,
-                              uint64_t size)
-{
-    struct IOMMUMemoryRegion *iommu_mr;
-    struct MemoryRegion *mr;
-
-    object_initialize(_iommu_mr, instance_size, mrtypename);
-    mr = MEMORY_REGION(_iommu_mr);
-    memory_region_do_init(mr, owner, name, size);
-    iommu_mr = IOMMU_MEMORY_REGION(mr);
-    mr->terminates = true;  /* then re-forwards */
-    QLIST_INIT(&iommu_mr->iommu_notify);
-    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
-}
-
-static void memory_region_finalize(Object *obj)
-{
-    MemoryRegion *mr = MEMORY_REGION(obj);
-
-    assert(!mr->container);
-
-    /* We know the region is not visible in any address space (it
-     * does not have a container and cannot be a root either, because
-     * it has no references), so we can blindly clear mr->enabled.
-     * memory_region_set_enabled instead could trigger a transaction
-     * and cause an infinite loop.
-     */
-    mr->enabled = false;
-    memory_region_transaction_begin();
-    while (!QTAILQ_EMPTY(&mr->subregions)) {
-        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
-        memory_region_del_subregion(mr, subregion);
-    }
-    memory_region_transaction_commit();
-
-    mr->destructor(mr);
-    memory_region_clear_coalescing(mr);
-    g_free((char *)mr->name);
-    g_free(mr->ioeventfds);
-}
-
-Object *memory_region_owner(MemoryRegion *mr)
-{
-    Object *obj = OBJECT(mr);
-    return obj->parent;
-}
-
-void memory_region_ref(MemoryRegion *mr)
-{
-    /* MMIO callbacks most likely will access data that belongs
-     * to the owner, hence the need to ref/unref the owner whenever
-     * the memory region is in use.
-     *
-     * The memory region is a child of its owner.  As long as the
-     * owner doesn't unparent the memory region, ref-ing the owner
-     * will also keep the memory region alive.  Memory regions without
-     * an owner are supposed to never go away; we do not ref/unref
-     * them because it slows down DMA noticeably.
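-     *
-     * Illustrative pairing (a hypothetical caller, not part of this file):
-     * take a reference around any use of the region,
-     *
-     *     memory_region_ref(mr);
-     *     ... issue DMA or dispatch MMIO through mr ...
-     *     memory_region_unref(mr);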
- */ - if (mr && mr->owner) { - object_ref(mr->owner); - } -} - -void memory_region_unref(MemoryRegion *mr) -{ - if (mr && mr->owner) { - object_unref(mr->owner); - } -} - -uint64_t memory_region_size(MemoryRegion *mr) -{ - if (int128_eq(mr->size, int128_2_64())) { - return UINT64_MAX; - } - return int128_get64(mr->size); -} - -const char *memory_region_name(const MemoryRegion *mr) -{ - if (!mr->name) { - ((MemoryRegion *)mr)->name = - object_get_canonical_path_component(OBJECT(mr)); - } - return mr->name; -} - -bool memory_region_is_ram_device(MemoryRegion *mr) -{ - return mr->ram_device; -} - -uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr) -{ - uint8_t mask = mr->dirty_log_mask; - if (global_dirty_log && mr->ram_block) { - mask |= (1 << DIRTY_MEMORY_MIGRATION); - } - return mask; -} - -bool memory_region_is_logging(MemoryRegion *mr, uint8_t client) -{ - return memory_region_get_dirty_log_mask(mr) & (1 << client); -} - -static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr, - Error **errp) -{ - IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE; - IOMMUNotifier *iommu_notifier; - IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); - int ret = 0; - - IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) { - flags |= iommu_notifier->notifier_flags; - } - - if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) { - ret = imrc->notify_flag_changed(iommu_mr, - iommu_mr->iommu_notify_flags, - flags, errp); - } - - if (!ret) { - iommu_mr->iommu_notify_flags = flags; - } - return ret; -} - -int memory_region_register_iommu_notifier(MemoryRegion *mr, - IOMMUNotifier *n, Error **errp) -{ - IOMMUMemoryRegion *iommu_mr; - int ret; - - if (mr->alias) { - return memory_region_register_iommu_notifier(mr->alias, n, errp); - } - - /* We need to register for at least one bitfield */ - iommu_mr = IOMMU_MEMORY_REGION(mr); - assert(n->notifier_flags != IOMMU_NOTIFIER_NONE); - assert(n->start <= n->end); - assert(n->iommu_idx >= 0 && - n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr)); - - QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node); - ret = memory_region_update_iommu_notify_flags(iommu_mr, errp); - if (ret) { - QLIST_REMOVE(n, node); - } - return ret; -} - -uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr) -{ - IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); - - if (imrc->get_min_page_size) { - return imrc->get_min_page_size(iommu_mr); - } - return TARGET_PAGE_SIZE; -} - -void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) -{ - MemoryRegion *mr = MEMORY_REGION(iommu_mr); - IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); - hwaddr addr, granularity; - IOMMUTLBEntry iotlb; - - /* If the IOMMU has its own replay callback, override */ - if (imrc->replay) { - imrc->replay(iommu_mr, n); - return; - } - - granularity = memory_region_iommu_get_min_page_size(iommu_mr); - - for (addr = 0; addr < memory_region_size(mr); addr += granularity) { - iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx); - if (iotlb.perm != IOMMU_NONE) { - n->notify(n, &iotlb); - } - - /* if (2^64 - MR size) < granularity, it's possible to get an - * infinite loop here. 
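-         * For example, if memory_region_size(mr) is 2^64 - 0x1000 and the
-         * granularity is 0x10000, addr + granularity eventually wraps
-         * around and becomes smaller than addr.
-         *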
This should catch such a wraparound */ - if ((addr + granularity) < addr) { - break; - } - } -} - -void memory_region_unregister_iommu_notifier(MemoryRegion *mr, - IOMMUNotifier *n) -{ - IOMMUMemoryRegion *iommu_mr; - - if (mr->alias) { - memory_region_unregister_iommu_notifier(mr->alias, n); - return; - } - QLIST_REMOVE(n, node); - iommu_mr = IOMMU_MEMORY_REGION(mr); - memory_region_update_iommu_notify_flags(iommu_mr, NULL); -} - -void memory_region_notify_one(IOMMUNotifier *notifier, - IOMMUTLBEntry *entry) -{ - IOMMUNotifierFlag request_flags; - hwaddr entry_end = entry->iova + entry->addr_mask; - - /* - * Skip the notification if the notification does not overlap - * with registered range. - */ - if (notifier->start > entry_end || notifier->end < entry->iova) { - return; - } - - assert(entry->iova >= notifier->start && entry_end <= notifier->end); - - if (entry->perm & IOMMU_RW) { - request_flags = IOMMU_NOTIFIER_MAP; - } else { - request_flags = IOMMU_NOTIFIER_UNMAP; - } - - if (notifier->notifier_flags & request_flags) { - notifier->notify(notifier, entry); - } -} - -void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr, - int iommu_idx, - IOMMUTLBEntry entry) -{ - IOMMUNotifier *iommu_notifier; - - assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr))); - - IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) { - if (iommu_notifier->iommu_idx == iommu_idx) { - memory_region_notify_one(iommu_notifier, &entry); - } - } -} - -int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr, - enum IOMMUMemoryRegionAttr attr, - void *data) -{ - IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); - - if (!imrc->get_attr) { - return -EINVAL; - } - - return imrc->get_attr(iommu_mr, attr, data); -} - -int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr, - MemTxAttrs attrs) -{ - IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); - - if (!imrc->attrs_to_index) { - return 0; - } - - return imrc->attrs_to_index(iommu_mr, attrs); -} - -int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr) -{ - IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); - - if (!imrc->num_indexes) { - return 1; - } - - return imrc->num_indexes(iommu_mr); -} - -void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client) -{ - uint8_t mask = 1 << client; - uint8_t old_logging; - - assert(client == DIRTY_MEMORY_VGA); - old_logging = mr->vga_logging_count; - mr->vga_logging_count += log ? 1 : -1; - if (!!old_logging == !!mr->vga_logging_count) { - return; - } - - memory_region_transaction_begin(); - mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask); - memory_region_update_pending |= mr->enabled; - memory_region_transaction_commit(); -} - -void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr, - hwaddr size) -{ - assert(mr->ram_block); - cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr, - size, - memory_region_get_dirty_log_mask(mr)); -} - -static void memory_region_sync_dirty_bitmap(MemoryRegion *mr) -{ - MemoryListener *listener; - AddressSpace *as; - FlatView *view; - FlatRange *fr; - - /* If the same address space has multiple log_sync listeners, we - * visit that address space's FlatView multiple times. But because - * log_sync listeners are rare, it's still cheaper than walking each - * address space once. 
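-     * (The cost is then the number of log_sync listeners times the size
-     * of their FlatViews, rather than one FlatView walk per address space.)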
- */ - QTAILQ_FOREACH(listener, &memory_listeners, link) { - if (!listener->log_sync) { - continue; - } - as = listener->address_space; - view = address_space_get_flatview(as); - FOR_EACH_FLAT_RANGE(fr, view) { - if (fr->dirty_log_mask && (!mr || fr->mr == mr)) { - MemoryRegionSection mrs = section_from_flat_range(fr, view); - listener->log_sync(listener, &mrs); - } - } - flatview_unref(view); - } -} - -void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start, - hwaddr len) -{ - MemoryRegionSection mrs; - MemoryListener *listener; - AddressSpace *as; - FlatView *view; - FlatRange *fr; - hwaddr sec_start, sec_end, sec_size; - - QTAILQ_FOREACH(listener, &memory_listeners, link) { - if (!listener->log_clear) { - continue; - } - as = listener->address_space; - view = address_space_get_flatview(as); - FOR_EACH_FLAT_RANGE(fr, view) { - if (!fr->dirty_log_mask || fr->mr != mr) { - /* - * Clear dirty bitmap operation only applies to those - * regions whose dirty logging is at least enabled - */ - continue; - } - - mrs = section_from_flat_range(fr, view); - - sec_start = MAX(mrs.offset_within_region, start); - sec_end = mrs.offset_within_region + int128_get64(mrs.size); - sec_end = MIN(sec_end, start + len); - - if (sec_start >= sec_end) { - /* - * If this memory region section has no intersection - * with the requested range, skip. - */ - continue; - } - - /* Valid case; shrink the section if needed */ - mrs.offset_within_address_space += - sec_start - mrs.offset_within_region; - mrs.offset_within_region = sec_start; - sec_size = sec_end - sec_start; - mrs.size = int128_make64(sec_size); - listener->log_clear(listener, &mrs); - } - flatview_unref(view); - } -} - -DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr, - hwaddr addr, - hwaddr size, - unsigned client) -{ - DirtyBitmapSnapshot *snapshot; - assert(mr->ram_block); - memory_region_sync_dirty_bitmap(mr); - snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client); - memory_global_after_dirty_log_sync(); - return snapshot; -} - -bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap, - hwaddr addr, hwaddr size) -{ - assert(mr->ram_block); - return cpu_physical_memory_snapshot_get_dirty(snap, - memory_region_get_ram_addr(mr) + addr, size); -} - -void memory_region_set_readonly(MemoryRegion *mr, bool readonly) -{ - if (mr->readonly != readonly) { - memory_region_transaction_begin(); - mr->readonly = readonly; - memory_region_update_pending |= mr->enabled; - memory_region_transaction_commit(); - } -} - -void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile) -{ - if (mr->nonvolatile != nonvolatile) { - memory_region_transaction_begin(); - mr->nonvolatile = nonvolatile; - memory_region_update_pending |= mr->enabled; - memory_region_transaction_commit(); - } -} - -void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode) -{ - if (mr->romd_mode != romd_mode) { - memory_region_transaction_begin(); - mr->romd_mode = romd_mode; - memory_region_update_pending |= mr->enabled; - memory_region_transaction_commit(); - } -} - -void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr, - hwaddr size, unsigned client) -{ - assert(mr->ram_block); - cpu_physical_memory_test_and_clear_dirty( - memory_region_get_ram_addr(mr) + addr, size, client); -} - -int memory_region_get_fd(MemoryRegion *mr) -{ - int fd; - - RCU_READ_LOCK_GUARD(); - while (mr->alias) { - mr = mr->alias; - } - fd = mr->ram_block->fd; - - return fd; -} - -void 
*memory_region_get_ram_ptr(MemoryRegion *mr) -{ - void *ptr; - uint64_t offset = 0; - - RCU_READ_LOCK_GUARD(); - while (mr->alias) { - offset += mr->alias_offset; - mr = mr->alias; - } - assert(mr->ram_block); - ptr = qemu_map_ram_ptr(mr->ram_block, offset); - - return ptr; -} - -MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset) -{ - RAMBlock *block; - - block = qemu_ram_block_from_host(ptr, false, offset); - if (!block) { - return NULL; - } - - return block->mr; -} - -ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr) -{ - return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID; -} - -void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp) -{ - assert(mr->ram_block); - - qemu_ram_resize(mr->ram_block, newsize, errp); -} - -void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size) -{ - if (mr->ram_block) { - qemu_ram_msync(mr->ram_block, addr, size); - } -} - -void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size) -{ - /* - * Might be extended case needed to cover - * different types of memory regions - */ - if (mr->dirty_log_mask) { - memory_region_msync(mr, addr, size); - } -} - -/* - * Call proper memory listeners about the change on the newly - * added/removed CoalescedMemoryRange. - */ -static void memory_region_update_coalesced_range(MemoryRegion *mr, - CoalescedMemoryRange *cmr, - bool add) -{ - AddressSpace *as; - FlatView *view; - FlatRange *fr; - - QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { - view = address_space_get_flatview(as); - FOR_EACH_FLAT_RANGE(fr, view) { - if (fr->mr == mr) { - flat_range_coalesced_io_notify(fr, as, cmr, add); - } - } - flatview_unref(view); - } -} - -void memory_region_set_coalescing(MemoryRegion *mr) -{ - memory_region_clear_coalescing(mr); - memory_region_add_coalescing(mr, 0, int128_get64(mr->size)); -} - -void memory_region_add_coalescing(MemoryRegion *mr, - hwaddr offset, - uint64_t size) -{ - CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr)); - - cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size)); - QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link); - memory_region_update_coalesced_range(mr, cmr, true); - memory_region_set_flush_coalesced(mr); -} - -void memory_region_clear_coalescing(MemoryRegion *mr) -{ - CoalescedMemoryRange *cmr; - - if (QTAILQ_EMPTY(&mr->coalesced)) { - return; - } - - qemu_flush_coalesced_mmio_buffer(); - mr->flush_coalesced_mmio = false; - - while (!QTAILQ_EMPTY(&mr->coalesced)) { - cmr = QTAILQ_FIRST(&mr->coalesced); - QTAILQ_REMOVE(&mr->coalesced, cmr, link); - memory_region_update_coalesced_range(mr, cmr, false); - g_free(cmr); - } -} - -void memory_region_set_flush_coalesced(MemoryRegion *mr) -{ - mr->flush_coalesced_mmio = true; -} - -void memory_region_clear_flush_coalesced(MemoryRegion *mr) -{ - qemu_flush_coalesced_mmio_buffer(); - if (QTAILQ_EMPTY(&mr->coalesced)) { - mr->flush_coalesced_mmio = false; - } -} - -void memory_region_clear_global_locking(MemoryRegion *mr) -{ - mr->global_locking = false; -} - -static bool userspace_eventfd_warning; - -void memory_region_add_eventfd(MemoryRegion *mr, - hwaddr addr, - unsigned size, - bool match_data, - uint64_t data, - EventNotifier *e) -{ - MemoryRegionIoeventfd mrfd = { - .addr.start = int128_make64(addr), - .addr.size = int128_make64(size), - .match_data = match_data, - .data = data, - .e = e, - }; - unsigned i; - - if (kvm_enabled() && (!(kvm_eventfds_enabled() || - userspace_eventfd_warning))) { - userspace_eventfd_warning = true; - 
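-
-        /* Warn only once: without KVM ioeventfd support, a write matching
-         * this eventfd is completed in userspace before the notifier is
-         * signalled, which is slower than an in-kernel wakeup. */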
error_report("Using eventfd without MMIO binding in KVM. " - "Suboptimal performance expected"); - } - - if (size) { - adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); - } - memory_region_transaction_begin(); - for (i = 0; i < mr->ioeventfd_nb; ++i) { - if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) { - break; - } - } - ++mr->ioeventfd_nb; - mr->ioeventfds = g_realloc(mr->ioeventfds, - sizeof(*mr->ioeventfds) * mr->ioeventfd_nb); - memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i], - sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i)); - mr->ioeventfds[i] = mrfd; - ioeventfd_update_pending |= mr->enabled; - memory_region_transaction_commit(); -} - -void memory_region_del_eventfd(MemoryRegion *mr, - hwaddr addr, - unsigned size, - bool match_data, - uint64_t data, - EventNotifier *e) -{ - MemoryRegionIoeventfd mrfd = { - .addr.start = int128_make64(addr), - .addr.size = int128_make64(size), - .match_data = match_data, - .data = data, - .e = e, - }; - unsigned i; - - if (size) { - adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); - } - memory_region_transaction_begin(); - for (i = 0; i < mr->ioeventfd_nb; ++i) { - if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) { - break; - } - } - assert(i != mr->ioeventfd_nb); - memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1], - sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1))); - --mr->ioeventfd_nb; - mr->ioeventfds = g_realloc(mr->ioeventfds, - sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1); - ioeventfd_update_pending |= mr->enabled; - memory_region_transaction_commit(); -} - -static void memory_region_update_container_subregions(MemoryRegion *subregion) -{ - MemoryRegion *mr = subregion->container; - MemoryRegion *other; - - memory_region_transaction_begin(); - - memory_region_ref(subregion); - QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { - if (subregion->priority >= other->priority) { - QTAILQ_INSERT_BEFORE(other, subregion, subregions_link); - goto done; - } - } - QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link); -done: - memory_region_update_pending |= mr->enabled && subregion->enabled; - memory_region_transaction_commit(); -} - -static void memory_region_add_subregion_common(MemoryRegion *mr, - hwaddr offset, - MemoryRegion *subregion) -{ - assert(!subregion->container); - subregion->container = mr; - subregion->addr = offset; - memory_region_update_container_subregions(subregion); -} - -void memory_region_add_subregion(MemoryRegion *mr, - hwaddr offset, - MemoryRegion *subregion) -{ - subregion->priority = 0; - memory_region_add_subregion_common(mr, offset, subregion); -} - -void memory_region_add_subregion_overlap(MemoryRegion *mr, - hwaddr offset, - MemoryRegion *subregion, - int priority) -{ - subregion->priority = priority; - memory_region_add_subregion_common(mr, offset, subregion); -} - -void memory_region_del_subregion(MemoryRegion *mr, - MemoryRegion *subregion) -{ - memory_region_transaction_begin(); - assert(subregion->container == mr); - subregion->container = NULL; - QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link); - memory_region_unref(subregion); - memory_region_update_pending |= mr->enabled && subregion->enabled; - memory_region_transaction_commit(); -} - -void memory_region_set_enabled(MemoryRegion *mr, bool enabled) -{ - if (enabled == mr->enabled) { - return; - } - memory_region_transaction_begin(); - mr->enabled = enabled; - memory_region_update_pending = true; - memory_region_transaction_commit(); -} - -void 
memory_region_set_size(MemoryRegion *mr, uint64_t size) -{ - Int128 s = int128_make64(size); - - if (size == UINT64_MAX) { - s = int128_2_64(); - } - if (int128_eq(s, mr->size)) { - return; - } - memory_region_transaction_begin(); - mr->size = s; - memory_region_update_pending = true; - memory_region_transaction_commit(); -} - -static void memory_region_readd_subregion(MemoryRegion *mr) -{ - MemoryRegion *container = mr->container; - - if (container) { - memory_region_transaction_begin(); - memory_region_ref(mr); - memory_region_del_subregion(container, mr); - mr->container = container; - memory_region_update_container_subregions(mr); - memory_region_unref(mr); - memory_region_transaction_commit(); - } -} - -void memory_region_set_address(MemoryRegion *mr, hwaddr addr) -{ - if (addr != mr->addr) { - mr->addr = addr; - memory_region_readd_subregion(mr); - } -} - -void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset) -{ - assert(mr->alias); - - if (offset == mr->alias_offset) { - return; - } - - memory_region_transaction_begin(); - mr->alias_offset = offset; - memory_region_update_pending |= mr->enabled; - memory_region_transaction_commit(); -} - -uint64_t memory_region_get_alignment(const MemoryRegion *mr) -{ - return mr->align; -} - -static int cmp_flatrange_addr(const void *addr_, const void *fr_) -{ - const AddrRange *addr = addr_; - const FlatRange *fr = fr_; - - if (int128_le(addrrange_end(*addr), fr->addr.start)) { - return -1; - } else if (int128_ge(addr->start, addrrange_end(fr->addr))) { - return 1; - } - return 0; -} - -static FlatRange *flatview_lookup(FlatView *view, AddrRange addr) -{ - return bsearch(&addr, view->ranges, view->nr, - sizeof(FlatRange), cmp_flatrange_addr); -} - -bool memory_region_is_mapped(MemoryRegion *mr) -{ - return mr->container ? true : false; -} - -/* Same as memory_region_find, but it does not add a reference to the - * returned region. It must be called from an RCU critical section. 
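- *
- * Typical use mirrors memory_region_find() below (a sketch, assuming an
- * RCU read-side critical section is held for the duration):
- *
- *     RCU_READ_LOCK_GUARD();
- *     MemoryRegionSection sec = memory_region_find_rcu(mr, addr, size);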
- */ -static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr, - hwaddr addr, uint64_t size) -{ - MemoryRegionSection ret = { .mr = NULL }; - MemoryRegion *root; - AddressSpace *as; - AddrRange range; - FlatView *view; - FlatRange *fr; - - addr += mr->addr; - for (root = mr; root->container; ) { - root = root->container; - addr += root->addr; - } - - as = memory_region_to_address_space(root); - if (!as) { - return ret; - } - range = addrrange_make(int128_make64(addr), int128_make64(size)); - - view = address_space_to_flatview(as); - fr = flatview_lookup(view, range); - if (!fr) { - return ret; - } - - while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) { - --fr; - } - - ret.mr = fr->mr; - ret.fv = view; - range = addrrange_intersection(range, fr->addr); - ret.offset_within_region = fr->offset_in_region; - ret.offset_within_region += int128_get64(int128_sub(range.start, - fr->addr.start)); - ret.size = range.size; - ret.offset_within_address_space = int128_get64(range.start); - ret.readonly = fr->readonly; - ret.nonvolatile = fr->nonvolatile; - return ret; -} - -MemoryRegionSection memory_region_find(MemoryRegion *mr, - hwaddr addr, uint64_t size) -{ - MemoryRegionSection ret; - RCU_READ_LOCK_GUARD(); - ret = memory_region_find_rcu(mr, addr, size); - if (ret.mr) { - memory_region_ref(ret.mr); - } - return ret; -} - -bool memory_region_present(MemoryRegion *container, hwaddr addr) -{ - MemoryRegion *mr; - - RCU_READ_LOCK_GUARD(); - mr = memory_region_find_rcu(container, addr, 1).mr; - return mr && mr != container; -} - -void memory_global_dirty_log_sync(void) -{ - memory_region_sync_dirty_bitmap(NULL); -} - -void memory_global_after_dirty_log_sync(void) -{ - MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward); -} - -static VMChangeStateEntry *vmstate_change; - -void memory_global_dirty_log_start(void) -{ - if (vmstate_change) { - qemu_del_vm_change_state_handler(vmstate_change); - vmstate_change = NULL; - } - - global_dirty_log = true; - - MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward); - - /* Refresh DIRTY_MEMORY_MIGRATION bit. */ - memory_region_transaction_begin(); - memory_region_update_pending = true; - memory_region_transaction_commit(); -} - -static void memory_global_dirty_log_do_stop(void) -{ - global_dirty_log = false; - - /* Refresh DIRTY_MEMORY_MIGRATION bit. 
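-     * (memory_region_get_dirty_log_mask() ORs DIRTY_MEMORY_MIGRATION into
-     * a RAM region's mask only while global_dirty_log is set, so force a
-     * transaction here to let listeners observe the new mask.)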
-     */
-    memory_region_transaction_begin();
-    memory_region_update_pending = true;
-    memory_region_transaction_commit();
-
-    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
-}
-
-static void memory_vm_change_state_handler(void *opaque, int running,
-                                           RunState state)
-{
-    if (running) {
-        memory_global_dirty_log_do_stop();
-
-        if (vmstate_change) {
-            qemu_del_vm_change_state_handler(vmstate_change);
-            vmstate_change = NULL;
-        }
-    }
-}
-
-void memory_global_dirty_log_stop(void)
-{
-    if (!runstate_is_running()) {
-        if (vmstate_change) {
-            return;
-        }
-        vmstate_change = qemu_add_vm_change_state_handler(
-                                memory_vm_change_state_handler, NULL);
-        return;
-    }
-
-    memory_global_dirty_log_do_stop();
-}
-
-static void listener_add_address_space(MemoryListener *listener,
-                                       AddressSpace *as)
-{
-    FlatView *view;
-    FlatRange *fr;
-
-    if (listener->begin) {
-        listener->begin(listener);
-    }
-    if (global_dirty_log) {
-        if (listener->log_global_start) {
-            listener->log_global_start(listener);
-        }
-    }
-
-    view = address_space_get_flatview(as);
-    FOR_EACH_FLAT_RANGE(fr, view) {
-        MemoryRegionSection section = section_from_flat_range(fr, view);
-
-        if (listener->region_add) {
-            listener->region_add(listener, &section);
-        }
-        if (fr->dirty_log_mask && listener->log_start) {
-            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
-        }
-    }
-    if (listener->commit) {
-        listener->commit(listener);
-    }
-    flatview_unref(view);
-}
-
-static void listener_del_address_space(MemoryListener *listener,
-                                       AddressSpace *as)
-{
-    FlatView *view;
-    FlatRange *fr;
-
-    if (listener->begin) {
-        listener->begin(listener);
-    }
-    view = address_space_get_flatview(as);
-    FOR_EACH_FLAT_RANGE(fr, view) {
-        MemoryRegionSection section = section_from_flat_range(fr, view);
-
-        if (fr->dirty_log_mask && listener->log_stop) {
-            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
-        }
-        if (listener->region_del) {
-            listener->region_del(listener, &section);
-        }
-    }
-    if (listener->commit) {
-        listener->commit(listener);
-    }
-    flatview_unref(view);
-}
-
-void memory_listener_register(MemoryListener *listener, AddressSpace *as)
-{
-    MemoryListener *other = NULL;
-
-    listener->address_space = as;
-    if (QTAILQ_EMPTY(&memory_listeners)
-        || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
-        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
-    } else {
-        QTAILQ_FOREACH(other, &memory_listeners, link) {
-            if (listener->priority < other->priority) {
-                break;
-            }
-        }
-        QTAILQ_INSERT_BEFORE(other, listener, link);
-    }
-
-    if (QTAILQ_EMPTY(&as->listeners)
-        || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
-        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
-    } else {
-        QTAILQ_FOREACH(other, &as->listeners, link_as) {
-            if (listener->priority < other->priority) {
-                break;
-            }
-        }
-        QTAILQ_INSERT_BEFORE(other, listener, link_as);
-    }
-
-    listener_add_address_space(listener, as);
-}
-
-void memory_listener_unregister(MemoryListener *listener)
-{
-    if (!listener->address_space) {
-        return;
-    }
-
-    listener_del_address_space(listener, listener->address_space);
-    QTAILQ_REMOVE(&memory_listeners, listener, link);
-    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
-    listener->address_space = NULL;
-}
-
-void address_space_remove_listeners(AddressSpace *as)
-{
-    while (!QTAILQ_EMPTY(&as->listeners)) {
-        memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
-    }
-}
-
-void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
-{
-    memory_region_ref(root);
-    as->root =
root; - as->current_map = NULL; - as->ioeventfd_nb = 0; - as->ioeventfds = NULL; - QTAILQ_INIT(&as->listeners); - QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link); - as->name = g_strdup(name ? name : "anonymous"); - address_space_update_topology(as); - address_space_update_ioeventfds(as); -} - -static void do_address_space_destroy(AddressSpace *as) -{ - assert(QTAILQ_EMPTY(&as->listeners)); - - flatview_unref(as->current_map); - g_free(as->name); - g_free(as->ioeventfds); - memory_region_unref(as->root); -} - -void address_space_destroy(AddressSpace *as) -{ - MemoryRegion *root = as->root; - - /* Flush out anything from MemoryListeners listening in on this */ - memory_region_transaction_begin(); - as->root = NULL; - memory_region_transaction_commit(); - QTAILQ_REMOVE(&address_spaces, as, address_spaces_link); - - /* At this point, as->dispatch and as->current_map are dummy - * entries that the guest should never use. Wait for the old - * values to expire before freeing the data. - */ - as->root = root; - call_rcu(as, do_address_space_destroy, rcu); -} - -static const char *memory_region_type(MemoryRegion *mr) -{ - if (mr->alias) { - return memory_region_type(mr->alias); - } - if (memory_region_is_ram_device(mr)) { - return "ramd"; - } else if (memory_region_is_romd(mr)) { - return "romd"; - } else if (memory_region_is_rom(mr)) { - return "rom"; - } else if (memory_region_is_ram(mr)) { - return "ram"; - } else { - return "i/o"; - } -} - -typedef struct MemoryRegionList MemoryRegionList; - -struct MemoryRegionList { - const MemoryRegion *mr; - QTAILQ_ENTRY(MemoryRegionList) mrqueue; -}; - -typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead; - -#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \ - int128_sub((size), int128_one())) : 0) -#define MTREE_INDENT " " - -static void mtree_expand_owner(const char *label, Object *obj) -{ - DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE); - - qemu_printf(" %s:{%s", label, dev ? "dev" : "obj"); - if (dev && dev->id) { - qemu_printf(" id=%s", dev->id); - } else { - char *canonical_path = object_get_canonical_path(obj); - if (canonical_path) { - qemu_printf(" path=%s", canonical_path); - g_free(canonical_path); - } else { - qemu_printf(" type=%s", object_get_typename(obj)); - } - } - qemu_printf("}"); -} - -static void mtree_print_mr_owner(const MemoryRegion *mr) -{ - Object *owner = mr->owner; - Object *parent = memory_region_owner((MemoryRegion *)mr); - - if (!owner && !parent) { - qemu_printf(" orphan"); - return; - } - if (owner) { - mtree_expand_owner("owner", owner); - } - if (parent && parent != owner) { - mtree_expand_owner("parent", parent); - } -} - -static void mtree_print_mr(const MemoryRegion *mr, unsigned int level, - hwaddr base, - MemoryRegionListHead *alias_print_queue, - bool owner, bool display_disabled) -{ - MemoryRegionList *new_ml, *ml, *next_ml; - MemoryRegionListHead submr_print_queue; - const MemoryRegion *submr; - unsigned int i; - hwaddr cur_start, cur_end; - - if (!mr) { - return; - } - - cur_start = base + mr->addr; - cur_end = cur_start + MR_SIZE(mr->size); - - /* - * Try to detect overflow of memory region. This should never - * happen normally. When it happens, we dump something to warn the - * user who is observing this. - */ - if (cur_start < base || cur_end < cur_start) { - qemu_printf("[DETECTED OVERFLOW!] 
"); - } - - if (mr->alias) { - MemoryRegionList *ml; - bool found = false; - - /* check if the alias is already in the queue */ - QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) { - if (ml->mr == mr->alias) { - found = true; - } - } - - if (!found) { - ml = g_new(MemoryRegionList, 1); - ml->mr = mr->alias; - QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue); - } - if (mr->enabled || display_disabled) { - for (i = 0; i < level; i++) { - qemu_printf(MTREE_INDENT); - } - qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx - " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx - "-" TARGET_FMT_plx "%s", - cur_start, cur_end, - mr->priority, - mr->nonvolatile ? "nv-" : "", - memory_region_type((MemoryRegion *)mr), - memory_region_name(mr), - memory_region_name(mr->alias), - mr->alias_offset, - mr->alias_offset + MR_SIZE(mr->size), - mr->enabled ? "" : " [disabled]"); - if (owner) { - mtree_print_mr_owner(mr); - } - qemu_printf("\n"); - } - } else { - if (mr->enabled || display_disabled) { - for (i = 0; i < level; i++) { - qemu_printf(MTREE_INDENT); - } - qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx - " (prio %d, %s%s): %s%s", - cur_start, cur_end, - mr->priority, - mr->nonvolatile ? "nv-" : "", - memory_region_type((MemoryRegion *)mr), - memory_region_name(mr), - mr->enabled ? "" : " [disabled]"); - if (owner) { - mtree_print_mr_owner(mr); - } - qemu_printf("\n"); - } - } - - QTAILQ_INIT(&submr_print_queue); - - QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) { - new_ml = g_new(MemoryRegionList, 1); - new_ml->mr = submr; - QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) { - if (new_ml->mr->addr < ml->mr->addr || - (new_ml->mr->addr == ml->mr->addr && - new_ml->mr->priority > ml->mr->priority)) { - QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue); - new_ml = NULL; - break; - } - } - if (new_ml) { - QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue); - } - } - - QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) { - mtree_print_mr(ml->mr, level + 1, cur_start, - alias_print_queue, owner, display_disabled); - } - - QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) { - g_free(ml); - } -} - -struct FlatViewInfo { - int counter; - bool dispatch_tree; - bool owner; - AccelClass *ac; -}; - -static void mtree_print_flatview(gpointer key, gpointer value, - gpointer user_data) -{ - FlatView *view = key; - GArray *fv_address_spaces = value; - struct FlatViewInfo *fvi = user_data; - FlatRange *range = &view->ranges[0]; - MemoryRegion *mr; - int n = view->nr; - int i; - AddressSpace *as; - - qemu_printf("FlatView #%d\n", fvi->counter); - ++fvi->counter; - - for (i = 0; i < fv_address_spaces->len; ++i) { - as = g_array_index(fv_address_spaces, AddressSpace*, i); - qemu_printf(" AS \"%s\", root: %s", - as->name, memory_region_name(as->root)); - if (as->root->alias) { - qemu_printf(", alias %s", memory_region_name(as->root->alias)); - } - qemu_printf("\n"); - } - - qemu_printf(" Root memory region: %s\n", - view->root ? memory_region_name(view->root) : "(none)"); - - if (n <= 0) { - qemu_printf(MTREE_INDENT "No rendered FlatView\n\n"); - return; - } - - while (n--) { - mr = range->mr; - if (range->offset_in_region) { - qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx - " (prio %d, %s%s): %s @" TARGET_FMT_plx, - int128_get64(range->addr.start), - int128_get64(range->addr.start) - + MR_SIZE(range->addr.size), - mr->priority, - range->nonvolatile ? "nv-" : "", - range->readonly ? 
"rom" : memory_region_type(mr), - memory_region_name(mr), - range->offset_in_region); - } else { - qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx - " (prio %d, %s%s): %s", - int128_get64(range->addr.start), - int128_get64(range->addr.start) - + MR_SIZE(range->addr.size), - mr->priority, - range->nonvolatile ? "nv-" : "", - range->readonly ? "rom" : memory_region_type(mr), - memory_region_name(mr)); - } - if (fvi->owner) { - mtree_print_mr_owner(mr); - } - - if (fvi->ac) { - for (i = 0; i < fv_address_spaces->len; ++i) { - as = g_array_index(fv_address_spaces, AddressSpace*, i); - if (fvi->ac->has_memory(current_machine, as, - int128_get64(range->addr.start), - MR_SIZE(range->addr.size) + 1)) { - qemu_printf(" %s", fvi->ac->name); - } - } - } - qemu_printf("\n"); - range++; - } - -#if !defined(CONFIG_USER_ONLY) - if (fvi->dispatch_tree && view->root) { - mtree_print_dispatch(view->dispatch, view->root); - } -#endif - - qemu_printf("\n"); -} - -static gboolean mtree_info_flatview_free(gpointer key, gpointer value, - gpointer user_data) -{ - FlatView *view = key; - GArray *fv_address_spaces = value; - - g_array_unref(fv_address_spaces); - flatview_unref(view); - - return true; -} - -void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled) -{ - MemoryRegionListHead ml_head; - MemoryRegionList *ml, *ml2; - AddressSpace *as; - - if (flatview) { - FlatView *view; - struct FlatViewInfo fvi = { - .counter = 0, - .dispatch_tree = dispatch_tree, - .owner = owner, - }; - GArray *fv_address_spaces; - GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal); - AccelClass *ac = ACCEL_GET_CLASS(current_accel()); - - if (ac->has_memory) { - fvi.ac = ac; - } - - /* Gather all FVs in one table */ - QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { - view = address_space_get_flatview(as); - - fv_address_spaces = g_hash_table_lookup(views, view); - if (!fv_address_spaces) { - fv_address_spaces = g_array_new(false, false, sizeof(as)); - g_hash_table_insert(views, view, fv_address_spaces); - } - - g_array_append_val(fv_address_spaces, as); - } - - /* Print */ - g_hash_table_foreach(views, mtree_print_flatview, &fvi); - - /* Free */ - g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0); - g_hash_table_unref(views); - - return; - } - - QTAILQ_INIT(&ml_head); - - QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { - qemu_printf("address-space: %s\n", as->name); - mtree_print_mr(as->root, 1, 0, &ml_head, owner, disabled); - qemu_printf("\n"); - } - - /* print aliased regions */ - QTAILQ_FOREACH(ml, &ml_head, mrqueue) { - qemu_printf("memory-region: %s\n", memory_region_name(ml->mr)); - mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled); - qemu_printf("\n"); - } - - QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) { - g_free(ml); - } -} - -void memory_region_init_ram(MemoryRegion *mr, - struct Object *owner, - const char *name, - uint64_t size, - Error **errp) -{ - DeviceState *owner_dev; - Error *err = NULL; - - memory_region_init_ram_nomigrate(mr, owner, name, size, &err); - if (err) { - error_propagate(errp, err); - return; - } - /* This will assert if owner is neither NULL nor a DeviceState. - * We only want the owner here for the purposes of defining a - * unique name for migration. TODO: Ideally we should implement - * a naming scheme for Objects which are not DeviceStates, in - * which case we can relax this restriction. 
- */ - owner_dev = DEVICE(owner); - vmstate_register_ram(mr, owner_dev); -} - -void memory_region_init_rom(MemoryRegion *mr, - struct Object *owner, - const char *name, - uint64_t size, - Error **errp) -{ - DeviceState *owner_dev; - Error *err = NULL; - - memory_region_init_rom_nomigrate(mr, owner, name, size, &err); - if (err) { - error_propagate(errp, err); - return; - } - /* This will assert if owner is neither NULL nor a DeviceState. - * We only want the owner here for the purposes of defining a - * unique name for migration. TODO: Ideally we should implement - * a naming scheme for Objects which are not DeviceStates, in - * which case we can relax this restriction. - */ - owner_dev = DEVICE(owner); - vmstate_register_ram(mr, owner_dev); -} - -void memory_region_init_rom_device(MemoryRegion *mr, - struct Object *owner, - const MemoryRegionOps *ops, - void *opaque, - const char *name, - uint64_t size, - Error **errp) -{ - DeviceState *owner_dev; - Error *err = NULL; - - memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque, - name, size, &err); - if (err) { - error_propagate(errp, err); - return; - } - /* This will assert if owner is neither NULL nor a DeviceState. - * We only want the owner here for the purposes of defining a - * unique name for migration. TODO: Ideally we should implement - * a naming scheme for Objects which are not DeviceStates, in - * which case we can relax this restriction. - */ - owner_dev = DEVICE(owner); - vmstate_register_ram(mr, owner_dev); -} - -static const TypeInfo memory_region_info = { - .parent = TYPE_OBJECT, - .name = TYPE_MEMORY_REGION, - .class_size = sizeof(MemoryRegionClass), - .instance_size = sizeof(MemoryRegion), - .instance_init = memory_region_initfn, - .instance_finalize = memory_region_finalize, -}; - -static const TypeInfo iommu_memory_region_info = { - .parent = TYPE_MEMORY_REGION, - .name = TYPE_IOMMU_MEMORY_REGION, - .class_size = sizeof(IOMMUMemoryRegionClass), - .instance_size = sizeof(IOMMUMemoryRegion), - .instance_init = iommu_memory_region_initfn, - .abstract = true, -}; - -static void memory_register_types(void) -{ - type_register_static(&memory_region_info); - type_register_static(&iommu_memory_region_info); -} - -type_init(memory_register_types) diff --git a/memory_mapping.c b/memory_mapping.c deleted file mode 100644 index 18d0b8067c..0000000000 --- a/memory_mapping.c +++ /dev/null @@ -1,357 +0,0 @@ -/* - * QEMU memory mapping - * - * Copyright Fujitsu, Corp. 2011, 2012 - * - * Authors: - * Wen Congyang - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. 
- *
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-
-#include "cpu.h"
-#include "sysemu/memory_mapping.h"
-#include "exec/memory.h"
-#include "exec/address-spaces.h"
-
-//#define DEBUG_GUEST_PHYS_REGION_ADD
-
-static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list,
-                                                   MemoryMapping *mapping)
-{
-    MemoryMapping *p;
-
-    QTAILQ_FOREACH(p, &list->head, next) {
-        if (p->phys_addr >= mapping->phys_addr) {
-            QTAILQ_INSERT_BEFORE(p, mapping, next);
-            return;
-        }
-    }
-    QTAILQ_INSERT_TAIL(&list->head, mapping, next);
-}
-
-static void create_new_memory_mapping(MemoryMappingList *list,
-                                      hwaddr phys_addr,
-                                      hwaddr virt_addr,
-                                      ram_addr_t length)
-{
-    MemoryMapping *memory_mapping;
-
-    memory_mapping = g_malloc(sizeof(MemoryMapping));
-    memory_mapping->phys_addr = phys_addr;
-    memory_mapping->virt_addr = virt_addr;
-    memory_mapping->length = length;
-    list->last_mapping = memory_mapping;
-    list->num++;
-    memory_mapping_list_add_mapping_sorted(list, memory_mapping);
-}
-
-static inline bool mapping_contiguous(MemoryMapping *map,
-                                      hwaddr phys_addr,
-                                      hwaddr virt_addr)
-{
-    return phys_addr == map->phys_addr + map->length &&
-           virt_addr == map->virt_addr + map->length;
-}
-
-/*
- * Do [map->phys_addr, map->phys_addr + map->length) and
- * [phys_addr, phys_addr + length) intersect?
- */
-static inline bool mapping_have_same_region(MemoryMapping *map,
-                                            hwaddr phys_addr,
-                                            ram_addr_t length)
-{
-    return !(phys_addr + length < map->phys_addr ||
-             phys_addr >= map->phys_addr + map->length);
-}
-
-/*
- * [map->phys_addr, map->phys_addr + map->length) and
- * [phys_addr, phys_addr + length) intersect; are the virtual addresses
- * in the intersection the same?
- */
-static inline bool mapping_conflict(MemoryMapping *map,
-                                    hwaddr phys_addr,
-                                    hwaddr virt_addr)
-{
-    return virt_addr - map->virt_addr != phys_addr - map->phys_addr;
-}
-
-/*
- * [map->virt_addr, map->virt_addr + map->length) and
- * [virt_addr, virt_addr + length) intersect, and the physical addresses
- * in the intersection match.
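- *
- * E.g. merging [0x2000, 0x3000) into a mapping that already covers
- * [0x1000, 0x2800) extends that mapping to [0x1000, 0x3000).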
- */ -static inline void mapping_merge(MemoryMapping *map, - hwaddr virt_addr, - ram_addr_t length) -{ - if (virt_addr < map->virt_addr) { - map->length += map->virt_addr - virt_addr; - map->virt_addr = virt_addr; - } - - if ((virt_addr + length) > - (map->virt_addr + map->length)) { - map->length = virt_addr + length - map->virt_addr; - } -} - -void memory_mapping_list_add_merge_sorted(MemoryMappingList *list, - hwaddr phys_addr, - hwaddr virt_addr, - ram_addr_t length) -{ - MemoryMapping *memory_mapping, *last_mapping; - - if (QTAILQ_EMPTY(&list->head)) { - create_new_memory_mapping(list, phys_addr, virt_addr, length); - return; - } - - last_mapping = list->last_mapping; - if (last_mapping) { - if (mapping_contiguous(last_mapping, phys_addr, virt_addr)) { - last_mapping->length += length; - return; - } - } - - QTAILQ_FOREACH(memory_mapping, &list->head, next) { - if (mapping_contiguous(memory_mapping, phys_addr, virt_addr)) { - memory_mapping->length += length; - list->last_mapping = memory_mapping; - return; - } - - if (phys_addr + length < memory_mapping->phys_addr) { - /* create a new region before memory_mapping */ - break; - } - - if (mapping_have_same_region(memory_mapping, phys_addr, length)) { - if (mapping_conflict(memory_mapping, phys_addr, virt_addr)) { - continue; - } - - /* merge this region into memory_mapping */ - mapping_merge(memory_mapping, virt_addr, length); - list->last_mapping = memory_mapping; - return; - } - } - - /* this region can not be merged into any existed memory mapping. */ - create_new_memory_mapping(list, phys_addr, virt_addr, length); -} - -void memory_mapping_list_free(MemoryMappingList *list) -{ - MemoryMapping *p, *q; - - QTAILQ_FOREACH_SAFE(p, &list->head, next, q) { - QTAILQ_REMOVE(&list->head, p, next); - g_free(p); - } - - list->num = 0; - list->last_mapping = NULL; -} - -void memory_mapping_list_init(MemoryMappingList *list) -{ - list->num = 0; - list->last_mapping = NULL; - QTAILQ_INIT(&list->head); -} - -void guest_phys_blocks_free(GuestPhysBlockList *list) -{ - GuestPhysBlock *p, *q; - - QTAILQ_FOREACH_SAFE(p, &list->head, next, q) { - QTAILQ_REMOVE(&list->head, p, next); - memory_region_unref(p->mr); - g_free(p); - } - list->num = 0; -} - -void guest_phys_blocks_init(GuestPhysBlockList *list) -{ - list->num = 0; - QTAILQ_INIT(&list->head); -} - -typedef struct GuestPhysListener { - GuestPhysBlockList *list; - MemoryListener listener; -} GuestPhysListener; - -static void guest_phys_blocks_region_add(MemoryListener *listener, - MemoryRegionSection *section) -{ - GuestPhysListener *g; - uint64_t section_size; - hwaddr target_start, target_end; - uint8_t *host_addr; - GuestPhysBlock *predecessor; - - /* we only care about RAM */ - if (!memory_region_is_ram(section->mr) || - memory_region_is_ram_device(section->mr) || - memory_region_is_nonvolatile(section->mr)) { - return; - } - - g = container_of(listener, GuestPhysListener, listener); - section_size = int128_get64(section->size); - target_start = section->offset_within_address_space; - target_end = target_start + section_size; - host_addr = memory_region_get_ram_ptr(section->mr) + - section->offset_within_region; - predecessor = NULL; - - /* find continuity in guest physical address space */ - if (!QTAILQ_EMPTY(&g->list->head)) { - hwaddr predecessor_size; - - predecessor = QTAILQ_LAST(&g->list->head); - predecessor_size = predecessor->target_end - predecessor->target_start; - - /* the memory API guarantees monotonically increasing traversal */ - g_assert(predecessor->target_end <= 
target_start); - - /* we want continuity in both guest-physical and host-virtual memory */ - if (predecessor->target_end < target_start || - predecessor->host_addr + predecessor_size != host_addr) { - predecessor = NULL; - } - } - - if (predecessor == NULL) { - /* isolated mapping, allocate it and add it to the list */ - GuestPhysBlock *block = g_malloc0(sizeof *block); - - block->target_start = target_start; - block->target_end = target_end; - block->host_addr = host_addr; - block->mr = section->mr; - memory_region_ref(section->mr); - - QTAILQ_INSERT_TAIL(&g->list->head, block, next); - ++g->list->num; - } else { - /* expand predecessor until @target_end; predecessor's start doesn't - * change - */ - predecessor->target_end = target_end; - } - -#ifdef DEBUG_GUEST_PHYS_REGION_ADD - fprintf(stderr, "%s: target_start=" TARGET_FMT_plx " target_end=" - TARGET_FMT_plx ": %s (count: %u)\n", __func__, target_start, - target_end, predecessor ? "joined" : "added", g->list->num); -#endif -} - -void guest_phys_blocks_append(GuestPhysBlockList *list) -{ - GuestPhysListener g = { 0 }; - - g.list = list; - g.listener.region_add = &guest_phys_blocks_region_add; - memory_listener_register(&g.listener, &address_space_memory); - memory_listener_unregister(&g.listener); -} - -static CPUState *find_paging_enabled_cpu(CPUState *start_cpu) -{ - CPUState *cpu; - - CPU_FOREACH(cpu) { - if (cpu_paging_enabled(cpu)) { - return cpu; - } - } - - return NULL; -} - -void qemu_get_guest_memory_mapping(MemoryMappingList *list, - const GuestPhysBlockList *guest_phys_blocks, - Error **errp) -{ - CPUState *cpu, *first_paging_enabled_cpu; - GuestPhysBlock *block; - ram_addr_t offset, length; - - first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu); - if (first_paging_enabled_cpu) { - for (cpu = first_paging_enabled_cpu; cpu != NULL; - cpu = CPU_NEXT(cpu)) { - Error *err = NULL; - cpu_get_memory_mapping(cpu, list, &err); - if (err) { - error_propagate(errp, err); - return; - } - } - return; - } - - /* - * If the guest doesn't use paging, the virtual address is equal to physical - * address. - */ - QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) { - offset = block->target_start; - length = block->target_end - block->target_start; - create_new_memory_mapping(list, offset, offset, length); - } -} - -void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list, - const GuestPhysBlockList *guest_phys_blocks) -{ - GuestPhysBlock *block; - - QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) { - create_new_memory_mapping(list, block->target_start, 0, - block->target_end - block->target_start); - } -} - -void memory_mapping_filter(MemoryMappingList *list, int64_t begin, - int64_t length) -{ - MemoryMapping *cur, *next; - - QTAILQ_FOREACH_SAFE(cur, &list->head, next, next) { - if (cur->phys_addr >= begin + length || - cur->phys_addr + cur->length <= begin) { - QTAILQ_REMOVE(&list->head, cur, next); - g_free(cur); - list->num--; - continue; - } - - if (cur->phys_addr < begin) { - cur->length -= begin - cur->phys_addr; - if (cur->virt_addr) { - cur->virt_addr += begin - cur->phys_addr; - } - cur->phys_addr = begin; - } - - if (cur->phys_addr + cur->length > begin + length) { - cur->length -= cur->phys_addr + cur->length - begin - length; - } - } -} diff --git a/qtest.c b/qtest.c deleted file mode 100644 index 5672b75c35..0000000000 --- a/qtest.c +++ /dev/null @@ -1,820 +0,0 @@ -/* - * Test Server - * - * Copyright IBM, Corp. 
2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "cpu.h" -#include "sysemu/qtest.h" -#include "sysemu/runstate.h" -#include "chardev/char-fe.h" -#include "exec/ioport.h" -#include "exec/memory.h" -#include "hw/irq.h" -#include "sysemu/accel.h" -#include "sysemu/cpus.h" -#include "qemu/config-file.h" -#include "qemu/option.h" -#include "qemu/error-report.h" -#include "qemu/module.h" -#include "qemu/cutils.h" -#include "config-devices.h" -#ifdef CONFIG_PSERIES -#include "hw/ppc/spapr_rtas.h" -#endif - -#define MAX_IRQ 256 - -bool qtest_allowed; - -static DeviceState *irq_intercept_dev; -static FILE *qtest_log_fp; -static CharBackend qtest_chr; -static GString *inbuf; -static int irq_levels[MAX_IRQ]; -static qemu_timeval start_time; -static bool qtest_opened; -static void (*qtest_server_send)(void*, const char*); -static void *qtest_server_send_opaque; - -#define FMT_timeval "%ld.%06ld" - -/** - * QTest Protocol - * - * Line based protocol, request/response based. Server can send async messages - * so clients should always handle many async messages before the response - * comes in. - * - * Valid requests - * - * Clock management: - * - * The qtest client is completely in charge of the QEMU_CLOCK_VIRTUAL. qtest commands - * let you adjust the value of the clock (monotonically). All the commands - * return the current value of the clock in nanoseconds. - * - * > clock_step - * < OK VALUE - * - * Advance the clock to the next deadline. Useful when waiting for - * asynchronous events. - * - * > clock_step NS - * < OK VALUE - * - * Advance the clock by NS nanoseconds. - * - * > clock_set NS - * < OK VALUE - * - * Advance the clock to NS nanoseconds (do nothing if it's already past). - * - * PIO and memory access: - * - * > outb ADDR VALUE - * < OK - * - * > outw ADDR VALUE - * < OK - * - * > outl ADDR VALUE - * < OK - * - * > inb ADDR - * < OK VALUE - * - * > inw ADDR - * < OK VALUE - * - * > inl ADDR - * < OK VALUE - * - * > writeb ADDR VALUE - * < OK - * - * > writew ADDR VALUE - * < OK - * - * > writel ADDR VALUE - * < OK - * - * > writeq ADDR VALUE - * < OK - * - * > readb ADDR - * < OK VALUE - * - * > readw ADDR - * < OK VALUE - * - * > readl ADDR - * < OK VALUE - * - * > readq ADDR - * < OK VALUE - * - * > read ADDR SIZE - * < OK DATA - * - * > write ADDR SIZE DATA - * < OK - * - * > b64read ADDR SIZE - * < OK B64_DATA - * - * > b64write ADDR SIZE B64_DATA - * < OK - * - * > memset ADDR SIZE VALUE - * < OK - * - * ADDR, SIZE, VALUE are all integers parsed with strtoul() with a base of 0. - * For 'memset' a zero size is permitted and does nothing. - * - * DATA is an arbitrarily long hex number prefixed with '0x'. If it's smaller - * than the expected size, the value will be zero filled at the end of the data - * sequence. - * - * B64_DATA is an arbitrarily long base64 encoded string. - * If the sizes do not match, the data will be truncated. - * - * IRQ management: - * - * > irq_intercept_in QOM-PATH - * < OK - * - * > irq_intercept_out QOM-PATH - * < OK - * - * Attach to the gpio-in (resp. gpio-out) pins exported by the device at - * QOM-PATH. When the pin is triggered, one of the following async messages - * will be printed to the qtest stream: - * - * IRQ raise NUM - * IRQ lower NUM - * - * where NUM is an IRQ number. 
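- *
- * A hypothetical exchange (the IRQ lines are asynchronous messages that
- * may arrive at any time after the OK):
- *
- * > irq_intercept_in ioapic
- * < OK
- * IRQ raise 4
- *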
For the PC, interrupts can be intercepted - * simply with "irq_intercept_in ioapic" (note that IRQ0 comes out with - * NUM=0 even though it is remapped to GSI 2). - * - * Setting interrupt level: - * - * > set_irq_in QOM-PATH NAME NUM LEVEL - * < OK - * - * where NAME is the name of the irq/gpio list, NUM is an IRQ number and - * LEVEL is an signed integer IRQ level. - * - * Forcibly set the given interrupt pin to the given level. - * - */ - -static int hex2nib(char ch) -{ - if (ch >= '0' && ch <= '9') { - return ch - '0'; - } else if (ch >= 'a' && ch <= 'f') { - return 10 + (ch - 'a'); - } else if (ch >= 'A' && ch <= 'F') { - return 10 + (ch - 'A'); - } else { - return -1; - } -} - -static void qtest_get_time(qemu_timeval *tv) -{ - qemu_gettimeofday(tv); - tv->tv_sec -= start_time.tv_sec; - tv->tv_usec -= start_time.tv_usec; - if (tv->tv_usec < 0) { - tv->tv_usec += 1000000; - tv->tv_sec -= 1; - } -} - -static void qtest_send_prefix(CharBackend *chr) -{ - qemu_timeval tv; - - if (!qtest_log_fp || !qtest_opened) { - return; - } - - qtest_get_time(&tv); - fprintf(qtest_log_fp, "[S +" FMT_timeval "] ", - (long) tv.tv_sec, (long) tv.tv_usec); -} - -static void GCC_FMT_ATTR(1, 2) qtest_log_send(const char *fmt, ...) -{ - va_list ap; - - if (!qtest_log_fp || !qtest_opened) { - return; - } - - qtest_send_prefix(NULL); - - va_start(ap, fmt); - vfprintf(qtest_log_fp, fmt, ap); - va_end(ap); -} - -static void qtest_server_char_be_send(void *opaque, const char *str) -{ - size_t len = strlen(str); - CharBackend* chr = (CharBackend *)opaque; - qemu_chr_fe_write_all(chr, (uint8_t *)str, len); - if (qtest_log_fp && qtest_opened) { - fprintf(qtest_log_fp, "%s", str); - } -} - -static void qtest_send(CharBackend *chr, const char *str) -{ - qtest_server_send(qtest_server_send_opaque, str); -} - -static void GCC_FMT_ATTR(2, 3) qtest_sendf(CharBackend *chr, - const char *fmt, ...) -{ - va_list ap; - gchar *buffer; - - va_start(ap, fmt); - buffer = g_strdup_vprintf(fmt, ap); - qtest_send(chr, buffer); - g_free(buffer); - va_end(ap); -} - -static void qtest_irq_handler(void *opaque, int n, int level) -{ - qemu_irq old_irq = *(qemu_irq *)opaque; - qemu_set_irq(old_irq, level); - - if (irq_levels[n] != level) { - CharBackend *chr = &qtest_chr; - irq_levels[n] = level; - qtest_send_prefix(chr); - qtest_sendf(chr, "IRQ %s %d\n", - level ? 
"raise" : "lower", n); - } -} - -static void qtest_process_command(CharBackend *chr, gchar **words) -{ - const gchar *command; - - g_assert(words); - - command = words[0]; - - if (qtest_log_fp) { - qemu_timeval tv; - int i; - - qtest_get_time(&tv); - fprintf(qtest_log_fp, "[R +" FMT_timeval "]", - (long) tv.tv_sec, (long) tv.tv_usec); - for (i = 0; words[i]; i++) { - fprintf(qtest_log_fp, " %s", words[i]); - } - fprintf(qtest_log_fp, "\n"); - } - - g_assert(command); - if (strcmp(words[0], "irq_intercept_out") == 0 - || strcmp(words[0], "irq_intercept_in") == 0) { - DeviceState *dev; - NamedGPIOList *ngl; - - g_assert(words[1]); - dev = DEVICE(object_resolve_path(words[1], NULL)); - if (!dev) { - qtest_send_prefix(chr); - qtest_send(chr, "FAIL Unknown device\n"); - return; - } - - if (irq_intercept_dev) { - qtest_send_prefix(chr); - if (irq_intercept_dev != dev) { - qtest_send(chr, "FAIL IRQ intercept already enabled\n"); - } else { - qtest_send(chr, "OK\n"); - } - return; - } - - QLIST_FOREACH(ngl, &dev->gpios, node) { - /* We don't support intercept of named GPIOs yet */ - if (ngl->name) { - continue; - } - if (words[0][14] == 'o') { - int i; - for (i = 0; i < ngl->num_out; ++i) { - qemu_irq *disconnected = g_new0(qemu_irq, 1); - qemu_irq icpt = qemu_allocate_irq(qtest_irq_handler, - disconnected, i); - - *disconnected = qdev_intercept_gpio_out(dev, icpt, - ngl->name, i); - } - } else { - qemu_irq_intercept_in(ngl->in, qtest_irq_handler, - ngl->num_in); - } - } - irq_intercept_dev = dev; - qtest_send_prefix(chr); - qtest_send(chr, "OK\n"); - } else if (strcmp(words[0], "set_irq_in") == 0) { - DeviceState *dev; - qemu_irq irq; - char *name; - int ret; - int num; - int level; - - g_assert(words[1] && words[2] && words[3] && words[4]); - - dev = DEVICE(object_resolve_path(words[1], NULL)); - if (!dev) { - qtest_send_prefix(chr); - qtest_send(chr, "FAIL Unknown device\n"); - return; - } - - if (strcmp(words[2], "unnamed-gpio-in") == 0) { - name = NULL; - } else { - name = words[2]; - } - - ret = qemu_strtoi(words[3], NULL, 0, &num); - g_assert(!ret); - ret = qemu_strtoi(words[4], NULL, 0, &level); - g_assert(!ret); - - irq = qdev_get_gpio_in_named(dev, name, num); - - qemu_set_irq(irq, level); - qtest_send_prefix(chr); - qtest_send(chr, "OK\n"); - } else if (strcmp(words[0], "outb") == 0 || - strcmp(words[0], "outw") == 0 || - strcmp(words[0], "outl") == 0) { - unsigned long addr; - unsigned long value; - int ret; - - g_assert(words[1] && words[2]); - ret = qemu_strtoul(words[1], NULL, 0, &addr); - g_assert(ret == 0); - ret = qemu_strtoul(words[2], NULL, 0, &value); - g_assert(ret == 0); - g_assert(addr <= 0xffff); - - if (words[0][3] == 'b') { - cpu_outb(addr, value); - } else if (words[0][3] == 'w') { - cpu_outw(addr, value); - } else if (words[0][3] == 'l') { - cpu_outl(addr, value); - } - qtest_send_prefix(chr); - qtest_send(chr, "OK\n"); - } else if (strcmp(words[0], "inb") == 0 || - strcmp(words[0], "inw") == 0 || - strcmp(words[0], "inl") == 0) { - unsigned long addr; - uint32_t value = -1U; - int ret; - - g_assert(words[1]); - ret = qemu_strtoul(words[1], NULL, 0, &addr); - g_assert(ret == 0); - g_assert(addr <= 0xffff); - - if (words[0][2] == 'b') { - value = cpu_inb(addr); - } else if (words[0][2] == 'w') { - value = cpu_inw(addr); - } else if (words[0][2] == 'l') { - value = cpu_inl(addr); - } - qtest_send_prefix(chr); - qtest_sendf(chr, "OK 0x%04x\n", value); - } else if (strcmp(words[0], "writeb") == 0 || - strcmp(words[0], "writew") == 0 || - strcmp(words[0], "writel") == 0 || 
- strcmp(words[0], "writeq") == 0) { - uint64_t addr; - uint64_t value; - int ret; - - g_assert(words[1] && words[2]); - ret = qemu_strtou64(words[1], NULL, 0, &addr); - g_assert(ret == 0); - ret = qemu_strtou64(words[2], NULL, 0, &value); - g_assert(ret == 0); - - if (words[0][5] == 'b') { - uint8_t data = value; - address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, - &data, 1); - } else if (words[0][5] == 'w') { - uint16_t data = value; - tswap16s(&data); - address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, - &data, 2); - } else if (words[0][5] == 'l') { - uint32_t data = value; - tswap32s(&data); - address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, - &data, 4); - } else if (words[0][5] == 'q') { - uint64_t data = value; - tswap64s(&data); - address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, - &data, 8); - } - qtest_send_prefix(chr); - qtest_send(chr, "OK\n"); - } else if (strcmp(words[0], "readb") == 0 || - strcmp(words[0], "readw") == 0 || - strcmp(words[0], "readl") == 0 || - strcmp(words[0], "readq") == 0) { - uint64_t addr; - uint64_t value = UINT64_C(-1); - int ret; - - g_assert(words[1]); - ret = qemu_strtou64(words[1], NULL, 0, &addr); - g_assert(ret == 0); - - if (words[0][4] == 'b') { - uint8_t data; - address_space_read(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, - &data, 1); - value = data; - } else if (words[0][4] == 'w') { - uint16_t data; - address_space_read(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, - &data, 2); - value = tswap16(data); - } else if (words[0][4] == 'l') { - uint32_t data; - address_space_read(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, - &data, 4); - value = tswap32(data); - } else if (words[0][4] == 'q') { - address_space_read(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, - &value, 8); - tswap64s(&value); - } - qtest_send_prefix(chr); - qtest_sendf(chr, "OK 0x%016" PRIx64 "\n", value); - } else if (strcmp(words[0], "read") == 0) { - uint64_t addr, len, i; - uint8_t *data; - char *enc; - int ret; - - g_assert(words[1] && words[2]); - ret = qemu_strtou64(words[1], NULL, 0, &addr); - g_assert(ret == 0); - ret = qemu_strtou64(words[2], NULL, 0, &len); - g_assert(ret == 0); - /* We'd send garbage to libqtest if len is 0 */ - g_assert(len); - - data = g_malloc(len); - address_space_read(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, data, - len); - - enc = g_malloc(2 * len + 1); - for (i = 0; i < len; i++) { - sprintf(&enc[i * 2], "%02x", data[i]); - } - - qtest_send_prefix(chr); - qtest_sendf(chr, "OK 0x%s\n", enc); - - g_free(data); - g_free(enc); - } else if (strcmp(words[0], "b64read") == 0) { - uint64_t addr, len; - uint8_t *data; - gchar *b64_data; - int ret; - - g_assert(words[1] && words[2]); - ret = qemu_strtou64(words[1], NULL, 0, &addr); - g_assert(ret == 0); - ret = qemu_strtou64(words[2], NULL, 0, &len); - g_assert(ret == 0); - - data = g_malloc(len); - address_space_read(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, data, - len); - b64_data = g_base64_encode(data, len); - qtest_send_prefix(chr); - qtest_sendf(chr, "OK %s\n", b64_data); - - g_free(data); - g_free(b64_data); - } else if (strcmp(words[0], "write") == 0) { - uint64_t addr, len, i; - uint8_t *data; - size_t data_len; - int ret; - - g_assert(words[1] && words[2] && words[3]); - ret = qemu_strtou64(words[1], NULL, 0, &addr); - g_assert(ret == 0); - ret = qemu_strtou64(words[2], NULL, 0, &len); - g_assert(ret == 0); - - data_len = strlen(words[3]); - if (data_len < 3) { - qtest_send(chr, "ERR invalid argument size\n"); - return; - } 
- - data = g_malloc(len); - for (i = 0; i < len; i++) { - if ((i * 2 + 4) <= data_len) { - data[i] = hex2nib(words[3][i * 2 + 2]) << 4; - data[i] |= hex2nib(words[3][i * 2 + 3]); - } else { - data[i] = 0; - } - } - address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, data, - len); - g_free(data); - - qtest_send_prefix(chr); - qtest_send(chr, "OK\n"); - } else if (strcmp(words[0], "memset") == 0) { - uint64_t addr, len; - uint8_t *data; - unsigned long pattern; - int ret; - - g_assert(words[1] && words[2] && words[3]); - ret = qemu_strtou64(words[1], NULL, 0, &addr); - g_assert(ret == 0); - ret = qemu_strtou64(words[2], NULL, 0, &len); - g_assert(ret == 0); - ret = qemu_strtoul(words[3], NULL, 0, &pattern); - g_assert(ret == 0); - - if (len) { - data = g_malloc(len); - memset(data, pattern, len); - address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, - data, len); - g_free(data); - } - - qtest_send_prefix(chr); - qtest_send(chr, "OK\n"); - } else if (strcmp(words[0], "b64write") == 0) { - uint64_t addr, len; - uint8_t *data; - size_t data_len; - gsize out_len; - int ret; - - g_assert(words[1] && words[2] && words[3]); - ret = qemu_strtou64(words[1], NULL, 0, &addr); - g_assert(ret == 0); - ret = qemu_strtou64(words[2], NULL, 0, &len); - g_assert(ret == 0); - - data_len = strlen(words[3]); - if (data_len < 3) { - qtest_send(chr, "ERR invalid argument size\n"); - return; - } - - data = g_base64_decode_inplace(words[3], &out_len); - if (out_len != len) { - qtest_log_send("b64write: data length mismatch (told %"PRIu64", " - "found %zu)\n", - len, out_len); - out_len = MIN(out_len, len); - } - - address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, data, - len); - - qtest_send_prefix(chr); - qtest_send(chr, "OK\n"); - } else if (strcmp(words[0], "endianness") == 0) { - qtest_send_prefix(chr); -#if defined(TARGET_WORDS_BIGENDIAN) - qtest_sendf(chr, "OK big\n"); -#else - qtest_sendf(chr, "OK little\n"); -#endif -#ifdef CONFIG_PSERIES - } else if (strcmp(words[0], "rtas") == 0) { - uint64_t res, args, ret; - unsigned long nargs, nret; - int rc; - - rc = qemu_strtoul(words[2], NULL, 0, &nargs); - g_assert(rc == 0); - rc = qemu_strtou64(words[3], NULL, 0, &args); - g_assert(rc == 0); - rc = qemu_strtoul(words[4], NULL, 0, &nret); - g_assert(rc == 0); - rc = qemu_strtou64(words[5], NULL, 0, &ret); - g_assert(rc == 0); - res = qtest_rtas_call(words[1], nargs, args, nret, ret); - - qtest_send_prefix(chr); - qtest_sendf(chr, "OK %"PRIu64"\n", res); -#endif - } else if (qtest_enabled() && strcmp(words[0], "clock_step") == 0) { - int64_t ns; - - if (words[1]) { - int ret = qemu_strtoi64(words[1], NULL, 0, &ns); - g_assert(ret == 0); - } else { - ns = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL, - QEMU_TIMER_ATTR_ALL); - } - qtest_clock_warp(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns); - qtest_send_prefix(chr); - qtest_sendf(chr, "OK %"PRIi64"\n", - (int64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); - } else if (strcmp(words[0], "module_load") == 0) { - g_assert(words[1] && words[2]); - - qtest_send_prefix(chr); - if (module_load_one(words[1], words[2])) { - qtest_sendf(chr, "OK\n"); - } else { - qtest_sendf(chr, "FAIL\n"); - } - } else if (qtest_enabled() && strcmp(words[0], "clock_set") == 0) { - int64_t ns; - int ret; - - g_assert(words[1]); - ret = qemu_strtoi64(words[1], NULL, 0, &ns); - g_assert(ret == 0); - qtest_clock_warp(ns); - qtest_send_prefix(chr); - qtest_sendf(chr, "OK %"PRIi64"\n", - (int64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); - } else { - 
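- /* Any verb we do not recognize gets an explicit FAIL reply, so a mismatched test client fails fast instead of waiting forever for a response. */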
qtest_send_prefix(chr); - qtest_sendf(chr, "FAIL Unknown command '%s'\n", words[0]); - } -} - -static void qtest_process_inbuf(CharBackend *chr, GString *inbuf) -{ - char *end; - - while ((end = strchr(inbuf->str, '\n')) != NULL) { - size_t offset; - GString *cmd; - gchar **words; - - offset = end - inbuf->str; - - cmd = g_string_new_len(inbuf->str, offset); - g_string_erase(inbuf, 0, offset + 1); - - words = g_strsplit(cmd->str, " ", 0); - qtest_process_command(chr, words); - g_strfreev(words); - - g_string_free(cmd, TRUE); - } -} - -static void qtest_read(void *opaque, const uint8_t *buf, int size) -{ - CharBackend *chr = opaque; - - g_string_append_len(inbuf, (const gchar *)buf, size); - qtest_process_inbuf(chr, inbuf); -} - -static int qtest_can_read(void *opaque) -{ - return 1024; -} - -static void qtest_event(void *opaque, QEMUChrEvent event) -{ - int i; - - switch (event) { - case CHR_EVENT_OPENED: - /* - * We used to call qemu_system_reset() here, hoping we could - * use the same process for multiple tests that way. Never - * used. Injects an extra reset even when it's not used, and - * that can mess up tests, e.g. -boot once. - */ - for (i = 0; i < ARRAY_SIZE(irq_levels); i++) { - irq_levels[i] = 0; - } - qemu_gettimeofday(&start_time); - qtest_opened = true; - if (qtest_log_fp) { - fprintf(qtest_log_fp, "[I " FMT_timeval "] OPENED\n", - (long) start_time.tv_sec, (long) start_time.tv_usec); - } - break; - case CHR_EVENT_CLOSED: - qtest_opened = false; - if (qtest_log_fp) { - qemu_timeval tv; - qtest_get_time(&tv); - fprintf(qtest_log_fp, "[I +" FMT_timeval "] CLOSED\n", - (long) tv.tv_sec, (long) tv.tv_usec); - } - break; - default: - break; - } -} -void qtest_server_init(const char *qtest_chrdev, const char *qtest_log, Error **errp) -{ - Chardev *chr; - - chr = qemu_chr_new("qtest", qtest_chrdev, NULL); - - if (chr == NULL) { - error_setg(errp, "Failed to initialize device for qtest: \"%s\"", - qtest_chrdev); - return; - } - - if (qtest_log) { - if (strcmp(qtest_log, "none") != 0) { - qtest_log_fp = fopen(qtest_log, "w+"); - } - } else { - qtest_log_fp = stderr; - } - - qemu_chr_fe_init(&qtest_chr, chr, errp); - qemu_chr_fe_set_handlers(&qtest_chr, qtest_can_read, qtest_read, - qtest_event, NULL, &qtest_chr, NULL, true); - qemu_chr_fe_set_echo(&qtest_chr, true); - - inbuf = g_string_new(""); - - if (!qtest_server_send) { - qtest_server_set_send_handler(qtest_server_char_be_send, &qtest_chr); - } -} - -void qtest_server_set_send_handler(void (*send)(void*, const char*), - void *opaque) -{ - qtest_server_send = send; - qtest_server_send_opaque = opaque; -} - -bool qtest_driver(void) -{ - return qtest_chr.chr != NULL; -} - -void qtest_server_inproc_recv(void *dummy, const char *buf) -{ - static GString *gstr; - if (!gstr) { - gstr = g_string_new(NULL); - } - g_string_append(gstr, buf); - if (gstr->str[gstr->len - 1] == '\n') { - qtest_process_inbuf(NULL, gstr); - g_string_truncate(gstr, 0); - } -} diff --git a/softmmu/Makefile.objs b/softmmu/Makefile.objs index dd15c24346..a4bd9f2f52 100644 --- a/softmmu/Makefile.objs +++ b/softmmu/Makefile.objs @@ -1,3 +1,13 @@ softmmu-main-y = softmmu/main.o + +obj-y += arch_init.o +obj-y += cpus.o +obj-y += balloon.o +obj-y += ioport.o +obj-y += memory.o +obj-y += memory_mapping.o + +obj-y += qtest.o + obj-y += vl.o vl.o-cflags := $(GPROF_CFLAGS) $(SDL_CFLAGS) diff --git a/softmmu/arch_init.c b/softmmu/arch_init.c new file mode 100644 index 0000000000..8afea4748b --- /dev/null +++ b/softmmu/arch_init.c @@ -0,0 +1,113 @@ +/* + * QEMU System 
Emulator + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "cpu.h" +#include "sysemu/sysemu.h" +#include "sysemu/arch_init.h" +#include "hw/pci/pci.h" +#include "hw/audio/soundhw.h" +#include "qapi/error.h" +#include "qemu/config-file.h" +#include "qemu/error-report.h" +#include "hw/acpi/acpi.h" +#include "qemu/help_option.h" + +#ifdef TARGET_SPARC +int graphic_width = 1024; +int graphic_height = 768; +int graphic_depth = 8; +#elif defined(TARGET_M68K) +int graphic_width = 800; +int graphic_height = 600; +int graphic_depth = 8; +#else +int graphic_width = 800; +int graphic_height = 600; +int graphic_depth = 32; +#endif + + +#if defined(TARGET_ALPHA) +#define QEMU_ARCH QEMU_ARCH_ALPHA +#elif defined(TARGET_ARM) +#define QEMU_ARCH QEMU_ARCH_ARM +#elif defined(TARGET_CRIS) +#define QEMU_ARCH QEMU_ARCH_CRIS +#elif defined(TARGET_HPPA) +#define QEMU_ARCH QEMU_ARCH_HPPA +#elif defined(TARGET_I386) +#define QEMU_ARCH QEMU_ARCH_I386 +#elif defined(TARGET_LM32) +#define QEMU_ARCH QEMU_ARCH_LM32 +#elif defined(TARGET_M68K) +#define QEMU_ARCH QEMU_ARCH_M68K +#elif defined(TARGET_MICROBLAZE) +#define QEMU_ARCH QEMU_ARCH_MICROBLAZE +#elif defined(TARGET_MIPS) +#define QEMU_ARCH QEMU_ARCH_MIPS +#elif defined(TARGET_MOXIE) +#define QEMU_ARCH QEMU_ARCH_MOXIE +#elif defined(TARGET_NIOS2) +#define QEMU_ARCH QEMU_ARCH_NIOS2 +#elif defined(TARGET_OPENRISC) +#define QEMU_ARCH QEMU_ARCH_OPENRISC +#elif defined(TARGET_PPC) +#define QEMU_ARCH QEMU_ARCH_PPC +#elif defined(TARGET_RISCV) +#define QEMU_ARCH QEMU_ARCH_RISCV +#elif defined(TARGET_RX) +#define QEMU_ARCH QEMU_ARCH_RX +#elif defined(TARGET_S390X) +#define QEMU_ARCH QEMU_ARCH_S390X +#elif defined(TARGET_SH4) +#define QEMU_ARCH QEMU_ARCH_SH4 +#elif defined(TARGET_SPARC) +#define QEMU_ARCH QEMU_ARCH_SPARC +#elif defined(TARGET_TRICORE) +#define QEMU_ARCH QEMU_ARCH_TRICORE +#elif defined(TARGET_UNICORE32) +#define QEMU_ARCH QEMU_ARCH_UNICORE32 +#elif defined(TARGET_XTENSA) +#define QEMU_ARCH QEMU_ARCH_XTENSA +#endif + +const uint32_t arch_type = QEMU_ARCH; + +int kvm_available(void) +{ +#ifdef CONFIG_KVM + return 1; +#else + return 0; +#endif +} + +int xen_available(void) +{ +#ifdef CONFIG_XEN + return 1; +#else + return 0; +#endif +} diff --git a/softmmu/balloon.c b/softmmu/balloon.c new file mode 100644 index 0000000000..354408c6ea --- /dev/null +++ b/softmmu/balloon.c @@ -0,0 +1,106 @@ +/* + * Generic Balloon handlers and management + * + * 
Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (C) 2011 Red Hat, Inc. + * Copyright (C) 2011 Amit Shah + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/atomic.h" +#include "sysemu/kvm.h" +#include "sysemu/balloon.h" +#include "trace-root.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-misc.h" +#include "qapi/qmp/qerror.h" + +static QEMUBalloonEvent *balloon_event_fn; +static QEMUBalloonStatus *balloon_stat_fn; +static void *balloon_opaque; + +static bool have_balloon(Error **errp) +{ + if (kvm_enabled() && !kvm_has_sync_mmu()) { + error_set(errp, ERROR_CLASS_KVM_MISSING_CAP, + "Using KVM without synchronous MMU, balloon unavailable"); + return false; + } + if (!balloon_event_fn) { + error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE, + "No balloon device has been activated"); + return false; + } + return true; +} + +int qemu_add_balloon_handler(QEMUBalloonEvent *event_func, + QEMUBalloonStatus *stat_func, void *opaque) +{ + if (balloon_event_fn || balloon_stat_fn || balloon_opaque) { + /* We're already registered one balloon handler. How many can + * a guest really have? 
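+ * Only a single registration is supported; a second attempt fails with -1.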
+ */ + return -1; + } + balloon_event_fn = event_func; + balloon_stat_fn = stat_func; + balloon_opaque = opaque; + return 0; +} + +void qemu_remove_balloon_handler(void *opaque) +{ + if (balloon_opaque != opaque) { + return; + } + balloon_event_fn = NULL; + balloon_stat_fn = NULL; + balloon_opaque = NULL; +} + +BalloonInfo *qmp_query_balloon(Error **errp) +{ + BalloonInfo *info; + + if (!have_balloon(errp)) { + return NULL; + } + + info = g_malloc0(sizeof(*info)); + balloon_stat_fn(balloon_opaque, info); + return info; +} + +void qmp_balloon(int64_t target, Error **errp) +{ + if (!have_balloon(errp)) { + return; + } + + if (target <= 0) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "target", "a size"); + return; + } + + trace_balloon_event(balloon_opaque, target); + balloon_event_fn(balloon_opaque, target); +} diff --git a/softmmu/cpus.c b/softmmu/cpus.c new file mode 100644 index 0000000000..d94456ed29 --- /dev/null +++ b/softmmu/cpus.c @@ -0,0 +1,2317 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/config-file.h" +#include "qemu/cutils.h" +#include "migration/vmstate.h" +#include "monitor/monitor.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-misc.h" +#include "qapi/qapi-events-run-state.h" +#include "qapi/qmp/qerror.h" +#include "qemu/error-report.h" +#include "qemu/qemu-print.h" +#include "sysemu/tcg.h" +#include "sysemu/block-backend.h" +#include "exec/gdbstub.h" +#include "sysemu/dma.h" +#include "sysemu/hw_accel.h" +#include "sysemu/kvm.h" +#include "sysemu/hax.h" +#include "sysemu/hvf.h" +#include "sysemu/whpx.h" +#include "exec/exec-all.h" + +#include "qemu/thread.h" +#include "qemu/plugin.h" +#include "sysemu/cpus.h" +#include "sysemu/qtest.h" +#include "qemu/main-loop.h" +#include "qemu/option.h" +#include "qemu/bitmap.h" +#include "qemu/seqlock.h" +#include "qemu/guest-random.h" +#include "tcg/tcg.h" +#include "hw/nmi.h" +#include "sysemu/replay.h" +#include "sysemu/runstate.h" +#include "hw/boards.h" +#include "hw/hw.h" + +#ifdef CONFIG_LINUX + +#include + +#ifndef PR_MCE_KILL +#define PR_MCE_KILL 33 +#endif + +#ifndef PR_MCE_KILL_SET +#define PR_MCE_KILL_SET 1 +#endif + +#ifndef PR_MCE_KILL_EARLY +#define PR_MCE_KILL_EARLY 1 +#endif + +#endif /* CONFIG_LINUX */ + +static QemuMutex qemu_global_mutex; + +int64_t max_delay; +int64_t max_advance; + +/* vcpu throttling controls */ +static QEMUTimer *throttle_timer; +static unsigned int throttle_percentage; + +#define CPU_THROTTLE_PCT_MIN 1 +#define CPU_THROTTLE_PCT_MAX 99 +#define CPU_THROTTLE_TIMESLICE_NS 10000000 + +bool cpu_is_stopped(CPUState *cpu) +{ + return cpu->stopped || !runstate_is_running(); +} + +static inline bool cpu_work_list_empty(CPUState *cpu) +{ + bool ret; + + qemu_mutex_lock(&cpu->work_mutex); + ret = QSIMPLEQ_EMPTY(&cpu->work_list); + qemu_mutex_unlock(&cpu->work_mutex); + return ret; +} + +static bool cpu_thread_is_idle(CPUState *cpu) +{ + if (cpu->stop || !cpu_work_list_empty(cpu)) { + return false; + } + if (cpu_is_stopped(cpu)) { + return true; + } + if (!cpu->halted || cpu_has_work(cpu) || + kvm_halt_in_kernel()) { + return false; + } + return true; +} + +static bool all_cpu_threads_idle(void) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + if (!cpu_thread_is_idle(cpu)) { + return false; + } + } + return true; +} + +/***********************************************************/ +/* guest cycle counter */ + +/* Protected by TimersState seqlock */ + +static bool icount_sleep = true; +/* Arbitrarily pick 1MIPS as the minimum allowable speed. */ +#define MAX_ICOUNT_SHIFT 10 + +typedef struct TimersState { + /* Protected by BQL. */ + int64_t cpu_ticks_prev; + int64_t cpu_ticks_offset; + + /* Protect fields that can be respectively read outside the + * BQL, and written from multiple threads. + */ + QemuSeqLock vm_clock_seqlock; + QemuSpin vm_clock_lock; + + int16_t cpu_ticks_enabled; + + /* Conversion factor from emulated instructions to virtual clock ticks. */ + int16_t icount_time_shift; + + /* Compensate for varying guest execution speed. 
*/ + int64_t qemu_icount_bias; + + int64_t vm_clock_warp_start; + int64_t cpu_clock_offset; + + /* Only written by TCG thread */ + int64_t qemu_icount; + + /* for adjusting icount */ + QEMUTimer *icount_rt_timer; + QEMUTimer *icount_vm_timer; + QEMUTimer *icount_warp_timer; +} TimersState; + +static TimersState timers_state; +bool mttcg_enabled; + + +/* The current number of executed instructions is based on what we + * originally budgeted minus the current state of the decrementing + * icount counters in extra/u16.low. + */ +static int64_t cpu_get_icount_executed(CPUState *cpu) +{ + return (cpu->icount_budget - + (cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra)); +} + +/* + * Update the global shared timer_state.qemu_icount to take into + * account executed instructions. This is done by the TCG vCPU + * thread so the main-loop can see time has moved forward. + */ +static void cpu_update_icount_locked(CPUState *cpu) +{ + int64_t executed = cpu_get_icount_executed(cpu); + cpu->icount_budget -= executed; + + atomic_set_i64(&timers_state.qemu_icount, + timers_state.qemu_icount + executed); +} + +/* + * Update the global shared timer_state.qemu_icount to take into + * account executed instructions. This is done by the TCG vCPU + * thread so the main-loop can see time has moved forward. + */ +void cpu_update_icount(CPUState *cpu) +{ + seqlock_write_lock(&timers_state.vm_clock_seqlock, + &timers_state.vm_clock_lock); + cpu_update_icount_locked(cpu); + seqlock_write_unlock(&timers_state.vm_clock_seqlock, + &timers_state.vm_clock_lock); +} + +static int64_t cpu_get_icount_raw_locked(void) +{ + CPUState *cpu = current_cpu; + + if (cpu && cpu->running) { + if (!cpu->can_do_io) { + error_report("Bad icount read"); + exit(1); + } + /* Take into account what has run */ + cpu_update_icount_locked(cpu); + } + /* The read is protected by the seqlock, but needs atomic64 to avoid UB */ + return atomic_read_i64(&timers_state.qemu_icount); +} + +static int64_t cpu_get_icount_locked(void) +{ + int64_t icount = cpu_get_icount_raw_locked(); + return atomic_read_i64(&timers_state.qemu_icount_bias) + + cpu_icount_to_ns(icount); +} + +int64_t cpu_get_icount_raw(void) +{ + int64_t icount; + unsigned start; + + do { + start = seqlock_read_begin(&timers_state.vm_clock_seqlock); + icount = cpu_get_icount_raw_locked(); + } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start)); + + return icount; +} + +/* Return the virtual CPU time, based on the instruction counter. */ +int64_t cpu_get_icount(void) +{ + int64_t icount; + unsigned start; + + do { + start = seqlock_read_begin(&timers_state.vm_clock_seqlock); + icount = cpu_get_icount_locked(); + } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start)); + + return icount; +} + +int64_t cpu_icount_to_ns(int64_t icount) +{ + return icount << atomic_read(&timers_state.icount_time_shift); +} + +static int64_t cpu_get_ticks_locked(void) +{ + int64_t ticks = timers_state.cpu_ticks_offset; + if (timers_state.cpu_ticks_enabled) { + ticks += cpu_get_host_ticks(); + } + + if (timers_state.cpu_ticks_prev > ticks) { + /* Non increasing ticks may happen if the host uses software suspend. */ + timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks; + ticks = timers_state.cpu_ticks_prev; + } + + timers_state.cpu_ticks_prev = ticks; + return ticks; +} + +/* return the time elapsed in VM between vm_start and vm_stop. Unless + * icount is active, cpu_get_ticks() uses units of the host CPU cycle + * counter. 
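+ * With icount active it returns the virtual instruction time instead.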
+ */ +int64_t cpu_get_ticks(void) +{ + int64_t ticks; + + if (use_icount) { + return cpu_get_icount(); + } + + qemu_spin_lock(&timers_state.vm_clock_lock); + ticks = cpu_get_ticks_locked(); + qemu_spin_unlock(&timers_state.vm_clock_lock); + return ticks; +} + +static int64_t cpu_get_clock_locked(void) +{ + int64_t time; + + time = timers_state.cpu_clock_offset; + if (timers_state.cpu_ticks_enabled) { + time += get_clock(); + } + + return time; +} + +/* Return the monotonic time elapsed in VM, i.e., + * the time between vm_start and vm_stop + */ +int64_t cpu_get_clock(void) +{ + int64_t ti; + unsigned start; + + do { + start = seqlock_read_begin(&timers_state.vm_clock_seqlock); + ti = cpu_get_clock_locked(); + } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start)); + + return ti; +} + +/* enable cpu_get_ticks() + * Caller must hold BQL which serves as mutex for vm_clock_seqlock. + */ +void cpu_enable_ticks(void) +{ + seqlock_write_lock(&timers_state.vm_clock_seqlock, + &timers_state.vm_clock_lock); + if (!timers_state.cpu_ticks_enabled) { + timers_state.cpu_ticks_offset -= cpu_get_host_ticks(); + timers_state.cpu_clock_offset -= get_clock(); + timers_state.cpu_ticks_enabled = 1; + } + seqlock_write_unlock(&timers_state.vm_clock_seqlock, + &timers_state.vm_clock_lock); +} + +/* disable cpu_get_ticks() : the clock is stopped. You must not call + * cpu_get_ticks() after that. + * Caller must hold BQL which serves as mutex for vm_clock_seqlock. + */ +void cpu_disable_ticks(void) +{ + seqlock_write_lock(&timers_state.vm_clock_seqlock, + &timers_state.vm_clock_lock); + if (timers_state.cpu_ticks_enabled) { + timers_state.cpu_ticks_offset += cpu_get_host_ticks(); + timers_state.cpu_clock_offset = cpu_get_clock_locked(); + timers_state.cpu_ticks_enabled = 0; + } + seqlock_write_unlock(&timers_state.vm_clock_seqlock, + &timers_state.vm_clock_lock); +} + +/* Correlation between real and virtual time is always going to be + fairly approximate, so ignore small variation. + When the guest is idle real and virtual time will be aligned in + the IO wait loop. */ +#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10) + +static void icount_adjust(void) +{ + int64_t cur_time; + int64_t cur_icount; + int64_t delta; + + /* Protected by TimersState mutex. */ + static int64_t last_delta; + + /* If the VM is not running, then do nothing. */ + if (!runstate_is_running()) { + return; + } + + seqlock_write_lock(&timers_state.vm_clock_seqlock, + &timers_state.vm_clock_lock); + cur_time = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT, + cpu_get_clock_locked()); + cur_icount = cpu_get_icount_locked(); + + delta = cur_icount - cur_time; + /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */ + if (delta > 0 + && last_delta + ICOUNT_WOBBLE < delta * 2 + && timers_state.icount_time_shift > 0) { + /* The guest is getting too far ahead. Slow time down. */ + atomic_set(&timers_state.icount_time_shift, + timers_state.icount_time_shift - 1); + } + if (delta < 0 + && last_delta - ICOUNT_WOBBLE > delta * 2 + && timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) { + /* The guest is getting too far behind. Speed time up. 
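+ * Raising the shift doubles the nanoseconds accounted per instruction, since cpu_icount_to_ns() is a plain left shift.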
*/ + atomic_set(&timers_state.icount_time_shift, + timers_state.icount_time_shift + 1); + } + last_delta = delta; + atomic_set_i64(&timers_state.qemu_icount_bias, + cur_icount - (timers_state.qemu_icount + << timers_state.icount_time_shift)); + seqlock_write_unlock(&timers_state.vm_clock_seqlock, + &timers_state.vm_clock_lock); +} + +static void icount_adjust_rt(void *opaque) +{ + timer_mod(timers_state.icount_rt_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000); + icount_adjust(); +} + +static void icount_adjust_vm(void *opaque) +{ + timer_mod(timers_state.icount_vm_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + NANOSECONDS_PER_SECOND / 10); + icount_adjust(); +} + +static int64_t qemu_icount_round(int64_t count) +{ + int shift = atomic_read(&timers_state.icount_time_shift); + return (count + (1 << shift) - 1) >> shift; +} + +static void icount_warp_rt(void) +{ + unsigned seq; + int64_t warp_start; + + /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start + * changes from -1 to another value, so the race here is okay. + */ + do { + seq = seqlock_read_begin(&timers_state.vm_clock_seqlock); + warp_start = timers_state.vm_clock_warp_start; + } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq)); + + if (warp_start == -1) { + return; + } + + seqlock_write_lock(&timers_state.vm_clock_seqlock, + &timers_state.vm_clock_lock); + if (runstate_is_running()) { + int64_t clock = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT, + cpu_get_clock_locked()); + int64_t warp_delta; + + warp_delta = clock - timers_state.vm_clock_warp_start; + if (use_icount == 2) { + /* + * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too + * far ahead of real time. + */ + int64_t cur_icount = cpu_get_icount_locked(); + int64_t delta = clock - cur_icount; + warp_delta = MIN(warp_delta, delta); + } + atomic_set_i64(&timers_state.qemu_icount_bias, + timers_state.qemu_icount_bias + warp_delta); + } + timers_state.vm_clock_warp_start = -1; + seqlock_write_unlock(&timers_state.vm_clock_seqlock, + &timers_state.vm_clock_lock); + + if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) { + qemu_clock_notify(QEMU_CLOCK_VIRTUAL); + } +} + +static void icount_timer_cb(void *opaque) +{ + /* No need for a checkpoint because the timer already synchronizes + * with CHECKPOINT_CLOCK_VIRTUAL_RT. + */ + icount_warp_rt(); +} + +void qtest_clock_warp(int64_t dest) +{ + int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + AioContext *aio_context; + assert(qtest_enabled()); + aio_context = qemu_get_aio_context(); + while (clock < dest) { + int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL, + QEMU_TIMER_ATTR_ALL); + int64_t warp = qemu_soonest_timeout(dest - clock, deadline); + + seqlock_write_lock(&timers_state.vm_clock_seqlock, + &timers_state.vm_clock_lock); + atomic_set_i64(&timers_state.qemu_icount_bias, + timers_state.qemu_icount_bias + warp); + seqlock_write_unlock(&timers_state.vm_clock_seqlock, + &timers_state.vm_clock_lock); + + qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL); + timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]); + clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + } + qemu_clock_notify(QEMU_CLOCK_VIRTUAL); +} + +void qemu_start_warp_timer(void) +{ + int64_t clock; + int64_t deadline; + + if (!use_icount) { + return; + } + + /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers + * do not fire, so computing the deadline does not make sense. 
+ */ + if (!runstate_is_running()) { + return; + } + + if (replay_mode != REPLAY_MODE_PLAY) { + if (!all_cpu_threads_idle()) { + return; + } + + if (qtest_enabled()) { + /* When testing, qtest commands advance icount. */ + return; + } + + replay_checkpoint(CHECKPOINT_CLOCK_WARP_START); + } else { + /* warp clock deterministically in record/replay mode */ + if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) { + /* vCPU is sleeping and warp can't be started. + It is probably a race condition: notification sent + to vCPU was processed in advance and vCPU went to sleep. + Therefore we have to wake it up for doing someting. */ + if (replay_has_checkpoint()) { + qemu_clock_notify(QEMU_CLOCK_VIRTUAL); + } + return; + } + } + + /* We want to use the earliest deadline from ALL vm_clocks */ + clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT); + deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL, + ~QEMU_TIMER_ATTR_EXTERNAL); + if (deadline < 0) { + static bool notified; + if (!icount_sleep && !notified) { + warn_report("icount sleep disabled and no active timers"); + notified = true; + } + return; + } + + if (deadline > 0) { + /* + * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to + * sleep. Otherwise, the CPU might be waiting for a future timer + * interrupt to wake it up, but the interrupt never comes because + * the vCPU isn't running any insns and thus doesn't advance the + * QEMU_CLOCK_VIRTUAL. + */ + if (!icount_sleep) { + /* + * We never let VCPUs sleep in no sleep icount mode. + * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance + * to the next QEMU_CLOCK_VIRTUAL event and notify it. + * It is useful when we want a deterministic execution time, + * isolated from host latencies. + */ + seqlock_write_lock(&timers_state.vm_clock_seqlock, + &timers_state.vm_clock_lock); + atomic_set_i64(&timers_state.qemu_icount_bias, + timers_state.qemu_icount_bias + deadline); + seqlock_write_unlock(&timers_state.vm_clock_seqlock, + &timers_state.vm_clock_lock); + qemu_clock_notify(QEMU_CLOCK_VIRTUAL); + } else { + /* + * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some + * "real" time, (related to the time left until the next event) has + * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this. + * This avoids that the warps are visible externally; for example, + * you will not be sending network packets continuously instead of + * every 100ms. + */ + seqlock_write_lock(&timers_state.vm_clock_seqlock, + &timers_state.vm_clock_lock); + if (timers_state.vm_clock_warp_start == -1 + || timers_state.vm_clock_warp_start > clock) { + timers_state.vm_clock_warp_start = clock; + } + seqlock_write_unlock(&timers_state.vm_clock_seqlock, + &timers_state.vm_clock_lock); + timer_mod_anticipate(timers_state.icount_warp_timer, + clock + deadline); + } + } else if (deadline == 0) { + qemu_clock_notify(QEMU_CLOCK_VIRTUAL); + } +} + +static void qemu_account_warp_timer(void) +{ + if (!use_icount || !icount_sleep) { + return; + } + + /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers + * do not fire, so computing the deadline does not make sense. 
+ */ + if (!runstate_is_running()) { + return; + } + + /* warp clock deterministically in record/replay mode */ + if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) { + return; + } + + timer_del(timers_state.icount_warp_timer); + icount_warp_rt(); +} + +static bool icount_state_needed(void *opaque) +{ + return use_icount; +} + +static bool warp_timer_state_needed(void *opaque) +{ + TimersState *s = opaque; + return s->icount_warp_timer != NULL; +} + +static bool adjust_timers_state_needed(void *opaque) +{ + TimersState *s = opaque; + return s->icount_rt_timer != NULL; +} + +static bool shift_state_needed(void *opaque) +{ + return use_icount == 2; +} + +/* + * Subsection for warp timer migration is optional, because may not be created + */ +static const VMStateDescription icount_vmstate_warp_timer = { + .name = "timer/icount/warp_timer", + .version_id = 1, + .minimum_version_id = 1, + .needed = warp_timer_state_needed, + .fields = (VMStateField[]) { + VMSTATE_INT64(vm_clock_warp_start, TimersState), + VMSTATE_TIMER_PTR(icount_warp_timer, TimersState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription icount_vmstate_adjust_timers = { + .name = "timer/icount/timers", + .version_id = 1, + .minimum_version_id = 1, + .needed = adjust_timers_state_needed, + .fields = (VMStateField[]) { + VMSTATE_TIMER_PTR(icount_rt_timer, TimersState), + VMSTATE_TIMER_PTR(icount_vm_timer, TimersState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription icount_vmstate_shift = { + .name = "timer/icount/shift", + .version_id = 1, + .minimum_version_id = 1, + .needed = shift_state_needed, + .fields = (VMStateField[]) { + VMSTATE_INT16(icount_time_shift, TimersState), + VMSTATE_END_OF_LIST() + } +}; + +/* + * This is a subsection for icount migration. + */ +static const VMStateDescription icount_vmstate_timers = { + .name = "timer/icount", + .version_id = 1, + .minimum_version_id = 1, + .needed = icount_state_needed, + .fields = (VMStateField[]) { + VMSTATE_INT64(qemu_icount_bias, TimersState), + VMSTATE_INT64(qemu_icount, TimersState), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &icount_vmstate_warp_timer, + &icount_vmstate_adjust_timers, + &icount_vmstate_shift, + NULL + } +}; + +static const VMStateDescription vmstate_timers = { + .name = "timer", + .version_id = 2, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT64(cpu_ticks_offset, TimersState), + VMSTATE_UNUSED(8), + VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &icount_vmstate_timers, + NULL + } +}; + +static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque) +{ + double pct; + double throttle_ratio; + int64_t sleeptime_ns, endtime_ns; + + if (!cpu_throttle_get_percentage()) { + return; + } + + pct = (double)cpu_throttle_get_percentage()/100; + throttle_ratio = pct / (1 - pct); + /* Add 1ns to fix double's rounding error (like 0.9999999...) 
*/ + sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 1); + endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns; + while (sleeptime_ns > 0 && !cpu->stop) { + if (sleeptime_ns > SCALE_MS) { + qemu_cond_timedwait(cpu->halt_cond, &qemu_global_mutex, + sleeptime_ns / SCALE_MS); + } else { + qemu_mutex_unlock_iothread(); + g_usleep(sleeptime_ns / SCALE_US); + qemu_mutex_lock_iothread(); + } + sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME); + } + atomic_set(&cpu->throttle_thread_scheduled, 0); +} + +static void cpu_throttle_timer_tick(void *opaque) +{ + CPUState *cpu; + double pct; + + /* Stop the timer if needed */ + if (!cpu_throttle_get_percentage()) { + return; + } + CPU_FOREACH(cpu) { + if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) { + async_run_on_cpu(cpu, cpu_throttle_thread, + RUN_ON_CPU_NULL); + } + } + + pct = (double)cpu_throttle_get_percentage()/100; + timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) + + CPU_THROTTLE_TIMESLICE_NS / (1-pct)); +} + +void cpu_throttle_set(int new_throttle_pct) +{ + /* Ensure throttle percentage is within valid range */ + new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX); + new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN); + + atomic_set(&throttle_percentage, new_throttle_pct); + + timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) + + CPU_THROTTLE_TIMESLICE_NS); +} + +void cpu_throttle_stop(void) +{ + atomic_set(&throttle_percentage, 0); +} + +bool cpu_throttle_active(void) +{ + return (cpu_throttle_get_percentage() != 0); +} + +int cpu_throttle_get_percentage(void) +{ + return atomic_read(&throttle_percentage); +} + +void cpu_ticks_init(void) +{ + seqlock_init(&timers_state.vm_clock_seqlock); + qemu_spin_init(&timers_state.vm_clock_lock); + vmstate_register(NULL, 0, &vmstate_timers, &timers_state); + throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT, + cpu_throttle_timer_tick, NULL); +} + +void configure_icount(QemuOpts *opts, Error **errp) +{ + const char *option = qemu_opt_get(opts, "shift"); + bool sleep = qemu_opt_get_bool(opts, "sleep", true); + bool align = qemu_opt_get_bool(opts, "align", false); + long time_shift = -1; + + if (!option) { + if (qemu_opt_get(opts, "align") != NULL) { + error_setg(errp, "Please specify shift option when using align"); + } + return; + } + + if (align && !sleep) { + error_setg(errp, "align=on and sleep=off are incompatible"); + return; + } + + if (strcmp(option, "auto") != 0) { + if (qemu_strtol(option, NULL, 0, &time_shift) < 0 + || time_shift < 0 || time_shift > MAX_ICOUNT_SHIFT) { + error_setg(errp, "icount: Invalid shift value"); + return; + } + } else if (icount_align_option) { + error_setg(errp, "shift=auto and align=on are incompatible"); + return; + } else if (!icount_sleep) { + error_setg(errp, "shift=auto and sleep=off are incompatible"); + return; + } + + icount_sleep = sleep; + if (icount_sleep) { + timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT, + icount_timer_cb, NULL); + } + + icount_align_option = align; + + if (time_shift >= 0) { + timers_state.icount_time_shift = time_shift; + use_icount = 1; + return; + } + + use_icount = 2; + + /* 125MIPS seems a reasonable initial guess at the guest speed. + It will be corrected fairly quickly anyway. */ + timers_state.icount_time_shift = 3; + + /* Have both realtime and virtual time triggers for speed adjustment. 
+ The realtime trigger catches emulated time passing too slowly, + the virtual time trigger catches emulated time passing too fast. + Realtime triggers occur even when idle, so use them less frequently + than VM triggers. */ + timers_state.vm_clock_warp_start = -1; + timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT, + icount_adjust_rt, NULL); + timer_mod(timers_state.icount_rt_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000); + timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + icount_adjust_vm, NULL); + timer_mod(timers_state.icount_vm_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + NANOSECONDS_PER_SECOND / 10); +} + +/***********************************************************/ +/* TCG vCPU kick timer + * + * The kick timer is responsible for moving single threaded vCPU + * emulation on to the next vCPU. If more than one vCPU is running a + * timer event with force a cpu->exit so the next vCPU can get + * scheduled. + * + * The timer is removed if all vCPUs are idle and restarted again once + * idleness is complete. + */ + +static QEMUTimer *tcg_kick_vcpu_timer; +static CPUState *tcg_current_rr_cpu; + +#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10) + +static inline int64_t qemu_tcg_next_kick(void) +{ + return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD; +} + +/* Kick the currently round-robin scheduled vCPU to next */ +static void qemu_cpu_kick_rr_next_cpu(void) +{ + CPUState *cpu; + do { + cpu = atomic_mb_read(&tcg_current_rr_cpu); + if (cpu) { + cpu_exit(cpu); + } + } while (cpu != atomic_mb_read(&tcg_current_rr_cpu)); +} + +/* Kick all RR vCPUs */ +static void qemu_cpu_kick_rr_cpus(void) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + cpu_exit(cpu); + }; +} + +static void do_nothing(CPUState *cpu, run_on_cpu_data unused) +{ +} + +void qemu_timer_notify_cb(void *opaque, QEMUClockType type) +{ + if (!use_icount || type != QEMU_CLOCK_VIRTUAL) { + qemu_notify_event(); + return; + } + + if (qemu_in_vcpu_thread()) { + /* A CPU is currently running; kick it back out to the + * tcg_cpu_exec() loop so it will recalculate its + * icount deadline immediately. + */ + qemu_cpu_kick(current_cpu); + } else if (first_cpu) { + /* qemu_cpu_kick is not enough to kick a halted CPU out of + * qemu_tcg_wait_io_event. async_run_on_cpu, instead, + * causes cpu_thread_is_idle to return false. This way, + * handle_icount_deadline can run. + * If we have no CPUs at all for some reason, we don't + * need to do anything. + */ + async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL); + } +} + +static void kick_tcg_thread(void *opaque) +{ + timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick()); + qemu_cpu_kick_rr_next_cpu(); +} + +static void start_tcg_kick_timer(void) +{ + assert(!mttcg_enabled); + if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) { + tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + kick_tcg_thread, NULL); + } + if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) { + timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick()); + } +} + +static void stop_tcg_kick_timer(void) +{ + assert(!mttcg_enabled); + if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) { + timer_del(tcg_kick_vcpu_timer); + } +} + +/***********************************************************/ +void hw_error(const char *fmt, ...) 
+{ + va_list ap; + CPUState *cpu; + + va_start(ap, fmt); + fprintf(stderr, "qemu: hardware error: "); + vfprintf(stderr, fmt, ap); + fprintf(stderr, "\n"); + CPU_FOREACH(cpu) { + fprintf(stderr, "CPU #%d:\n", cpu->cpu_index); + cpu_dump_state(cpu, stderr, CPU_DUMP_FPU); + } + va_end(ap); + abort(); +} + +void cpu_synchronize_all_states(void) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + cpu_synchronize_state(cpu); + } +} + +void cpu_synchronize_all_post_reset(void) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + cpu_synchronize_post_reset(cpu); + } +} + +void cpu_synchronize_all_post_init(void) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + cpu_synchronize_post_init(cpu); + } +} + +void cpu_synchronize_all_pre_loadvm(void) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + cpu_synchronize_pre_loadvm(cpu); + } +} + +static int do_vm_stop(RunState state, bool send_stop) +{ + int ret = 0; + + if (runstate_is_running()) { + runstate_set(state); + cpu_disable_ticks(); + pause_all_vcpus(); + vm_state_notify(0, state); + if (send_stop) { + qapi_event_send_stop(); + } + } + + bdrv_drain_all(); + ret = bdrv_flush_all(); + + return ret; +} + +/* Special vm_stop() variant for terminating the process. Historically clients + * did not expect a QMP STOP event and so we need to retain compatibility. + */ +int vm_shutdown(void) +{ + return do_vm_stop(RUN_STATE_SHUTDOWN, false); +} + +static bool cpu_can_run(CPUState *cpu) +{ + if (cpu->stop) { + return false; + } + if (cpu_is_stopped(cpu)) { + return false; + } + return true; +} + +static void cpu_handle_guest_debug(CPUState *cpu) +{ + gdb_set_stop_cpu(cpu); + qemu_system_debug_request(); + cpu->stopped = true; +} + +#ifdef CONFIG_LINUX +static void sigbus_reraise(void) +{ + sigset_t set; + struct sigaction action; + + memset(&action, 0, sizeof(action)); + action.sa_handler = SIG_DFL; + if (!sigaction(SIGBUS, &action, NULL)) { + raise(SIGBUS); + sigemptyset(&set); + sigaddset(&set, SIGBUS); + pthread_sigmask(SIG_UNBLOCK, &set, NULL); + } + perror("Failed to re-raise SIGBUS!\n"); + abort(); +} + +static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx) +{ + if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) { + sigbus_reraise(); + } + + if (current_cpu) { + /* Called asynchronously in VCPU thread. */ + if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) { + sigbus_reraise(); + } + } else { + /* Called synchronously (via signalfd) in main thread. 
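+ * current_cpu is NULL on this path, so the fault is reported against the VM as a whole.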
*/ + if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) { + sigbus_reraise(); + } + } +} + +static void qemu_init_sigbus(void) +{ + struct sigaction action; + + memset(&action, 0, sizeof(action)); + action.sa_flags = SA_SIGINFO; + action.sa_sigaction = sigbus_handler; + sigaction(SIGBUS, &action, NULL); + + prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0); +} +#else /* !CONFIG_LINUX */ +static void qemu_init_sigbus(void) +{ +} +#endif /* !CONFIG_LINUX */ + +static QemuThread io_thread; + +/* cpu creation */ +static QemuCond qemu_cpu_cond; +/* system init */ +static QemuCond qemu_pause_cond; + +void qemu_init_cpu_loop(void) +{ + qemu_init_sigbus(); + qemu_cond_init(&qemu_cpu_cond); + qemu_cond_init(&qemu_pause_cond); + qemu_mutex_init(&qemu_global_mutex); + + qemu_thread_get_self(&io_thread); +} + +void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data) +{ + do_run_on_cpu(cpu, func, data, &qemu_global_mutex); +} + +static void qemu_kvm_destroy_vcpu(CPUState *cpu) +{ + if (kvm_destroy_vcpu(cpu) < 0) { + error_report("kvm_destroy_vcpu failed"); + exit(EXIT_FAILURE); + } +} + +static void qemu_tcg_destroy_vcpu(CPUState *cpu) +{ +} + +static void qemu_cpu_stop(CPUState *cpu, bool exit) +{ + g_assert(qemu_cpu_is_self(cpu)); + cpu->stop = false; + cpu->stopped = true; + if (exit) { + cpu_exit(cpu); + } + qemu_cond_broadcast(&qemu_pause_cond); +} + +static void qemu_wait_io_event_common(CPUState *cpu) +{ + atomic_mb_set(&cpu->thread_kicked, false); + if (cpu->stop) { + qemu_cpu_stop(cpu, false); + } + process_queued_cpu_work(cpu); +} + +static void qemu_tcg_rr_wait_io_event(void) +{ + CPUState *cpu; + + while (all_cpu_threads_idle()) { + stop_tcg_kick_timer(); + qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex); + } + + start_tcg_kick_timer(); + + CPU_FOREACH(cpu) { + qemu_wait_io_event_common(cpu); + } +} + +static void qemu_wait_io_event(CPUState *cpu) +{ + bool slept = false; + + while (cpu_thread_is_idle(cpu)) { + if (!slept) { + slept = true; + qemu_plugin_vcpu_idle_cb(cpu); + } + qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex); + } + if (slept) { + qemu_plugin_vcpu_resume_cb(cpu); + } + +#ifdef _WIN32 + /* Eat dummy APC queued by qemu_cpu_kick_thread. 
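+ * SleepEx(0, TRUE) is an alertable zero-length sleep: it runs any queued APCs and returns immediately.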
*/ + if (!tcg_enabled()) { + SleepEx(0, TRUE); + } +#endif + qemu_wait_io_event_common(cpu); +} + +static void *qemu_kvm_cpu_thread_fn(void *arg) +{ + CPUState *cpu = arg; + int r; + + rcu_register_thread(); + + qemu_mutex_lock_iothread(); + qemu_thread_get_self(cpu->thread); + cpu->thread_id = qemu_get_thread_id(); + cpu->can_do_io = 1; + current_cpu = cpu; + + r = kvm_init_vcpu(cpu); + if (r < 0) { + error_report("kvm_init_vcpu failed: %s", strerror(-r)); + exit(1); + } + + kvm_init_cpu_signals(cpu); + + /* signal CPU creation */ + cpu->created = true; + qemu_cond_signal(&qemu_cpu_cond); + qemu_guest_random_seed_thread_part2(cpu->random_seed); + + do { + if (cpu_can_run(cpu)) { + r = kvm_cpu_exec(cpu); + if (r == EXCP_DEBUG) { + cpu_handle_guest_debug(cpu); + } + } + qemu_wait_io_event(cpu); + } while (!cpu->unplug || cpu_can_run(cpu)); + + qemu_kvm_destroy_vcpu(cpu); + cpu->created = false; + qemu_cond_signal(&qemu_cpu_cond); + qemu_mutex_unlock_iothread(); + rcu_unregister_thread(); + return NULL; +} + +static void *qemu_dummy_cpu_thread_fn(void *arg) +{ +#ifdef _WIN32 + error_report("qtest is not supported under Windows"); + exit(1); +#else + CPUState *cpu = arg; + sigset_t waitset; + int r; + + rcu_register_thread(); + + qemu_mutex_lock_iothread(); + qemu_thread_get_self(cpu->thread); + cpu->thread_id = qemu_get_thread_id(); + cpu->can_do_io = 1; + current_cpu = cpu; + + sigemptyset(&waitset); + sigaddset(&waitset, SIG_IPI); + + /* signal CPU creation */ + cpu->created = true; + qemu_cond_signal(&qemu_cpu_cond); + qemu_guest_random_seed_thread_part2(cpu->random_seed); + + do { + qemu_mutex_unlock_iothread(); + do { + int sig; + r = sigwait(&waitset, &sig); + } while (r == -1 && (errno == EAGAIN || errno == EINTR)); + if (r == -1) { + perror("sigwait"); + exit(1); + } + qemu_mutex_lock_iothread(); + qemu_wait_io_event(cpu); + } while (!cpu->unplug); + + qemu_mutex_unlock_iothread(); + rcu_unregister_thread(); + return NULL; +#endif +} + +static int64_t tcg_get_icount_limit(void) +{ + int64_t deadline; + + if (replay_mode != REPLAY_MODE_PLAY) { + /* + * Include all the timers, because they may need an attention. + * Too long CPU execution may create unnecessary delay in UI. + */ + deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL, + QEMU_TIMER_ATTR_ALL); + /* Check realtime timers, because they help with input processing */ + deadline = qemu_soonest_timeout(deadline, + qemu_clock_deadline_ns_all(QEMU_CLOCK_REALTIME, + QEMU_TIMER_ATTR_ALL)); + + /* Maintain prior (possibly buggy) behaviour where if no deadline + * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than + * INT32_MAX nanoseconds ahead, we still use INT32_MAX + * nanoseconds. + */ + if ((deadline < 0) || (deadline > INT32_MAX)) { + deadline = INT32_MAX; + } + + return qemu_icount_round(deadline); + } else { + return replay_get_instructions(); + } +} + +static void notify_aio_contexts(void) +{ + /* Wake up other AioContexts. */ + qemu_clock_notify(QEMU_CLOCK_VIRTUAL); + qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL); +} + +static void handle_icount_deadline(void) +{ + assert(qemu_in_vcpu_thread()); + if (use_icount) { + int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL, + QEMU_TIMER_ATTR_ALL); + + if (deadline == 0) { + notify_aio_contexts(); + } + } +} + +static void prepare_icount_for_run(CPUState *cpu) +{ + if (use_icount) { + int insns_left; + + /* These should always be cleared by process_icount_data after + * each vCPU execution. 
However u16.high can be raised + * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt + */ + g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0); + g_assert(cpu->icount_extra == 0); + + cpu->icount_budget = tcg_get_icount_limit(); + insns_left = MIN(0xffff, cpu->icount_budget); + cpu_neg(cpu)->icount_decr.u16.low = insns_left; + cpu->icount_extra = cpu->icount_budget - insns_left; + + replay_mutex_lock(); + + if (cpu->icount_budget == 0 && replay_has_checkpoint()) { + notify_aio_contexts(); + } + } +} + +static void process_icount_data(CPUState *cpu) +{ + if (use_icount) { + /* Account for executed instructions */ + cpu_update_icount(cpu); + + /* Reset the counters */ + cpu_neg(cpu)->icount_decr.u16.low = 0; + cpu->icount_extra = 0; + cpu->icount_budget = 0; + + replay_account_executed_instructions(); + + replay_mutex_unlock(); + } +} + + +static int tcg_cpu_exec(CPUState *cpu) +{ + int ret; +#ifdef CONFIG_PROFILER + int64_t ti; +#endif + + assert(tcg_enabled()); +#ifdef CONFIG_PROFILER + ti = profile_getclock(); +#endif + cpu_exec_start(cpu); + ret = cpu_exec(cpu); + cpu_exec_end(cpu); +#ifdef CONFIG_PROFILER + atomic_set(&tcg_ctx->prof.cpu_exec_time, + tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti); +#endif + return ret; +} + +/* Destroy any remaining vCPUs which have been unplugged and have + * finished running + */ +static void deal_with_unplugged_cpus(void) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + if (cpu->unplug && !cpu_can_run(cpu)) { + qemu_tcg_destroy_vcpu(cpu); + cpu->created = false; + qemu_cond_signal(&qemu_cpu_cond); + break; + } + } +} + +/* Single-threaded TCG + * + * In the single-threaded case each vCPU is simulated in turn. If + * there is more than a single vCPU we create a simple timer to kick + * the vCPU and ensure we don't get stuck in a tight loop in one vCPU. + * This is done explicitly rather than relying on side-effects + * elsewhere. + */ + +static void *qemu_tcg_rr_cpu_thread_fn(void *arg) +{ + CPUState *cpu = arg; + + assert(tcg_enabled()); + rcu_register_thread(); + tcg_register_thread(); + + qemu_mutex_lock_iothread(); + qemu_thread_get_self(cpu->thread); + + cpu->thread_id = qemu_get_thread_id(); + cpu->created = true; + cpu->can_do_io = 1; + qemu_cond_signal(&qemu_cpu_cond); + qemu_guest_random_seed_thread_part2(cpu->random_seed); + + /* wait for initial kick-off after machine start */ + while (first_cpu->stopped) { + qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex); + + /* process any pending work */ + CPU_FOREACH(cpu) { + current_cpu = cpu; + qemu_wait_io_event_common(cpu); + } + } + + start_tcg_kick_timer(); + + cpu = first_cpu; + + /* process any pending work */ + cpu->exit_request = 1; + + while (1) { + qemu_mutex_unlock_iothread(); + replay_mutex_lock(); + qemu_mutex_lock_iothread(); + /* Account partial waits to QEMU_CLOCK_VIRTUAL. */ + qemu_account_warp_timer(); + + /* Run the timers here. This is much more efficient than + * waking up the I/O thread and waiting for completion. 
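+ * (handle_icount_deadline() is a no-op unless icount is in use.)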
+ */ + handle_icount_deadline(); + + replay_mutex_unlock(); + + if (!cpu) { + cpu = first_cpu; + } + + while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) { + + atomic_mb_set(&tcg_current_rr_cpu, cpu); + current_cpu = cpu; + + qemu_clock_enable(QEMU_CLOCK_VIRTUAL, + (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0); + + if (cpu_can_run(cpu)) { + int r; + + qemu_mutex_unlock_iothread(); + prepare_icount_for_run(cpu); + + r = tcg_cpu_exec(cpu); + + process_icount_data(cpu); + qemu_mutex_lock_iothread(); + + if (r == EXCP_DEBUG) { + cpu_handle_guest_debug(cpu); + break; + } else if (r == EXCP_ATOMIC) { + qemu_mutex_unlock_iothread(); + cpu_exec_step_atomic(cpu); + qemu_mutex_lock_iothread(); + break; + } + } else if (cpu->stop) { + if (cpu->unplug) { + cpu = CPU_NEXT(cpu); + } + break; + } + + cpu = CPU_NEXT(cpu); + } /* while (cpu && !cpu->exit_request).. */ + + /* Does not need atomic_mb_set because a spurious wakeup is okay. */ + atomic_set(&tcg_current_rr_cpu, NULL); + + if (cpu && cpu->exit_request) { + atomic_mb_set(&cpu->exit_request, 0); + } + + if (use_icount && all_cpu_threads_idle()) { + /* + * When all cpus are sleeping (e.g in WFI), to avoid a deadlock + * in the main_loop, wake it up in order to start the warp timer. + */ + qemu_notify_event(); + } + + qemu_tcg_rr_wait_io_event(); + deal_with_unplugged_cpus(); + } + + rcu_unregister_thread(); + return NULL; +} + +static void *qemu_hax_cpu_thread_fn(void *arg) +{ + CPUState *cpu = arg; + int r; + + rcu_register_thread(); + qemu_mutex_lock_iothread(); + qemu_thread_get_self(cpu->thread); + + cpu->thread_id = qemu_get_thread_id(); + cpu->created = true; + current_cpu = cpu; + + hax_init_vcpu(cpu); + qemu_cond_signal(&qemu_cpu_cond); + qemu_guest_random_seed_thread_part2(cpu->random_seed); + + do { + if (cpu_can_run(cpu)) { + r = hax_smp_cpu_exec(cpu); + if (r == EXCP_DEBUG) { + cpu_handle_guest_debug(cpu); + } + } + + qemu_wait_io_event(cpu); + } while (!cpu->unplug || cpu_can_run(cpu)); + rcu_unregister_thread(); + return NULL; +} + +/* The HVF-specific vCPU thread function. This one should only run when the host + * CPU supports the VMX "unrestricted guest" feature. 
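+ * Apart from calling hvf_vcpu_exec(), it follows the same create/run/wait shape as the KVM thread function above.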
*/ +static void *qemu_hvf_cpu_thread_fn(void *arg) +{ + CPUState *cpu = arg; + + int r; + + assert(hvf_enabled()); + + rcu_register_thread(); + + qemu_mutex_lock_iothread(); + qemu_thread_get_self(cpu->thread); + + cpu->thread_id = qemu_get_thread_id(); + cpu->can_do_io = 1; + current_cpu = cpu; + + hvf_init_vcpu(cpu); + + /* signal CPU creation */ + cpu->created = true; + qemu_cond_signal(&qemu_cpu_cond); + qemu_guest_random_seed_thread_part2(cpu->random_seed); + + do { + if (cpu_can_run(cpu)) { + r = hvf_vcpu_exec(cpu); + if (r == EXCP_DEBUG) { + cpu_handle_guest_debug(cpu); + } + } + qemu_wait_io_event(cpu); + } while (!cpu->unplug || cpu_can_run(cpu)); + + hvf_vcpu_destroy(cpu); + cpu->created = false; + qemu_cond_signal(&qemu_cpu_cond); + qemu_mutex_unlock_iothread(); + rcu_unregister_thread(); + return NULL; +} + +static void *qemu_whpx_cpu_thread_fn(void *arg) +{ + CPUState *cpu = arg; + int r; + + rcu_register_thread(); + + qemu_mutex_lock_iothread(); + qemu_thread_get_self(cpu->thread); + cpu->thread_id = qemu_get_thread_id(); + current_cpu = cpu; + + r = whpx_init_vcpu(cpu); + if (r < 0) { + fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-r)); + exit(1); + } + + /* signal CPU creation */ + cpu->created = true; + qemu_cond_signal(&qemu_cpu_cond); + qemu_guest_random_seed_thread_part2(cpu->random_seed); + + do { + if (cpu_can_run(cpu)) { + r = whpx_vcpu_exec(cpu); + if (r == EXCP_DEBUG) { + cpu_handle_guest_debug(cpu); + } + } + while (cpu_thread_is_idle(cpu)) { + qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex); + } + qemu_wait_io_event_common(cpu); + } while (!cpu->unplug || cpu_can_run(cpu)); + + whpx_destroy_vcpu(cpu); + cpu->created = false; + qemu_cond_signal(&qemu_cpu_cond); + qemu_mutex_unlock_iothread(); + rcu_unregister_thread(); + return NULL; +} + +#ifdef _WIN32 +static void CALLBACK dummy_apc_func(ULONG_PTR unused) +{ +} +#endif + +/* Multi-threaded TCG + * + * In the multi-threaded case each vCPU has its own thread. The TLS + * variable current_cpu can be used deep in the code to find the + * current CPUState for a given thread. + */ + +static void *qemu_tcg_cpu_thread_fn(void *arg) +{ + CPUState *cpu = arg; + + assert(tcg_enabled()); + g_assert(!use_icount); + + rcu_register_thread(); + tcg_register_thread(); + + qemu_mutex_lock_iothread(); + qemu_thread_get_self(cpu->thread); + + cpu->thread_id = qemu_get_thread_id(); + cpu->created = true; + cpu->can_do_io = 1; + current_cpu = cpu; + qemu_cond_signal(&qemu_cpu_cond); + qemu_guest_random_seed_thread_part2(cpu->random_seed); + + /* process any pending work */ + cpu->exit_request = 1; + + do { + if (cpu_can_run(cpu)) { + int r; + qemu_mutex_unlock_iothread(); + r = tcg_cpu_exec(cpu); + qemu_mutex_lock_iothread(); + switch (r) { + case EXCP_DEBUG: + cpu_handle_guest_debug(cpu); + break; + case EXCP_HALTED: + /* during start-up the vCPU is reset and the thread is + * kicked several times. If we don't ensure we go back + * to sleep in the halted state we won't cleanly + * start-up when the vCPU is enabled. + * + * cpu->halted should ensure we sleep in wait_io_event + */ + g_assert(cpu->halted); + break; + case EXCP_ATOMIC: + qemu_mutex_unlock_iothread(); + cpu_exec_step_atomic(cpu); + qemu_mutex_lock_iothread(); + default: + /* Ignore everything else? 
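+ * (Note that the EXCP_ATOMIC case above falls through to this + * break once the atomic step has completed.)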
*/ + break; + } + } + + atomic_mb_set(&cpu->exit_request, 0); + qemu_wait_io_event(cpu); + } while (!cpu->unplug || cpu_can_run(cpu)); + + qemu_tcg_destroy_vcpu(cpu); + cpu->created = false; + qemu_cond_signal(&qemu_cpu_cond); + qemu_mutex_unlock_iothread(); + rcu_unregister_thread(); + return NULL; +} + +static void qemu_cpu_kick_thread(CPUState *cpu) +{ +#ifndef _WIN32 + int err; + + if (cpu->thread_kicked) { + return; + } + cpu->thread_kicked = true; + err = pthread_kill(cpu->thread->thread, SIG_IPI); + if (err && err != ESRCH) { + fprintf(stderr, "qemu:%s: %s", __func__, strerror(err)); + exit(1); + } +#else /* _WIN32 */ + if (!qemu_cpu_is_self(cpu)) { + if (whpx_enabled()) { + whpx_vcpu_kick(cpu); + } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) { + fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n", + __func__, GetLastError()); + exit(1); + } + } +#endif +} + +void qemu_cpu_kick(CPUState *cpu) +{ + qemu_cond_broadcast(cpu->halt_cond); + if (tcg_enabled()) { + if (qemu_tcg_mttcg_enabled()) { + cpu_exit(cpu); + } else { + qemu_cpu_kick_rr_cpus(); + } + } else { + if (hax_enabled()) { + /* + * FIXME: race condition with the exit_request check in + * hax_vcpu_hax_exec + */ + cpu->exit_request = 1; + } + qemu_cpu_kick_thread(cpu); + } +} + +void qemu_cpu_kick_self(void) +{ + assert(current_cpu); + qemu_cpu_kick_thread(current_cpu); +} + +bool qemu_cpu_is_self(CPUState *cpu) +{ + return qemu_thread_is_self(cpu->thread); +} + +bool qemu_in_vcpu_thread(void) +{ + return current_cpu && qemu_cpu_is_self(current_cpu); +} + +static __thread bool iothread_locked = false; + +bool qemu_mutex_iothread_locked(void) +{ + return iothread_locked; +} + +/* + * The BQL is taken from so many places that it is worth profiling the + * callers directly, instead of funneling them all through a single function. 
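+ * + * An illustrative caller pattern (not a real call site): + * + * qemu_mutex_lock_iothread(); + * ... touch device or memory-map state ... + * qemu_mutex_unlock_iothread();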
+ */ +void qemu_mutex_lock_iothread_impl(const char *file, int line) +{ + QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func); + + g_assert(!qemu_mutex_iothread_locked()); + bql_lock(&qemu_global_mutex, file, line); + iothread_locked = true; +} + +void qemu_mutex_unlock_iothread(void) +{ + g_assert(qemu_mutex_iothread_locked()); + iothread_locked = false; + qemu_mutex_unlock(&qemu_global_mutex); +} + +void qemu_cond_wait_iothread(QemuCond *cond) +{ + qemu_cond_wait(cond, &qemu_global_mutex); +} + +static bool all_vcpus_paused(void) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + if (!cpu->stopped) { + return false; + } + } + + return true; +} + +void pause_all_vcpus(void) +{ + CPUState *cpu; + + qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false); + CPU_FOREACH(cpu) { + if (qemu_cpu_is_self(cpu)) { + qemu_cpu_stop(cpu, true); + } else { + cpu->stop = true; + qemu_cpu_kick(cpu); + } + } + + /* We need to drop the replay_lock so any vCPU threads woken up + * can finish their replay tasks + */ + replay_mutex_unlock(); + + while (!all_vcpus_paused()) { + qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex); + CPU_FOREACH(cpu) { + qemu_cpu_kick(cpu); + } + } + + qemu_mutex_unlock_iothread(); + replay_mutex_lock(); + qemu_mutex_lock_iothread(); +} + +void cpu_resume(CPUState *cpu) +{ + cpu->stop = false; + cpu->stopped = false; + qemu_cpu_kick(cpu); +} + +void resume_all_vcpus(void) +{ + CPUState *cpu; + + if (!runstate_is_running()) { + return; + } + + qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true); + CPU_FOREACH(cpu) { + cpu_resume(cpu); + } +} + +void cpu_remove_sync(CPUState *cpu) +{ + cpu->stop = true; + cpu->unplug = true; + qemu_cpu_kick(cpu); + qemu_mutex_unlock_iothread(); + qemu_thread_join(cpu->thread); + qemu_mutex_lock_iothread(); +} + +/* For temporary buffers for forming a name */ +#define VCPU_THREAD_NAME_SIZE 16 + +static void qemu_tcg_init_vcpu(CPUState *cpu) +{ + char thread_name[VCPU_THREAD_NAME_SIZE]; + static QemuCond *single_tcg_halt_cond; + static QemuThread *single_tcg_cpu_thread; + static int tcg_region_inited; + + assert(tcg_enabled()); + /* + * Initialize TCG regions--once. Now is a good time, because: + * (1) TCG's init context, prologue and target globals have been set up. + * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the + * -accel flag is processed, so the check doesn't work then). 
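+ * The tcg_region_inited guard below makes this a one-shot + * initialization even though qemu_tcg_init_vcpu() runs per vCPU.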
+ */ + if (!tcg_region_inited) { + tcg_region_inited = 1; + tcg_region_init(); + } + + if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) { + cpu->thread = g_malloc0(sizeof(QemuThread)); + cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + qemu_cond_init(cpu->halt_cond); + + if (qemu_tcg_mttcg_enabled()) { + /* create a thread per vCPU with TCG (MTTCG) */ + parallel_cpus = true; + snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG", + cpu->cpu_index); + + qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn, + cpu, QEMU_THREAD_JOINABLE); + + } else { + /* share a single thread for all cpus with TCG */ + snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG"); + qemu_thread_create(cpu->thread, thread_name, + qemu_tcg_rr_cpu_thread_fn, + cpu, QEMU_THREAD_JOINABLE); + + single_tcg_halt_cond = cpu->halt_cond; + single_tcg_cpu_thread = cpu->thread; + } +#ifdef _WIN32 + cpu->hThread = qemu_thread_get_handle(cpu->thread); +#endif + } else { + /* For non-MTTCG cases we share the thread */ + cpu->thread = single_tcg_cpu_thread; + cpu->halt_cond = single_tcg_halt_cond; + cpu->thread_id = first_cpu->thread_id; + cpu->can_do_io = 1; + cpu->created = true; + } +} + +static void qemu_hax_start_vcpu(CPUState *cpu) +{ + char thread_name[VCPU_THREAD_NAME_SIZE]; + + cpu->thread = g_malloc0(sizeof(QemuThread)); + cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + qemu_cond_init(cpu->halt_cond); + + snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX", + cpu->cpu_index); + qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn, + cpu, QEMU_THREAD_JOINABLE); +#ifdef _WIN32 + cpu->hThread = qemu_thread_get_handle(cpu->thread); +#endif +} + +static void qemu_kvm_start_vcpu(CPUState *cpu) +{ + char thread_name[VCPU_THREAD_NAME_SIZE]; + + cpu->thread = g_malloc0(sizeof(QemuThread)); + cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + qemu_cond_init(cpu->halt_cond); + snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM", + cpu->cpu_index); + qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn, + cpu, QEMU_THREAD_JOINABLE); +} + +static void qemu_hvf_start_vcpu(CPUState *cpu) +{ + char thread_name[VCPU_THREAD_NAME_SIZE]; + + /* HVF currently does not support TCG, and only runs in + * unrestricted-guest mode. 
*/ + assert(hvf_enabled()); + + cpu->thread = g_malloc0(sizeof(QemuThread)); + cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + qemu_cond_init(cpu->halt_cond); + + snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF", + cpu->cpu_index); + qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn, + cpu, QEMU_THREAD_JOINABLE); +} + +static void qemu_whpx_start_vcpu(CPUState *cpu) +{ + char thread_name[VCPU_THREAD_NAME_SIZE]; + + cpu->thread = g_malloc0(sizeof(QemuThread)); + cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + qemu_cond_init(cpu->halt_cond); + snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX", + cpu->cpu_index); + qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn, + cpu, QEMU_THREAD_JOINABLE); +#ifdef _WIN32 + cpu->hThread = qemu_thread_get_handle(cpu->thread); +#endif +} + +static void qemu_dummy_start_vcpu(CPUState *cpu) +{ + char thread_name[VCPU_THREAD_NAME_SIZE]; + + cpu->thread = g_malloc0(sizeof(QemuThread)); + cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + qemu_cond_init(cpu->halt_cond); + snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY", + cpu->cpu_index); + qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu, + QEMU_THREAD_JOINABLE); +} + +void qemu_init_vcpu(CPUState *cpu) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + + cpu->nr_cores = ms->smp.cores; + cpu->nr_threads = ms->smp.threads; + cpu->stopped = true; + cpu->random_seed = qemu_guest_random_seed_thread_part1(); + + if (!cpu->as) { + /* If the target cpu hasn't set up any address spaces itself, + * give it the default one. + */ + cpu->num_ases = 1; + cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory); + } + + if (kvm_enabled()) { + qemu_kvm_start_vcpu(cpu); + } else if (hax_enabled()) { + qemu_hax_start_vcpu(cpu); + } else if (hvf_enabled()) { + qemu_hvf_start_vcpu(cpu); + } else if (tcg_enabled()) { + qemu_tcg_init_vcpu(cpu); + } else if (whpx_enabled()) { + qemu_whpx_start_vcpu(cpu); + } else { + qemu_dummy_start_vcpu(cpu); + } + + while (!cpu->created) { + qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex); + } +} + +void cpu_stop_current(void) +{ + if (current_cpu) { + current_cpu->stop = true; + cpu_exit(current_cpu); + } +} + +int vm_stop(RunState state) +{ + if (qemu_in_vcpu_thread()) { + qemu_system_vmstop_request_prepare(); + qemu_system_vmstop_request(state); + /* + * FIXME: should not return to device code in case + * vm_stop() has been requested. + */ + cpu_stop_current(); + return 0; + } + + return do_vm_stop(state, true); +} + +/** + * Prepare for (re)starting the VM. + * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already + * running or in case of an error condition), 0 otherwise. + */ +int vm_prepare_start(void) +{ + RunState requested; + + qemu_vmstop_requested(&requested); + if (runstate_is_running() && requested == RUN_STATE__MAX) { + return -1; + } + + /* Ensure that a STOP/RESUME pair of events is emitted if a + * vmstop request was pending. The BLOCK_IO_ERROR event, for + * example, according to documentation is always followed by + * the STOP event. 
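+ * In other words, a client that requested a stop while the VM kept + * running still observes a STOP/RESUME pair from the code below.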
+ */ + if (runstate_is_running()) { + qapi_event_send_stop(); + qapi_event_send_resume(); + return -1; + } + + /* We are sending this now, but the CPUs will be resumed shortly later */ + qapi_event_send_resume(); + + cpu_enable_ticks(); + runstate_set(RUN_STATE_RUNNING); + vm_state_notify(1, RUN_STATE_RUNNING); + return 0; +} + +void vm_start(void) +{ + if (!vm_prepare_start()) { + resume_all_vcpus(); + } +} + +/* does a state transition even if the VM is already stopped, + current state is forgotten forever */ +int vm_stop_force_state(RunState state) +{ + if (runstate_is_running()) { + return vm_stop(state); + } else { + runstate_set(state); + + bdrv_drain_all(); + /* Make sure to return an error if the flush in a previous vm_stop() + * failed. */ + return bdrv_flush_all(); + } +} + +void list_cpus(const char *optarg) +{ + /* XXX: implement xxx_cpu_list for targets that still miss it */ +#if defined(cpu_list) + cpu_list(); +#endif +} + +void qmp_memsave(int64_t addr, int64_t size, const char *filename, + bool has_cpu, int64_t cpu_index, Error **errp) +{ + FILE *f; + uint32_t l; + CPUState *cpu; + uint8_t buf[1024]; + int64_t orig_addr = addr, orig_size = size; + + if (!has_cpu) { + cpu_index = 0; + } + + cpu = qemu_get_cpu(cpu_index); + if (cpu == NULL) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index", + "a CPU number"); + return; + } + + f = fopen(filename, "wb"); + if (!f) { + error_setg_file_open(errp, errno, filename); + return; + } + + while (size != 0) { + l = sizeof(buf); + if (l > size) + l = size; + if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) { + error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64 + " specified", orig_addr, orig_size); + goto exit; + } + if (fwrite(buf, 1, l, f) != l) { + error_setg(errp, QERR_IO_ERROR); + goto exit; + } + addr += l; + size -= l; + } + +exit: + fclose(f); +} + +void qmp_pmemsave(int64_t addr, int64_t size, const char *filename, + Error **errp) +{ + FILE *f; + uint32_t l; + uint8_t buf[1024]; + + f = fopen(filename, "wb"); + if (!f) { + error_setg_file_open(errp, errno, filename); + return; + } + + while (size != 0) { + l = sizeof(buf); + if (l > size) + l = size; + cpu_physical_memory_read(addr, buf, l); + if (fwrite(buf, 1, l, f) != l) { + error_setg(errp, QERR_IO_ERROR); + goto exit; + } + addr += l; + size -= l; + } + +exit: + fclose(f); +} + +void qmp_inject_nmi(Error **errp) +{ + nmi_monitor_handle(monitor_get_cpu_index(), errp); +} + +void dump_drift_info(void) +{ + if (!use_icount) { + return; + } + + qemu_printf("Host - Guest clock %"PRIi64" ms\n", + (cpu_get_clock() - cpu_get_icount())/SCALE_MS); + if (icount_align_option) { + qemu_printf("Max guest delay %"PRIi64" ms\n", + -max_delay / SCALE_MS); + qemu_printf("Max guest advance %"PRIi64" ms\n", + max_advance / SCALE_MS); + } else { + qemu_printf("Max guest delay NA\n"); + qemu_printf("Max guest advance NA\n"); + } +} diff --git a/softmmu/ioport.c b/softmmu/ioport.c new file mode 100644 index 0000000000..04e360e79a --- /dev/null +++ b/softmmu/ioport.c @@ -0,0 +1,299 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to 
do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +/* + * split out ioport-related code from vl.c. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/ioport.h" +#include "trace-root.h" +#include "exec/memory.h" +#include "exec/address-spaces.h" + +typedef struct MemoryRegionPortioList { + MemoryRegion mr; + void *portio_opaque; + MemoryRegionPortio ports[]; +} MemoryRegionPortioList; + +static uint64_t unassigned_io_read(void *opaque, hwaddr addr, unsigned size) +{ + return -1ULL; +} + +static void unassigned_io_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ +} + +const MemoryRegionOps unassigned_io_ops = { + .read = unassigned_io_read, + .write = unassigned_io_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +void cpu_outb(uint32_t addr, uint8_t val) +{ + trace_cpu_out(addr, 'b', val); + address_space_write(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, + &val, 1); +} + +void cpu_outw(uint32_t addr, uint16_t val) +{ + uint8_t buf[2]; + + trace_cpu_out(addr, 'w', val); + stw_p(buf, val); + address_space_write(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, + buf, 2); +} + +void cpu_outl(uint32_t addr, uint32_t val) +{ + uint8_t buf[4]; + + trace_cpu_out(addr, 'l', val); + stl_p(buf, val); + address_space_write(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, + buf, 4); +} + +uint8_t cpu_inb(uint32_t addr) +{ + uint8_t val; + + address_space_read(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, + &val, 1); + trace_cpu_in(addr, 'b', val); + return val; +} + +uint16_t cpu_inw(uint32_t addr) +{ + uint8_t buf[2]; + uint16_t val; + + address_space_read(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, buf, 2); + val = lduw_p(buf); + trace_cpu_in(addr, 'w', val); + return val; +} + +uint32_t cpu_inl(uint32_t addr) +{ + uint8_t buf[4]; + uint32_t val; + + address_space_read(&address_space_io, addr, MEMTXATTRS_UNSPECIFIED, buf, 4); + val = ldl_p(buf); + trace_cpu_in(addr, 'l', val); + return val; +} + +void portio_list_init(PortioList *piolist, + Object *owner, + const MemoryRegionPortio *callbacks, + void *opaque, const char *name) +{ + unsigned n = 0; + + while (callbacks[n].size) { + ++n; + } + + piolist->ports = callbacks; + piolist->nr = 0; + piolist->regions = g_new0(MemoryRegion *, n); + piolist->address_space = NULL; + piolist->opaque = opaque; + piolist->owner = owner; + piolist->name = name; + piolist->flush_coalesced_mmio = false; +} + +void portio_list_set_flush_coalesced(PortioList *piolist) +{ + piolist->flush_coalesced_mmio = true; +} + +void portio_list_destroy(PortioList *piolist) +{ + MemoryRegionPortioList *mrpio; + unsigned i; + + for (i = 0; i < piolist->nr; ++i) { + mrpio = container_of(piolist->regions[i], MemoryRegionPortioList, mr); + object_unparent(OBJECT(&mrpio->mr)); + g_free(mrpio); + } + g_free(piolist->regions); +} + +static const MemoryRegionPortio *find_portio(MemoryRegionPortioList *mrpio, + uint64_t
offset, unsigned size, + bool write) +{ + const MemoryRegionPortio *mrp; + + for (mrp = mrpio->ports; mrp->size; ++mrp) { + if (offset >= mrp->offset && offset < mrp->offset + mrp->len && + size == mrp->size && + (write ? (bool)mrp->write : (bool)mrp->read)) { + return mrp; + } + } + return NULL; +} + +static uint64_t portio_read(void *opaque, hwaddr addr, unsigned size) +{ + MemoryRegionPortioList *mrpio = opaque; + const MemoryRegionPortio *mrp = find_portio(mrpio, addr, size, false); + uint64_t data; + + data = ((uint64_t)1 << (size * 8)) - 1; + if (mrp) { + data = mrp->read(mrpio->portio_opaque, mrp->base + addr); + } else if (size == 2) { + mrp = find_portio(mrpio, addr, 1, false); + if (mrp) { + data = mrp->read(mrpio->portio_opaque, mrp->base + addr); + if (addr + 1 < mrp->offset + mrp->len) { + data |= mrp->read(mrpio->portio_opaque, mrp->base + addr + 1) << 8; + } else { + data |= 0xff00; + } + } + } + return data; +} + +static void portio_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + MemoryRegionPortioList *mrpio = opaque; + const MemoryRegionPortio *mrp = find_portio(mrpio, addr, size, true); + + if (mrp) { + mrp->write(mrpio->portio_opaque, mrp->base + addr, data); + } else if (size == 2) { + mrp = find_portio(mrpio, addr, 1, true); + if (mrp) { + mrp->write(mrpio->portio_opaque, mrp->base + addr, data & 0xff); + if (addr + 1 < mrp->offset + mrp->len) { + mrp->write(mrpio->portio_opaque, mrp->base + addr + 1, data >> 8); + } + } + } +} + +static const MemoryRegionOps portio_ops = { + .read = portio_read, + .write = portio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.unaligned = true, + .impl.unaligned = true, +}; + +static void portio_list_add_1(PortioList *piolist, + const MemoryRegionPortio *pio_init, + unsigned count, unsigned start, + unsigned off_low, unsigned off_high) +{ + MemoryRegionPortioList *mrpio; + unsigned i; + + /* Copy the sub-list and null-terminate it. */ + mrpio = g_malloc0(sizeof(MemoryRegionPortioList) + + sizeof(MemoryRegionPortio) * (count + 1)); + mrpio->portio_opaque = piolist->opaque; + memcpy(mrpio->ports, pio_init, sizeof(MemoryRegionPortio) * count); + memset(mrpio->ports + count, 0, sizeof(MemoryRegionPortio)); + + /* Adjust the offsets to all be zero-based for the region. */ + for (i = 0; i < count; ++i) { + mrpio->ports[i].offset -= off_low; + mrpio->ports[i].base = start + off_low; + } + + memory_region_init_io(&mrpio->mr, piolist->owner, &portio_ops, mrpio, + piolist->name, off_high - off_low); + if (piolist->flush_coalesced_mmio) { + memory_region_set_flush_coalesced(&mrpio->mr); + } + memory_region_add_subregion(piolist->address_space, + start + off_low, &mrpio->mr); + piolist->regions[piolist->nr] = &mrpio->mr; + ++piolist->nr; +} + +void portio_list_add(PortioList *piolist, + MemoryRegion *address_space, + uint32_t start) +{ + const MemoryRegionPortio *pio, *pio_start = piolist->ports; + unsigned int off_low, off_high, off_last, count; + + piolist->address_space = address_space; + + /* Handle the first entry specially. */ + off_last = off_low = pio_start->offset; + off_high = off_low + pio_start->len + pio_start->size - 1; + count = 1; + + for (pio = pio_start + 1; pio->size != 0; pio++, count++) { + /* All entries must be sorted by offset. */ + assert(pio->offset >= off_last); + off_last = pio->offset; + + /* If we see a hole, break the region. */ + if (off_last > off_high) { + portio_list_add_1(piolist, pio_start, count, start, off_low, + off_high); + /* ... and start collecting anew. 
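+ * (E.g. portio entries covering offsets 0x0-0x3 and 0x10-0x13 end up + * in two separate MemoryRegions rather than one region with a hole.)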
*/ + pio_start = pio; + off_low = off_last; + off_high = off_low + pio->len + pio_start->size - 1; + count = 0; + } else if (off_last + pio->len > off_high) { + off_high = off_last + pio->len + pio_start->size - 1; + } + } + + /* There will always be an open sub-list. */ + portio_list_add_1(piolist, pio_start, count, start, off_low, off_high); +} + +void portio_list_del(PortioList *piolist) +{ + MemoryRegionPortioList *mrpio; + unsigned i; + + for (i = 0; i < piolist->nr; ++i) { + mrpio = container_of(piolist->regions[i], MemoryRegionPortioList, mr); + memory_region_del_subregion(piolist->address_space, &mrpio->mr); + } +} diff --git a/softmmu/memory.c b/softmmu/memory.c new file mode 100644 index 0000000000..9200b20130 --- /dev/null +++ b/softmmu/memory.c @@ -0,0 +1,3250 @@ +/* + * Physical memory management + * + * Copyright 2011 Red Hat, Inc. and/or its affiliates + * + * Authors: + * Avi Kivity + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "cpu.h" +#include "exec/memory.h" +#include "exec/address-spaces.h" +#include "qapi/visitor.h" +#include "qemu/bitops.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "qemu/qemu-print.h" +#include "qom/object.h" +#include "trace-root.h" + +#include "exec/memory-internal.h" +#include "exec/ram_addr.h" +#include "sysemu/kvm.h" +#include "sysemu/runstate.h" +#include "sysemu/tcg.h" +#include "sysemu/accel.h" +#include "hw/boards.h" +#include "migration/vmstate.h" + +//#define DEBUG_UNASSIGNED + +static unsigned memory_region_transaction_depth; +static bool memory_region_update_pending; +static bool ioeventfd_update_pending; +bool global_dirty_log; + +static QTAILQ_HEAD(, MemoryListener) memory_listeners + = QTAILQ_HEAD_INITIALIZER(memory_listeners); + +static QTAILQ_HEAD(, AddressSpace) address_spaces + = QTAILQ_HEAD_INITIALIZER(address_spaces); + +static GHashTable *flat_views; + +typedef struct AddrRange AddrRange; + +/* + * Note that signed integers are needed for negative offsetting in aliases + * (large MemoryRegion::alias_offset). + */ +struct AddrRange { + Int128 start; + Int128 size; +}; + +static AddrRange addrrange_make(Int128 start, Int128 size) +{ + return (AddrRange) { start, size }; +} + +static bool addrrange_equal(AddrRange r1, AddrRange r2) +{ + return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size); +} + +static Int128 addrrange_end(AddrRange r) +{ + return int128_add(r.start, r.size); +} + +static AddrRange addrrange_shift(AddrRange range, Int128 delta) +{ + int128_addto(&range.start, delta); + return range; +} + +static bool addrrange_contains(AddrRange range, Int128 addr) +{ + return int128_ge(addr, range.start) + && int128_lt(addr, addrrange_end(range)); +} + +static bool addrrange_intersects(AddrRange r1, AddrRange r2) +{ + return addrrange_contains(r1, r2.start) + || addrrange_contains(r2, r1.start); +} + +static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2) +{ + Int128 start = int128_max(r1.start, r2.start); + Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2)); + return addrrange_make(start, int128_sub(end, start)); +} + +enum ListenerDirection { Forward, Reverse }; + +#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...) 
\ + do { \ + MemoryListener *_listener; \ + \ + switch (_direction) { \ + case Forward: \ + QTAILQ_FOREACH(_listener, &memory_listeners, link) { \ + if (_listener->_callback) { \ + _listener->_callback(_listener, ##_args); \ + } \ + } \ + break; \ + case Reverse: \ + QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \ + if (_listener->_callback) { \ + _listener->_callback(_listener, ##_args); \ + } \ + } \ + break; \ + default: \ + abort(); \ + } \ + } while (0) + +#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \ + do { \ + MemoryListener *_listener; \ + \ + switch (_direction) { \ + case Forward: \ + QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) { \ + if (_listener->_callback) { \ + _listener->_callback(_listener, _section, ##_args); \ + } \ + } \ + break; \ + case Reverse: \ + QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \ + if (_listener->_callback) { \ + _listener->_callback(_listener, _section, ##_args); \ + } \ + } \ + break; \ + default: \ + abort(); \ + } \ + } while (0) + +/* No need to ref/unref .mr, the FlatRange keeps it alive. */ +#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \ + do { \ + MemoryRegionSection mrs = section_from_flat_range(fr, \ + address_space_to_flatview(as)); \ + MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args); \ + } while(0) + +struct CoalescedMemoryRange { + AddrRange addr; + QTAILQ_ENTRY(CoalescedMemoryRange) link; +}; + +struct MemoryRegionIoeventfd { + AddrRange addr; + bool match_data; + uint64_t data; + EventNotifier *e; +}; + +static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a, + MemoryRegionIoeventfd *b) +{ + if (int128_lt(a->addr.start, b->addr.start)) { + return true; + } else if (int128_gt(a->addr.start, b->addr.start)) { + return false; + } else if (int128_lt(a->addr.size, b->addr.size)) { + return true; + } else if (int128_gt(a->addr.size, b->addr.size)) { + return false; + } else if (a->match_data < b->match_data) { + return true; + } else if (a->match_data > b->match_data) { + return false; + } else if (a->match_data) { + if (a->data < b->data) { + return true; + } else if (a->data > b->data) { + return false; + } + } + if (a->e < b->e) { + return true; + } else if (a->e > b->e) { + return false; + } + return false; +} + +static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a, + MemoryRegionIoeventfd *b) +{ + return !memory_region_ioeventfd_before(a, b) + && !memory_region_ioeventfd_before(b, a); +} + +/* Range of memory in the global map. Addresses are absolute. 
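+ * A FlatView holds a sorted array of these; adjacent compatible + * ranges are merged by flatview_simplify() using can_merge().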
*/ +struct FlatRange { + MemoryRegion *mr; + hwaddr offset_in_region; + AddrRange addr; + uint8_t dirty_log_mask; + bool romd_mode; + bool readonly; + bool nonvolatile; +}; + +#define FOR_EACH_FLAT_RANGE(var, view) \ + for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var) + +static inline MemoryRegionSection +section_from_flat_range(FlatRange *fr, FlatView *fv) +{ + return (MemoryRegionSection) { + .mr = fr->mr, + .fv = fv, + .offset_within_region = fr->offset_in_region, + .size = fr->addr.size, + .offset_within_address_space = int128_get64(fr->addr.start), + .readonly = fr->readonly, + .nonvolatile = fr->nonvolatile, + }; +} + +static bool flatrange_equal(FlatRange *a, FlatRange *b) +{ + return a->mr == b->mr + && addrrange_equal(a->addr, b->addr) + && a->offset_in_region == b->offset_in_region + && a->romd_mode == b->romd_mode + && a->readonly == b->readonly + && a->nonvolatile == b->nonvolatile; +} + +static FlatView *flatview_new(MemoryRegion *mr_root) +{ + FlatView *view; + + view = g_new0(FlatView, 1); + view->ref = 1; + view->root = mr_root; + memory_region_ref(mr_root); + trace_flatview_new(view, mr_root); + + return view; +} + +/* Insert a range into a given position. Caller is responsible for maintaining + * sorting order. + */ +static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range) +{ + if (view->nr == view->nr_allocated) { + view->nr_allocated = MAX(2 * view->nr, 10); + view->ranges = g_realloc(view->ranges, + view->nr_allocated * sizeof(*view->ranges)); + } + memmove(view->ranges + pos + 1, view->ranges + pos, + (view->nr - pos) * sizeof(FlatRange)); + view->ranges[pos] = *range; + memory_region_ref(range->mr); + ++view->nr; +} + +static void flatview_destroy(FlatView *view) +{ + int i; + + trace_flatview_destroy(view, view->root); + if (view->dispatch) { + address_space_dispatch_free(view->dispatch); + } + for (i = 0; i < view->nr; i++) { + memory_region_unref(view->ranges[i].mr); + } + g_free(view->ranges); + memory_region_unref(view->root); + g_free(view); +} + +static bool flatview_ref(FlatView *view) +{ + return atomic_fetch_inc_nonzero(&view->ref) > 0; +} + +void flatview_unref(FlatView *view) +{ + if (atomic_fetch_dec(&view->ref) == 1) { + trace_flatview_destroy_rcu(view, view->root); + assert(view->root); + call_rcu(view, flatview_destroy, rcu); + } +} + +static bool can_merge(FlatRange *r1, FlatRange *r2) +{ + return int128_eq(addrrange_end(r1->addr), r2->addr.start) + && r1->mr == r2->mr + && int128_eq(int128_add(int128_make64(r1->offset_in_region), + r1->addr.size), + int128_make64(r2->offset_in_region)) + && r1->dirty_log_mask == r2->dirty_log_mask + && r1->romd_mode == r2->romd_mode + && r1->readonly == r2->readonly + && r1->nonvolatile == r2->nonvolatile; +} + +/* Attempt to simplify a view by merging adjacent ranges */ +static void flatview_simplify(FlatView *view) +{ + unsigned i, j, k; + + i = 0; + while (i < view->nr) { + j = i + 1; + while (j < view->nr + && can_merge(&view->ranges[j-1], &view->ranges[j])) { + int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size); + ++j; + } + ++i; + for (k = i; k < j; k++) { + memory_region_unref(view->ranges[k].mr); + } + memmove(&view->ranges[i], &view->ranges[j], + (view->nr - j) * sizeof(view->ranges[j])); + view->nr -= j - i; + } +} + +static bool memory_region_big_endian(MemoryRegion *mr) +{ +#ifdef TARGET_WORDS_BIGENDIAN + return mr->ops->endianness != DEVICE_LITTLE_ENDIAN; +#else + return mr->ops->endianness == DEVICE_BIG_ENDIAN; +#endif +} + +static void 
adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op) +{ + if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) { + switch (op & MO_SIZE) { + case MO_8: + break; + case MO_16: + *data = bswap16(*data); + break; + case MO_32: + *data = bswap32(*data); + break; + case MO_64: + *data = bswap64(*data); + break; + default: + g_assert_not_reached(); + } + } +} + +static inline void memory_region_shift_read_access(uint64_t *value, + signed shift, + uint64_t mask, + uint64_t tmp) +{ + if (shift >= 0) { + *value |= (tmp & mask) << shift; + } else { + *value |= (tmp & mask) >> -shift; + } +} + +static inline uint64_t memory_region_shift_write_access(uint64_t *value, + signed shift, + uint64_t mask) +{ + uint64_t tmp; + + if (shift >= 0) { + tmp = (*value >> shift) & mask; + } else { + tmp = (*value << -shift) & mask; + } + + return tmp; +} + +static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset) +{ + MemoryRegion *root; + hwaddr abs_addr = offset; + + abs_addr += mr->addr; + for (root = mr; root->container; ) { + root = root->container; + abs_addr += root->addr; + } + + return abs_addr; +} + +static int get_cpu_index(void) +{ + if (current_cpu) { + return current_cpu->cpu_index; + } + return -1; +} + +static MemTxResult memory_region_read_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + signed shift, + uint64_t mask, + MemTxAttrs attrs) +{ + uint64_t tmp; + + tmp = mr->ops->read(mr->opaque, addr, size); + if (mr->subpage) { + trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size); + } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) { + hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); + trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size); + } + memory_region_shift_read_access(value, shift, mask, tmp); + return MEMTX_OK; +} + +static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + signed shift, + uint64_t mask, + MemTxAttrs attrs) +{ + uint64_t tmp = 0; + MemTxResult r; + + r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs); + if (mr->subpage) { + trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size); + } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) { + hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); + trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size); + } + memory_region_shift_read_access(value, shift, mask, tmp); + return r; +} + +static MemTxResult memory_region_write_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + signed shift, + uint64_t mask, + MemTxAttrs attrs) +{ + uint64_t tmp = memory_region_shift_write_access(value, shift, mask); + + if (mr->subpage) { + trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size); + } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) { + hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); + trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size); + } + mr->ops->write(mr->opaque, addr, tmp, size); + return MEMTX_OK; +} + +static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + signed shift, + uint64_t mask, + MemTxAttrs attrs) +{ + uint64_t tmp = memory_region_shift_write_access(value, shift, mask); + + if (mr->subpage) { + trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size); + } 
else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) { + hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); + trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size); + } + return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs); +} + +static MemTxResult access_with_adjusted_size(hwaddr addr, + uint64_t *value, + unsigned size, + unsigned access_size_min, + unsigned access_size_max, + MemTxResult (*access_fn) + (MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + signed shift, + uint64_t mask, + MemTxAttrs attrs), + MemoryRegion *mr, + MemTxAttrs attrs) +{ + uint64_t access_mask; + unsigned access_size; + unsigned i; + MemTxResult r = MEMTX_OK; + + if (!access_size_min) { + access_size_min = 1; + } + if (!access_size_max) { + access_size_max = 4; + } + + /* FIXME: support unaligned access? */ + access_size = MAX(MIN(size, access_size_max), access_size_min); + access_mask = MAKE_64BIT_MASK(0, access_size * 8); + if (memory_region_big_endian(mr)) { + for (i = 0; i < size; i += access_size) { + r |= access_fn(mr, addr + i, value, access_size, + (size - access_size - i) * 8, access_mask, attrs); + } + } else { + for (i = 0; i < size; i += access_size) { + r |= access_fn(mr, addr + i, value, access_size, i * 8, + access_mask, attrs); + } + } + return r; +} + +static AddressSpace *memory_region_to_address_space(MemoryRegion *mr) +{ + AddressSpace *as; + + while (mr->container) { + mr = mr->container; + } + QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { + if (mr == as->root) { + return as; + } + } + return NULL; +} + +/* Render a memory region into the global view. Ranges in @view obscure + * ranges in @mr. + */ +static void render_memory_region(FlatView *view, + MemoryRegion *mr, + Int128 base, + AddrRange clip, + bool readonly, + bool nonvolatile) +{ + MemoryRegion *subregion; + unsigned i; + hwaddr offset_in_region; + Int128 remain; + Int128 now; + FlatRange fr; + AddrRange tmp; + + if (!mr->enabled) { + return; + } + + int128_addto(&base, int128_make64(mr->addr)); + readonly |= mr->readonly; + nonvolatile |= mr->nonvolatile; + + tmp = addrrange_make(base, mr->size); + + if (!addrrange_intersects(tmp, clip)) { + return; + } + + clip = addrrange_intersection(tmp, clip); + + if (mr->alias) { + int128_subfrom(&base, int128_make64(mr->alias->addr)); + int128_subfrom(&base, int128_make64(mr->alias_offset)); + render_memory_region(view, mr->alias, base, clip, + readonly, nonvolatile); + return; + } + + /* Render subregions in priority order. */ + QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) { + render_memory_region(view, subregion, base, clip, + readonly, nonvolatile); + } + + if (!mr->terminates) { + return; + } + + offset_in_region = int128_get64(int128_sub(clip.start, base)); + base = clip.start; + remain = clip.size; + + fr.mr = mr; + fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr); + fr.romd_mode = mr->romd_mode; + fr.readonly = readonly; + fr.nonvolatile = nonvolatile; + + /* Render the region itself into any gaps left by the current view. 
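+ * view->ranges is sorted by address, so the single forward scan + * below is enough to slot the region into the remaining gaps.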
*/ + for (i = 0; i < view->nr && int128_nz(remain); ++i) { + if (int128_ge(base, addrrange_end(view->ranges[i].addr))) { + continue; + } + if (int128_lt(base, view->ranges[i].addr.start)) { + now = int128_min(remain, + int128_sub(view->ranges[i].addr.start, base)); + fr.offset_in_region = offset_in_region; + fr.addr = addrrange_make(base, now); + flatview_insert(view, i, &fr); + ++i; + int128_addto(&base, now); + offset_in_region += int128_get64(now); + int128_subfrom(&remain, now); + } + now = int128_sub(int128_min(int128_add(base, remain), + addrrange_end(view->ranges[i].addr)), + base); + int128_addto(&base, now); + offset_in_region += int128_get64(now); + int128_subfrom(&remain, now); + } + if (int128_nz(remain)) { + fr.offset_in_region = offset_in_region; + fr.addr = addrrange_make(base, remain); + flatview_insert(view, i, &fr); + } +} + +static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr) +{ + while (mr->enabled) { + if (mr->alias) { + if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) { + /* The alias is included in its entirety. Use it as + * the "real" root, so that we can share more FlatViews. + */ + mr = mr->alias; + continue; + } + } else if (!mr->terminates) { + unsigned int found = 0; + MemoryRegion *child, *next = NULL; + QTAILQ_FOREACH(child, &mr->subregions, subregions_link) { + if (child->enabled) { + if (++found > 1) { + next = NULL; + break; + } + if (!child->addr && int128_ge(mr->size, child->size)) { + /* A child is included in its entirety. If it's the only + * enabled one, use it in the hope of finding an alias down the + * way. This will also let us share FlatViews. + */ + next = child; + } + } + } + if (found == 0) { + return NULL; + } + if (next) { + mr = next; + continue; + } + } + + return mr; + } + + return NULL; +} + +/* Render a memory topology into a list of disjoint absolute ranges. */ +static FlatView *generate_memory_topology(MemoryRegion *mr) +{ + int i; + FlatView *view; + + view = flatview_new(mr); + + if (mr) { + render_memory_region(view, mr, int128_zero(), + addrrange_make(int128_zero(), int128_2_64()), + false, false); + } + flatview_simplify(view); + + view->dispatch = address_space_dispatch_new(view); + for (i = 0; i < view->nr; i++) { + MemoryRegionSection mrs = + section_from_flat_range(&view->ranges[i], view); + flatview_add_to_dispatch(view, &mrs); + } + address_space_dispatch_compact(view->dispatch); + g_hash_table_replace(flat_views, mr, view); + + return view; +} + +static void address_space_add_del_ioeventfds(AddressSpace *as, + MemoryRegionIoeventfd *fds_new, + unsigned fds_new_nb, + MemoryRegionIoeventfd *fds_old, + unsigned fds_old_nb) +{ + unsigned iold, inew; + MemoryRegionIoeventfd *fd; + MemoryRegionSection section; + + /* Generate a symmetric difference of the old and new fd sets, adding + * and deleting as necessary. 
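+ * The merge below assumes both arrays are sorted by + * memory_region_ioeventfd_before().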
+ */ + + iold = inew = 0; + while (iold < fds_old_nb || inew < fds_new_nb) { + if (iold < fds_old_nb + && (inew == fds_new_nb + || memory_region_ioeventfd_before(&fds_old[iold], + &fds_new[inew]))) { + fd = &fds_old[iold]; + section = (MemoryRegionSection) { + .fv = address_space_to_flatview(as), + .offset_within_address_space = int128_get64(fd->addr.start), + .size = fd->addr.size, + }; + MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section, + fd->match_data, fd->data, fd->e); + ++iold; + } else if (inew < fds_new_nb + && (iold == fds_old_nb + || memory_region_ioeventfd_before(&fds_new[inew], + &fds_old[iold]))) { + fd = &fds_new[inew]; + section = (MemoryRegionSection) { + .fv = address_space_to_flatview(as), + .offset_within_address_space = int128_get64(fd->addr.start), + .size = fd->addr.size, + }; + MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section, + fd->match_data, fd->data, fd->e); + ++inew; + } else { + ++iold; + ++inew; + } + } +} + +FlatView *address_space_get_flatview(AddressSpace *as) +{ + FlatView *view; + + RCU_READ_LOCK_GUARD(); + do { + view = address_space_to_flatview(as); + /* If somebody has replaced as->current_map concurrently, + * flatview_ref returns false. + */ + } while (!flatview_ref(view)); + return view; +} + +static void address_space_update_ioeventfds(AddressSpace *as) +{ + FlatView *view; + FlatRange *fr; + unsigned ioeventfd_nb = 0; + unsigned ioeventfd_max; + MemoryRegionIoeventfd *ioeventfds; + AddrRange tmp; + unsigned i; + + /* + * It is likely that the number of ioeventfds hasn't changed much, so use + * the previous size as the starting value, with some headroom to avoid + * gratuitous reallocations. + */ + ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4); + ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max); + + view = address_space_get_flatview(as); + FOR_EACH_FLAT_RANGE(fr, view) { + for (i = 0; i < fr->mr->ioeventfd_nb; ++i) { + tmp = addrrange_shift(fr->mr->ioeventfds[i].addr, + int128_sub(fr->addr.start, + int128_make64(fr->offset_in_region))); + if (addrrange_intersects(fr->addr, tmp)) { + ++ioeventfd_nb; + if (ioeventfd_nb > ioeventfd_max) { + ioeventfd_max = MAX(ioeventfd_max * 2, 4); + ioeventfds = g_realloc(ioeventfds, + ioeventfd_max * sizeof(*ioeventfds)); + } + ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i]; + ioeventfds[ioeventfd_nb-1].addr = tmp; + } + } + } + + address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb, + as->ioeventfds, as->ioeventfd_nb); + + g_free(as->ioeventfds); + as->ioeventfds = ioeventfds; + as->ioeventfd_nb = ioeventfd_nb; + flatview_unref(view); +} + +/* + * Notify the memory listeners about the coalesced IO change events of + * range `cmr'. Only the part that has intersection of the specified + * FlatRange will be sent.
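+ * (addrrange_shift() first maps the region-relative coalesced range + * into the address space before it is clipped against fr->addr.)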
+ */ +static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as, + CoalescedMemoryRange *cmr, bool add) +{ + AddrRange tmp; + + tmp = addrrange_shift(cmr->addr, + int128_sub(fr->addr.start, + int128_make64(fr->offset_in_region))); + if (!addrrange_intersects(tmp, fr->addr)) { + return; + } + tmp = addrrange_intersection(tmp, fr->addr); + + if (add) { + MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add, + int128_get64(tmp.start), + int128_get64(tmp.size)); + } else { + MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del, + int128_get64(tmp.start), + int128_get64(tmp.size)); + } +} + +static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as) +{ + CoalescedMemoryRange *cmr; + + QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) { + flat_range_coalesced_io_notify(fr, as, cmr, false); + } +} + +static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as) +{ + MemoryRegion *mr = fr->mr; + CoalescedMemoryRange *cmr; + + if (QTAILQ_EMPTY(&mr->coalesced)) { + return; + } + + QTAILQ_FOREACH(cmr, &mr->coalesced, link) { + flat_range_coalesced_io_notify(fr, as, cmr, true); + } +} + +static void address_space_update_topology_pass(AddressSpace *as, + const FlatView *old_view, + const FlatView *new_view, + bool adding) +{ + unsigned iold, inew; + FlatRange *frold, *frnew; + + /* Generate a symmetric difference of the old and new memory maps. + * Kill ranges in the old map, and instantiate ranges in the new map. + */ + iold = inew = 0; + while (iold < old_view->nr || inew < new_view->nr) { + if (iold < old_view->nr) { + frold = &old_view->ranges[iold]; + } else { + frold = NULL; + } + if (inew < new_view->nr) { + frnew = &new_view->ranges[inew]; + } else { + frnew = NULL; + } + + if (frold + && (!frnew + || int128_lt(frold->addr.start, frnew->addr.start) + || (int128_eq(frold->addr.start, frnew->addr.start) + && !flatrange_equal(frold, frnew)))) { + /* In old but not in new, or in both but attributes changed. */ + + if (!adding) { + flat_range_coalesced_io_del(frold, as); + MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del); + } + + ++iold; + } else if (frold && frnew && flatrange_equal(frold, frnew)) { + /* In both and unchanged (except logging may have changed) */ + + if (adding) { + MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop); + if (frnew->dirty_log_mask & ~frold->dirty_log_mask) { + MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start, + frold->dirty_log_mask, + frnew->dirty_log_mask); + } + if (frold->dirty_log_mask & ~frnew->dirty_log_mask) { + MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop, + frold->dirty_log_mask, + frnew->dirty_log_mask); + } + } + + ++iold; + ++inew; + } else { + /* In new */ + + if (adding) { + MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add); + flat_range_coalesced_io_add(frnew, as); + } + + ++inew; + } + } +} + +static void flatviews_init(void) +{ + static FlatView *empty_view; + + if (flat_views) { + return; + } + + flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, + (GDestroyNotify) flatview_unref); + if (!empty_view) { + empty_view = generate_memory_topology(NULL); + /* We keep it alive forever in the global variable. 
*/ + flatview_ref(empty_view); + } else { + g_hash_table_replace(flat_views, NULL, empty_view); + flatview_ref(empty_view); + } +} + +static void flatviews_reset(void) +{ + AddressSpace *as; + + if (flat_views) { + g_hash_table_unref(flat_views); + flat_views = NULL; + } + flatviews_init(); + + /* Render unique FVs */ + QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { + MemoryRegion *physmr = memory_region_get_flatview_root(as->root); + + if (g_hash_table_lookup(flat_views, physmr)) { + continue; + } + + generate_memory_topology(physmr); + } +} + +static void address_space_set_flatview(AddressSpace *as) +{ + FlatView *old_view = address_space_to_flatview(as); + MemoryRegion *physmr = memory_region_get_flatview_root(as->root); + FlatView *new_view = g_hash_table_lookup(flat_views, physmr); + + assert(new_view); + + if (old_view == new_view) { + return; + } + + if (old_view) { + flatview_ref(old_view); + } + + flatview_ref(new_view); + + if (!QTAILQ_EMPTY(&as->listeners)) { + FlatView tmpview = { .nr = 0 }, *old_view2 = old_view; + + if (!old_view2) { + old_view2 = &tmpview; + } + address_space_update_topology_pass(as, old_view2, new_view, false); + address_space_update_topology_pass(as, old_view2, new_view, true); + } + + /* Writes are protected by the BQL. */ + atomic_rcu_set(&as->current_map, new_view); + if (old_view) { + flatview_unref(old_view); + } + + /* Note that all the old MemoryRegions are still alive up to this + * point. This relieves most MemoryListeners from the need to + * ref/unref the MemoryRegions they get---unless they use them + * outside the iothread mutex, in which case precise reference + * counting is necessary. + */ + if (old_view) { + flatview_unref(old_view); + } +} + +static void address_space_update_topology(AddressSpace *as) +{ + MemoryRegion *physmr = memory_region_get_flatview_root(as->root); + + flatviews_init(); + if (!g_hash_table_lookup(flat_views, physmr)) { + generate_memory_topology(physmr); + } + address_space_set_flatview(as); +} + +void memory_region_transaction_begin(void) +{ + qemu_flush_coalesced_mmio_buffer(); + ++memory_region_transaction_depth; +} + +void memory_region_transaction_commit(void) +{ + AddressSpace *as; + + assert(memory_region_transaction_depth); + assert(qemu_mutex_iothread_locked()); + + --memory_region_transaction_depth; + if (!memory_region_transaction_depth) { + if (memory_region_update_pending) { + flatviews_reset(); + + MEMORY_LISTENER_CALL_GLOBAL(begin, Forward); + + QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { + address_space_set_flatview(as); + address_space_update_ioeventfds(as); + } + memory_region_update_pending = false; + ioeventfd_update_pending = false; + MEMORY_LISTENER_CALL_GLOBAL(commit, Forward); + } else if (ioeventfd_update_pending) { + QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { + address_space_update_ioeventfds(as); + } + ioeventfd_update_pending = false; + } + } +} + +static void memory_region_destructor_none(MemoryRegion *mr) +{ +} + +static void memory_region_destructor_ram(MemoryRegion *mr) +{ + qemu_ram_free(mr->ram_block); +} + +static bool memory_region_need_escape(char c) +{ + return c == '/' || c == '[' || c == '\\' || c == ']'; +} + +static char *memory_region_escape_name(const char *name) +{ + const char *p; + char *escaped, *q; + uint8_t c; + size_t bytes = 0; + + for (p = name; *p; p++) { + bytes += memory_region_need_escape(*p) ? 
4 : 1; + } + if (bytes == p - name) { + return g_memdup(name, bytes + 1); + } + + escaped = g_malloc(bytes + 1); + for (p = name, q = escaped; *p; p++) { + c = *p; + if (unlikely(memory_region_need_escape(c))) { + *q++ = '\\'; + *q++ = 'x'; + *q++ = "0123456789abcdef"[c >> 4]; + c = "0123456789abcdef"[c & 15]; + } + *q++ = c; + } + *q = 0; + return escaped; +} + +static void memory_region_do_init(MemoryRegion *mr, + Object *owner, + const char *name, + uint64_t size) +{ + mr->size = int128_make64(size); + if (size == UINT64_MAX) { + mr->size = int128_2_64(); + } + mr->name = g_strdup(name); + mr->owner = owner; + mr->ram_block = NULL; + + if (name) { + char *escaped_name = memory_region_escape_name(name); + char *name_array = g_strdup_printf("%s[*]", escaped_name); + + if (!owner) { + owner = container_get(qdev_get_machine(), "/unattached"); + } + + object_property_add_child(owner, name_array, OBJECT(mr)); + object_unref(OBJECT(mr)); + g_free(name_array); + g_free(escaped_name); + } +} + +void memory_region_init(MemoryRegion *mr, + Object *owner, + const char *name, + uint64_t size) +{ + object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION); + memory_region_do_init(mr, owner, name, size); +} + +static void memory_region_get_container(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + MemoryRegion *mr = MEMORY_REGION(obj); + char *path = (char *)""; + + if (mr->container) { + path = object_get_canonical_path(OBJECT(mr->container)); + } + visit_type_str(v, name, &path, errp); + if (mr->container) { + g_free(path); + } +} + +static Object *memory_region_resolve_container(Object *obj, void *opaque, + const char *part) +{ + MemoryRegion *mr = MEMORY_REGION(obj); + + return OBJECT(mr->container); +} + +static void memory_region_get_priority(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + MemoryRegion *mr = MEMORY_REGION(obj); + int32_t value = mr->priority; + + visit_type_int32(v, name, &value, errp); +} + +static void memory_region_get_size(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MemoryRegion *mr = MEMORY_REGION(obj); + uint64_t value = memory_region_size(mr); + + visit_type_uint64(v, name, &value, errp); +} + +static void memory_region_initfn(Object *obj) +{ + MemoryRegion *mr = MEMORY_REGION(obj); + ObjectProperty *op; + + mr->ops = &unassigned_mem_ops; + mr->enabled = true; + mr->romd_mode = true; + mr->global_locking = true; + mr->destructor = memory_region_destructor_none; + QTAILQ_INIT(&mr->subregions); + QTAILQ_INIT(&mr->coalesced); + + op = object_property_add(OBJECT(mr), "container", + "link<" TYPE_MEMORY_REGION ">", + memory_region_get_container, + NULL, /* memory_region_set_container */ + NULL, NULL); + op->resolve = memory_region_resolve_container; + + object_property_add_uint64_ptr(OBJECT(mr), "addr", + &mr->addr, OBJ_PROP_FLAG_READ); + object_property_add(OBJECT(mr), "priority", "uint32", + memory_region_get_priority, + NULL, /* memory_region_set_priority */ + NULL, NULL); + object_property_add(OBJECT(mr), "size", "uint64", + memory_region_get_size, + NULL, /* memory_region_set_size, */ + NULL, NULL); +} + +static void iommu_memory_region_initfn(Object *obj) +{ + MemoryRegion *mr = MEMORY_REGION(obj); + + mr->is_iommu = true; +} + +static uint64_t unassigned_mem_read(void *opaque, hwaddr addr, + unsigned size) +{ +#ifdef DEBUG_UNASSIGNED + printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); +#endif + return 0; +} + +static void unassigned_mem_write(void *opaque, hwaddr addr, + 
uint64_t val, unsigned size) +{ +#ifdef DEBUG_UNASSIGNED + printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val); +#endif +} + +static bool unassigned_mem_accepts(void *opaque, hwaddr addr, + unsigned size, bool is_write, + MemTxAttrs attrs) +{ + return false; +} + +const MemoryRegionOps unassigned_mem_ops = { + .valid.accepts = unassigned_mem_accepts, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static uint64_t memory_region_ram_device_read(void *opaque, + hwaddr addr, unsigned size) +{ + MemoryRegion *mr = opaque; + uint64_t data = (uint64_t)~0; + + switch (size) { + case 1: + data = *(uint8_t *)(mr->ram_block->host + addr); + break; + case 2: + data = *(uint16_t *)(mr->ram_block->host + addr); + break; + case 4: + data = *(uint32_t *)(mr->ram_block->host + addr); + break; + case 8: + data = *(uint64_t *)(mr->ram_block->host + addr); + break; + } + + trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size); + + return data; +} + +static void memory_region_ram_device_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + MemoryRegion *mr = opaque; + + trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size); + + switch (size) { + case 1: + *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data; + break; + case 2: + *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data; + break; + case 4: + *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data; + break; + case 8: + *(uint64_t *)(mr->ram_block->host + addr) = data; + break; + } +} + +static const MemoryRegionOps ram_device_mem_ops = { + .read = memory_region_ram_device_read, + .write = memory_region_ram_device_write, + .endianness = DEVICE_HOST_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + .unaligned = true, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + .unaligned = true, + }, +}; + +bool memory_region_access_valid(MemoryRegion *mr, + hwaddr addr, + unsigned size, + bool is_write, + MemTxAttrs attrs) +{ + if (mr->ops->valid.accepts + && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) { + return false; + } + + if (!mr->ops->valid.unaligned && (addr & (size - 1))) { + return false; + } + + /* Treat zero as compatibility all valid */ + if (!mr->ops->valid.max_access_size) { + return true; + } + + if (size > mr->ops->valid.max_access_size + || size < mr->ops->valid.min_access_size) { + return false; + } + return true; +} + +static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr, + hwaddr addr, + uint64_t *pval, + unsigned size, + MemTxAttrs attrs) +{ + *pval = 0; + + if (mr->ops->read) { + return access_with_adjusted_size(addr, pval, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_read_accessor, + mr, attrs); + } else { + return access_with_adjusted_size(addr, pval, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_read_with_attrs_accessor, + mr, attrs); + } +} + +MemTxResult memory_region_dispatch_read(MemoryRegion *mr, + hwaddr addr, + uint64_t *pval, + MemOp op, + MemTxAttrs attrs) +{ + unsigned size = memop_size(op); + MemTxResult r; + + if (!memory_region_access_valid(mr, addr, size, false, attrs)) { + *pval = unassigned_mem_read(mr, addr, size); + return MEMTX_DECODE_ERROR; + } + + r = memory_region_dispatch_read1(mr, addr, pval, size, attrs); + adjust_endianness(mr, pval, op); + return r; +} + +/* Return true if an eventfd was signalled */ +static bool memory_region_dispatch_write_eventfds(MemoryRegion 
*mr, + hwaddr addr, + uint64_t data, + unsigned size, + MemTxAttrs attrs) +{ + MemoryRegionIoeventfd ioeventfd = { + .addr = addrrange_make(int128_make64(addr), int128_make64(size)), + .data = data, + }; + unsigned i; + + for (i = 0; i < mr->ioeventfd_nb; i++) { + ioeventfd.match_data = mr->ioeventfds[i].match_data; + ioeventfd.e = mr->ioeventfds[i].e; + + if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) { + event_notifier_set(ioeventfd.e); + return true; + } + } + + return false; +} + +MemTxResult memory_region_dispatch_write(MemoryRegion *mr, + hwaddr addr, + uint64_t data, + MemOp op, + MemTxAttrs attrs) +{ + unsigned size = memop_size(op); + + if (!memory_region_access_valid(mr, addr, size, true, attrs)) { + unassigned_mem_write(mr, addr, data, size); + return MEMTX_DECODE_ERROR; + } + + adjust_endianness(mr, &data, op); + + if ((!kvm_eventfds_enabled()) && + memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) { + return MEMTX_OK; + } + + if (mr->ops->write) { + return access_with_adjusted_size(addr, &data, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_write_accessor, mr, + attrs); + } else { + return + access_with_adjusted_size(addr, &data, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_write_with_attrs_accessor, + mr, attrs); + } +} + +void memory_region_init_io(MemoryRegion *mr, + Object *owner, + const MemoryRegionOps *ops, + void *opaque, + const char *name, + uint64_t size) +{ + memory_region_init(mr, owner, name, size); + mr->ops = ops ? ops : &unassigned_mem_ops; + mr->opaque = opaque; + mr->terminates = true; +} + +void memory_region_init_ram_nomigrate(MemoryRegion *mr, + Object *owner, + const char *name, + uint64_t size, + Error **errp) +{ + memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp); +} + +void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr, + Object *owner, + const char *name, + uint64_t size, + bool share, + Error **errp) +{ + Error *err = NULL; + memory_region_init(mr, owner, name, size); + mr->ram = true; + mr->terminates = true; + mr->destructor = memory_region_destructor_ram; + mr->ram_block = qemu_ram_alloc(size, share, mr, &err); + mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; + if (err) { + mr->size = int128_zero(); + object_unparent(OBJECT(mr)); + error_propagate(errp, err); + } +} + +void memory_region_init_resizeable_ram(MemoryRegion *mr, + Object *owner, + const char *name, + uint64_t size, + uint64_t max_size, + void (*resized)(const char*, + uint64_t length, + void *host), + Error **errp) +{ + Error *err = NULL; + memory_region_init(mr, owner, name, size); + mr->ram = true; + mr->terminates = true; + mr->destructor = memory_region_destructor_ram; + mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized, + mr, &err); + mr->dirty_log_mask = tcg_enabled() ? 
(1 << DIRTY_MEMORY_CODE) : 0; + if (err) { + mr->size = int128_zero(); + object_unparent(OBJECT(mr)); + error_propagate(errp, err); + } +} + +#ifdef CONFIG_POSIX +void memory_region_init_ram_from_file(MemoryRegion *mr, + struct Object *owner, + const char *name, + uint64_t size, + uint64_t align, + uint32_t ram_flags, + const char *path, + Error **errp) +{ + Error *err = NULL; + memory_region_init(mr, owner, name, size); + mr->ram = true; + mr->terminates = true; + mr->destructor = memory_region_destructor_ram; + mr->align = align; + mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err); + mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; + if (err) { + mr->size = int128_zero(); + object_unparent(OBJECT(mr)); + error_propagate(errp, err); + } +} + +void memory_region_init_ram_from_fd(MemoryRegion *mr, + struct Object *owner, + const char *name, + uint64_t size, + bool share, + int fd, + Error **errp) +{ + Error *err = NULL; + memory_region_init(mr, owner, name, size); + mr->ram = true; + mr->terminates = true; + mr->destructor = memory_region_destructor_ram; + mr->ram_block = qemu_ram_alloc_from_fd(size, mr, + share ? RAM_SHARED : 0, + fd, &err); + mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; + if (err) { + mr->size = int128_zero(); + object_unparent(OBJECT(mr)); + error_propagate(errp, err); + } +} +#endif + +void memory_region_init_ram_ptr(MemoryRegion *mr, + Object *owner, + const char *name, + uint64_t size, + void *ptr) +{ + memory_region_init(mr, owner, name, size); + mr->ram = true; + mr->terminates = true; + mr->destructor = memory_region_destructor_ram; + mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; + + /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */ + assert(ptr != NULL); + mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal); +} + +void memory_region_init_ram_device_ptr(MemoryRegion *mr, + Object *owner, + const char *name, + uint64_t size, + void *ptr) +{ + memory_region_init(mr, owner, name, size); + mr->ram = true; + mr->terminates = true; + mr->ram_device = true; + mr->ops = &ram_device_mem_ops; + mr->opaque = mr; + mr->destructor = memory_region_destructor_ram; + mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; + /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. 
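+ * That is also why &error_fatal is passed just below: with a non-NULL
+ * ptr there is no reachable allocation-failure path.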
*/ + assert(ptr != NULL); + mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal); +} + +void memory_region_init_alias(MemoryRegion *mr, + Object *owner, + const char *name, + MemoryRegion *orig, + hwaddr offset, + uint64_t size) +{ + memory_region_init(mr, owner, name, size); + mr->alias = orig; + mr->alias_offset = offset; +} + +void memory_region_init_rom_nomigrate(MemoryRegion *mr, + struct Object *owner, + const char *name, + uint64_t size, + Error **errp) +{ + memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp); + mr->readonly = true; +} + +void memory_region_init_rom_device_nomigrate(MemoryRegion *mr, + Object *owner, + const MemoryRegionOps *ops, + void *opaque, + const char *name, + uint64_t size, + Error **errp) +{ + Error *err = NULL; + assert(ops); + memory_region_init(mr, owner, name, size); + mr->ops = ops; + mr->opaque = opaque; + mr->terminates = true; + mr->rom_device = true; + mr->destructor = memory_region_destructor_ram; + mr->ram_block = qemu_ram_alloc(size, false, mr, &err); + if (err) { + mr->size = int128_zero(); + object_unparent(OBJECT(mr)); + error_propagate(errp, err); + } +} + +void memory_region_init_iommu(void *_iommu_mr, + size_t instance_size, + const char *mrtypename, + Object *owner, + const char *name, + uint64_t size) +{ + struct IOMMUMemoryRegion *iommu_mr; + struct MemoryRegion *mr; + + object_initialize(_iommu_mr, instance_size, mrtypename); + mr = MEMORY_REGION(_iommu_mr); + memory_region_do_init(mr, owner, name, size); + iommu_mr = IOMMU_MEMORY_REGION(mr); + mr->terminates = true; /* then re-forwards */ + QLIST_INIT(&iommu_mr->iommu_notify); + iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE; +} + +static void memory_region_finalize(Object *obj) +{ + MemoryRegion *mr = MEMORY_REGION(obj); + + assert(!mr->container); + + /* We know the region is not visible in any address space (it + * does not have a container and cannot be a root either because + * it has no references), so we can blindly clear mr->enabled. + * memory_region_set_enabled instead could trigger a transaction + * and cause an infinite loop. + */ + mr->enabled = false; + memory_region_transaction_begin(); + while (!QTAILQ_EMPTY(&mr->subregions)) { + MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions); + memory_region_del_subregion(mr, subregion); + } + memory_region_transaction_commit(); + + mr->destructor(mr); + memory_region_clear_coalescing(mr); + g_free((char *)mr->name); + g_free(mr->ioeventfds); +} + +Object *memory_region_owner(MemoryRegion *mr) +{ + Object *obj = OBJECT(mr); + return obj->parent; +} + +void memory_region_ref(MemoryRegion *mr) +{ + /* MMIO callbacks most likely will access data that belongs + * to the owner, hence the need to ref/unref the owner whenever + * the memory region is in use. + * + * The memory region is a child of its owner. As long as the + * owner doesn't call unparent itself on the memory region, + * ref-ing the owner will also keep the memory region alive. + * Memory regions without an owner are supposed to never go away; + * we do not ref/unref them because it slows down DMA noticeably.
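+ *
+ * A minimal usage sketch (the DMA helper and callback names are
+ * hypothetical, not part of this API): hold the reference for the whole
+ * lifetime of the asynchronous access.
+ *
+ *     memory_region_ref(mr);
+ *     my_dma_start(dev, mr, my_dma_complete_cb);
+ *     ...
+ *     memory_region_unref(mr);    <- called from my_dma_complete_cb()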
+ */ + if (mr && mr->owner) { + object_ref(mr->owner); + } +} + +void memory_region_unref(MemoryRegion *mr) +{ + if (mr && mr->owner) { + object_unref(mr->owner); + } +} + +uint64_t memory_region_size(MemoryRegion *mr) +{ + if (int128_eq(mr->size, int128_2_64())) { + return UINT64_MAX; + } + return int128_get64(mr->size); +} + +const char *memory_region_name(const MemoryRegion *mr) +{ + if (!mr->name) { + ((MemoryRegion *)mr)->name = + object_get_canonical_path_component(OBJECT(mr)); + } + return mr->name; +} + +bool memory_region_is_ram_device(MemoryRegion *mr) +{ + return mr->ram_device; +} + +uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr) +{ + uint8_t mask = mr->dirty_log_mask; + if (global_dirty_log && mr->ram_block) { + mask |= (1 << DIRTY_MEMORY_MIGRATION); + } + return mask; +} + +bool memory_region_is_logging(MemoryRegion *mr, uint8_t client) +{ + return memory_region_get_dirty_log_mask(mr) & (1 << client); +} + +static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr, + Error **errp) +{ + IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE; + IOMMUNotifier *iommu_notifier; + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); + int ret = 0; + + IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) { + flags |= iommu_notifier->notifier_flags; + } + + if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) { + ret = imrc->notify_flag_changed(iommu_mr, + iommu_mr->iommu_notify_flags, + flags, errp); + } + + if (!ret) { + iommu_mr->iommu_notify_flags = flags; + } + return ret; +} + +int memory_region_register_iommu_notifier(MemoryRegion *mr, + IOMMUNotifier *n, Error **errp) +{ + IOMMUMemoryRegion *iommu_mr; + int ret; + + if (mr->alias) { + return memory_region_register_iommu_notifier(mr->alias, n, errp); + } + + /* We need to register for at least one bitfield */ + iommu_mr = IOMMU_MEMORY_REGION(mr); + assert(n->notifier_flags != IOMMU_NOTIFIER_NONE); + assert(n->start <= n->end); + assert(n->iommu_idx >= 0 && + n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr)); + + QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node); + ret = memory_region_update_iommu_notify_flags(iommu_mr, errp); + if (ret) { + QLIST_REMOVE(n, node); + } + return ret; +} + +uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); + + if (imrc->get_min_page_size) { + return imrc->get_min_page_size(iommu_mr); + } + return TARGET_PAGE_SIZE; +} + +void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) +{ + MemoryRegion *mr = MEMORY_REGION(iommu_mr); + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); + hwaddr addr, granularity; + IOMMUTLBEntry iotlb; + + /* If the IOMMU has its own replay callback, override */ + if (imrc->replay) { + imrc->replay(iommu_mr, n); + return; + } + + granularity = memory_region_iommu_get_min_page_size(iommu_mr); + + for (addr = 0; addr < memory_region_size(mr); addr += granularity) { + iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx); + if (iotlb.perm != IOMMU_NONE) { + n->notify(n, &iotlb); + } + + /* if (2^64 - MR size) < granularity, it's possible to get an + * infinite loop here. 
This should catch such a wraparound */ + if ((addr + granularity) < addr) { + break; + } + } +} + +void memory_region_unregister_iommu_notifier(MemoryRegion *mr, + IOMMUNotifier *n) +{ + IOMMUMemoryRegion *iommu_mr; + + if (mr->alias) { + memory_region_unregister_iommu_notifier(mr->alias, n); + return; + } + QLIST_REMOVE(n, node); + iommu_mr = IOMMU_MEMORY_REGION(mr); + memory_region_update_iommu_notify_flags(iommu_mr, NULL); +} + +void memory_region_notify_one(IOMMUNotifier *notifier, + IOMMUTLBEntry *entry) +{ + IOMMUNotifierFlag request_flags; + hwaddr entry_end = entry->iova + entry->addr_mask; + + /* + * Skip the notification if the notification does not overlap + * with registered range. + */ + if (notifier->start > entry_end || notifier->end < entry->iova) { + return; + } + + assert(entry->iova >= notifier->start && entry_end <= notifier->end); + + if (entry->perm & IOMMU_RW) { + request_flags = IOMMU_NOTIFIER_MAP; + } else { + request_flags = IOMMU_NOTIFIER_UNMAP; + } + + if (notifier->notifier_flags & request_flags) { + notifier->notify(notifier, entry); + } +} + +void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr, + int iommu_idx, + IOMMUTLBEntry entry) +{ + IOMMUNotifier *iommu_notifier; + + assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr))); + + IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) { + if (iommu_notifier->iommu_idx == iommu_idx) { + memory_region_notify_one(iommu_notifier, &entry); + } + } +} + +int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr, + enum IOMMUMemoryRegionAttr attr, + void *data) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); + + if (!imrc->get_attr) { + return -EINVAL; + } + + return imrc->get_attr(iommu_mr, attr, data); +} + +int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr, + MemTxAttrs attrs) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); + + if (!imrc->attrs_to_index) { + return 0; + } + + return imrc->attrs_to_index(iommu_mr, attrs); +} + +int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); + + if (!imrc->num_indexes) { + return 1; + } + + return imrc->num_indexes(iommu_mr); +} + +void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client) +{ + uint8_t mask = 1 << client; + uint8_t old_logging; + + assert(client == DIRTY_MEMORY_VGA); + old_logging = mr->vga_logging_count; + mr->vga_logging_count += log ? 1 : -1; + if (!!old_logging == !!mr->vga_logging_count) { + return; + } + + memory_region_transaction_begin(); + mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask); + memory_region_update_pending |= mr->enabled; + memory_region_transaction_commit(); +} + +void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr, + hwaddr size) +{ + assert(mr->ram_block); + cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr, + size, + memory_region_get_dirty_log_mask(mr)); +} + +static void memory_region_sync_dirty_bitmap(MemoryRegion *mr) +{ + MemoryListener *listener; + AddressSpace *as; + FlatView *view; + FlatRange *fr; + + /* If the same address space has multiple log_sync listeners, we + * visit that address space's FlatView multiple times. But because + * log_sync listeners are rare, it's still cheaper than walking each + * address space once. 
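+ * (A NULL @mr means "sync every region"; that is how
+ * memory_global_dirty_log_sync() below uses this function.)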
+ */ + QTAILQ_FOREACH(listener, &memory_listeners, link) { + if (!listener->log_sync) { + continue; + } + as = listener->address_space; + view = address_space_get_flatview(as); + FOR_EACH_FLAT_RANGE(fr, view) { + if (fr->dirty_log_mask && (!mr || fr->mr == mr)) { + MemoryRegionSection mrs = section_from_flat_range(fr, view); + listener->log_sync(listener, &mrs); + } + } + flatview_unref(view); + } +} + +void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start, + hwaddr len) +{ + MemoryRegionSection mrs; + MemoryListener *listener; + AddressSpace *as; + FlatView *view; + FlatRange *fr; + hwaddr sec_start, sec_end, sec_size; + + QTAILQ_FOREACH(listener, &memory_listeners, link) { + if (!listener->log_clear) { + continue; + } + as = listener->address_space; + view = address_space_get_flatview(as); + FOR_EACH_FLAT_RANGE(fr, view) { + if (!fr->dirty_log_mask || fr->mr != mr) { + /* + * Clear dirty bitmap operation only applies to those + * regions whose dirty logging is at least enabled + */ + continue; + } + + mrs = section_from_flat_range(fr, view); + + sec_start = MAX(mrs.offset_within_region, start); + sec_end = mrs.offset_within_region + int128_get64(mrs.size); + sec_end = MIN(sec_end, start + len); + + if (sec_start >= sec_end) { + /* + * If this memory region section has no intersection + * with the requested range, skip. + */ + continue; + } + + /* Valid case; shrink the section if needed */ + mrs.offset_within_address_space += + sec_start - mrs.offset_within_region; + mrs.offset_within_region = sec_start; + sec_size = sec_end - sec_start; + mrs.size = int128_make64(sec_size); + listener->log_clear(listener, &mrs); + } + flatview_unref(view); + } +} + +DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr, + hwaddr addr, + hwaddr size, + unsigned client) +{ + DirtyBitmapSnapshot *snapshot; + assert(mr->ram_block); + memory_region_sync_dirty_bitmap(mr); + snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client); + memory_global_after_dirty_log_sync(); + return snapshot; +} + +bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap, + hwaddr addr, hwaddr size) +{ + assert(mr->ram_block); + return cpu_physical_memory_snapshot_get_dirty(snap, + memory_region_get_ram_addr(mr) + addr, size); +} + +void memory_region_set_readonly(MemoryRegion *mr, bool readonly) +{ + if (mr->readonly != readonly) { + memory_region_transaction_begin(); + mr->readonly = readonly; + memory_region_update_pending |= mr->enabled; + memory_region_transaction_commit(); + } +} + +void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile) +{ + if (mr->nonvolatile != nonvolatile) { + memory_region_transaction_begin(); + mr->nonvolatile = nonvolatile; + memory_region_update_pending |= mr->enabled; + memory_region_transaction_commit(); + } +} + +void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode) +{ + if (mr->romd_mode != romd_mode) { + memory_region_transaction_begin(); + mr->romd_mode = romd_mode; + memory_region_update_pending |= mr->enabled; + memory_region_transaction_commit(); + } +} + +void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr, + hwaddr size, unsigned client) +{ + assert(mr->ram_block); + cpu_physical_memory_test_and_clear_dirty( + memory_region_get_ram_addr(mr) + addr, size, client); +} + +int memory_region_get_fd(MemoryRegion *mr) +{ + int fd; + + RCU_READ_LOCK_GUARD(); + while (mr->alias) { + mr = mr->alias; + } + fd = mr->ram_block->fd; + + return fd; +} + +void 
*memory_region_get_ram_ptr(MemoryRegion *mr) +{ + void *ptr; + uint64_t offset = 0; + + RCU_READ_LOCK_GUARD(); + while (mr->alias) { + offset += mr->alias_offset; + mr = mr->alias; + } + assert(mr->ram_block); + ptr = qemu_map_ram_ptr(mr->ram_block, offset); + + return ptr; +} + +MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset) +{ + RAMBlock *block; + + block = qemu_ram_block_from_host(ptr, false, offset); + if (!block) { + return NULL; + } + + return block->mr; +} + +ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr) +{ + return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID; +} + +void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp) +{ + assert(mr->ram_block); + + qemu_ram_resize(mr->ram_block, newsize, errp); +} + +void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size) +{ + if (mr->ram_block) { + qemu_ram_msync(mr->ram_block, addr, size); + } +} + +void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size) +{ + /* + * This may need to be extended to cover other + * types of memory regions. + */ + if (mr->dirty_log_mask) { + memory_region_msync(mr, addr, size); + } +} + +/* + * Notify the proper memory listeners about the newly added/removed + * CoalescedMemoryRange. + */ +static void memory_region_update_coalesced_range(MemoryRegion *mr, + CoalescedMemoryRange *cmr, + bool add) +{ + AddressSpace *as; + FlatView *view; + FlatRange *fr; + + QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { + view = address_space_get_flatview(as); + FOR_EACH_FLAT_RANGE(fr, view) { + if (fr->mr == mr) { + flat_range_coalesced_io_notify(fr, as, cmr, add); + } + } + flatview_unref(view); + } +} + +void memory_region_set_coalescing(MemoryRegion *mr) +{ + memory_region_clear_coalescing(mr); + memory_region_add_coalescing(mr, 0, int128_get64(mr->size)); +} + +void memory_region_add_coalescing(MemoryRegion *mr, + hwaddr offset, + uint64_t size) +{ + CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr)); + + cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size)); + QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link); + memory_region_update_coalesced_range(mr, cmr, true); + memory_region_set_flush_coalesced(mr); +} + +void memory_region_clear_coalescing(MemoryRegion *mr) +{ + CoalescedMemoryRange *cmr; + + if (QTAILQ_EMPTY(&mr->coalesced)) { + return; + } + + qemu_flush_coalesced_mmio_buffer(); + mr->flush_coalesced_mmio = false; + + while (!QTAILQ_EMPTY(&mr->coalesced)) { + cmr = QTAILQ_FIRST(&mr->coalesced); + QTAILQ_REMOVE(&mr->coalesced, cmr, link); + memory_region_update_coalesced_range(mr, cmr, false); + g_free(cmr); + } +} + +void memory_region_set_flush_coalesced(MemoryRegion *mr) +{ + mr->flush_coalesced_mmio = true; +} + +void memory_region_clear_flush_coalesced(MemoryRegion *mr) +{ + qemu_flush_coalesced_mmio_buffer(); + if (QTAILQ_EMPTY(&mr->coalesced)) { + mr->flush_coalesced_mmio = false; + } +} + +void memory_region_clear_global_locking(MemoryRegion *mr) +{ + mr->global_locking = false; +} + +static bool userspace_eventfd_warning; + +void memory_region_add_eventfd(MemoryRegion *mr, + hwaddr addr, + unsigned size, + bool match_data, + uint64_t data, + EventNotifier *e) +{ + MemoryRegionIoeventfd mrfd = { + .addr.start = int128_make64(addr), + .addr.size = int128_make64(size), + .match_data = match_data, + .data = data, + .e = e, + }; + unsigned i; + + if (kvm_enabled() && (!(kvm_eventfds_enabled() || + userspace_eventfd_warning))) { + userspace_eventfd_warning = true; +
error_report("Using eventfd without MMIO binding in KVM. " + "Suboptimal performance expected"); + } + + if (size) { + adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); + } + memory_region_transaction_begin(); + for (i = 0; i < mr->ioeventfd_nb; ++i) { + if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) { + break; + } + } + ++mr->ioeventfd_nb; + mr->ioeventfds = g_realloc(mr->ioeventfds, + sizeof(*mr->ioeventfds) * mr->ioeventfd_nb); + memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i], + sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i)); + mr->ioeventfds[i] = mrfd; + ioeventfd_update_pending |= mr->enabled; + memory_region_transaction_commit(); +} + +void memory_region_del_eventfd(MemoryRegion *mr, + hwaddr addr, + unsigned size, + bool match_data, + uint64_t data, + EventNotifier *e) +{ + MemoryRegionIoeventfd mrfd = { + .addr.start = int128_make64(addr), + .addr.size = int128_make64(size), + .match_data = match_data, + .data = data, + .e = e, + }; + unsigned i; + + if (size) { + adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); + } + memory_region_transaction_begin(); + for (i = 0; i < mr->ioeventfd_nb; ++i) { + if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) { + break; + } + } + assert(i != mr->ioeventfd_nb); + memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1], + sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1))); + --mr->ioeventfd_nb; + mr->ioeventfds = g_realloc(mr->ioeventfds, + sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1); + ioeventfd_update_pending |= mr->enabled; + memory_region_transaction_commit(); +} + +static void memory_region_update_container_subregions(MemoryRegion *subregion) +{ + MemoryRegion *mr = subregion->container; + MemoryRegion *other; + + memory_region_transaction_begin(); + + memory_region_ref(subregion); + QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { + if (subregion->priority >= other->priority) { + QTAILQ_INSERT_BEFORE(other, subregion, subregions_link); + goto done; + } + } + QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link); +done: + memory_region_update_pending |= mr->enabled && subregion->enabled; + memory_region_transaction_commit(); +} + +static void memory_region_add_subregion_common(MemoryRegion *mr, + hwaddr offset, + MemoryRegion *subregion) +{ + assert(!subregion->container); + subregion->container = mr; + subregion->addr = offset; + memory_region_update_container_subregions(subregion); +} + +void memory_region_add_subregion(MemoryRegion *mr, + hwaddr offset, + MemoryRegion *subregion) +{ + subregion->priority = 0; + memory_region_add_subregion_common(mr, offset, subregion); +} + +void memory_region_add_subregion_overlap(MemoryRegion *mr, + hwaddr offset, + MemoryRegion *subregion, + int priority) +{ + subregion->priority = priority; + memory_region_add_subregion_common(mr, offset, subregion); +} + +void memory_region_del_subregion(MemoryRegion *mr, + MemoryRegion *subregion) +{ + memory_region_transaction_begin(); + assert(subregion->container == mr); + subregion->container = NULL; + QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link); + memory_region_unref(subregion); + memory_region_update_pending |= mr->enabled && subregion->enabled; + memory_region_transaction_commit(); +} + +void memory_region_set_enabled(MemoryRegion *mr, bool enabled) +{ + if (enabled == mr->enabled) { + return; + } + memory_region_transaction_begin(); + mr->enabled = enabled; + memory_region_update_pending = true; + memory_region_transaction_commit(); +} + +void 
memory_region_set_size(MemoryRegion *mr, uint64_t size) +{ + Int128 s = int128_make64(size); + + if (size == UINT64_MAX) { + s = int128_2_64(); + } + if (int128_eq(s, mr->size)) { + return; + } + memory_region_transaction_begin(); + mr->size = s; + memory_region_update_pending = true; + memory_region_transaction_commit(); +} + +static void memory_region_readd_subregion(MemoryRegion *mr) +{ + MemoryRegion *container = mr->container; + + if (container) { + memory_region_transaction_begin(); + memory_region_ref(mr); + memory_region_del_subregion(container, mr); + mr->container = container; + memory_region_update_container_subregions(mr); + memory_region_unref(mr); + memory_region_transaction_commit(); + } +} + +void memory_region_set_address(MemoryRegion *mr, hwaddr addr) +{ + if (addr != mr->addr) { + mr->addr = addr; + memory_region_readd_subregion(mr); + } +} + +void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset) +{ + assert(mr->alias); + + if (offset == mr->alias_offset) { + return; + } + + memory_region_transaction_begin(); + mr->alias_offset = offset; + memory_region_update_pending |= mr->enabled; + memory_region_transaction_commit(); +} + +uint64_t memory_region_get_alignment(const MemoryRegion *mr) +{ + return mr->align; +} + +static int cmp_flatrange_addr(const void *addr_, const void *fr_) +{ + const AddrRange *addr = addr_; + const FlatRange *fr = fr_; + + if (int128_le(addrrange_end(*addr), fr->addr.start)) { + return -1; + } else if (int128_ge(addr->start, addrrange_end(fr->addr))) { + return 1; + } + return 0; +} + +static FlatRange *flatview_lookup(FlatView *view, AddrRange addr) +{ + return bsearch(&addr, view->ranges, view->nr, + sizeof(FlatRange), cmp_flatrange_addr); +} + +bool memory_region_is_mapped(MemoryRegion *mr) +{ + return mr->container ? true : false; +} + +/* Same as memory_region_find, but it does not add a reference to the + * returned region. It must be called from an RCU critical section. 
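+ *
+ * Callers that are not inside an RCU critical section should use
+ * memory_region_find() below instead; a sketch of the reference
+ * discipline that implies, with caller-supplied mr/addr/size:
+ *
+ *     MemoryRegionSection sec = memory_region_find(mr, addr, size);
+ *     if (sec.mr) {
+ *         ... use sec ...
+ *         memory_region_unref(sec.mr);
+ *     }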
+ */ +static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr, + hwaddr addr, uint64_t size) +{ + MemoryRegionSection ret = { .mr = NULL }; + MemoryRegion *root; + AddressSpace *as; + AddrRange range; + FlatView *view; + FlatRange *fr; + + addr += mr->addr; + for (root = mr; root->container; ) { + root = root->container; + addr += root->addr; + } + + as = memory_region_to_address_space(root); + if (!as) { + return ret; + } + range = addrrange_make(int128_make64(addr), int128_make64(size)); + + view = address_space_to_flatview(as); + fr = flatview_lookup(view, range); + if (!fr) { + return ret; + } + + while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) { + --fr; + } + + ret.mr = fr->mr; + ret.fv = view; + range = addrrange_intersection(range, fr->addr); + ret.offset_within_region = fr->offset_in_region; + ret.offset_within_region += int128_get64(int128_sub(range.start, + fr->addr.start)); + ret.size = range.size; + ret.offset_within_address_space = int128_get64(range.start); + ret.readonly = fr->readonly; + ret.nonvolatile = fr->nonvolatile; + return ret; +} + +MemoryRegionSection memory_region_find(MemoryRegion *mr, + hwaddr addr, uint64_t size) +{ + MemoryRegionSection ret; + RCU_READ_LOCK_GUARD(); + ret = memory_region_find_rcu(mr, addr, size); + if (ret.mr) { + memory_region_ref(ret.mr); + } + return ret; +} + +bool memory_region_present(MemoryRegion *container, hwaddr addr) +{ + MemoryRegion *mr; + + RCU_READ_LOCK_GUARD(); + mr = memory_region_find_rcu(container, addr, 1).mr; + return mr && mr != container; +} + +void memory_global_dirty_log_sync(void) +{ + memory_region_sync_dirty_bitmap(NULL); +} + +void memory_global_after_dirty_log_sync(void) +{ + MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward); +} + +static VMChangeStateEntry *vmstate_change; + +void memory_global_dirty_log_start(void) +{ + if (vmstate_change) { + qemu_del_vm_change_state_handler(vmstate_change); + vmstate_change = NULL; + } + + global_dirty_log = true; + + MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward); + + /* Refresh DIRTY_MEMORY_MIGRATION bit. */ + memory_region_transaction_begin(); + memory_region_update_pending = true; + memory_region_transaction_commit(); +} + +static void memory_global_dirty_log_do_stop(void) +{ + global_dirty_log = false; + + /* Refresh DIRTY_MEMORY_MIGRATION bit. 
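+ * An otherwise empty transaction with memory_region_update_pending set
+ * re-renders the flat views, so each region's effective mask is
+ * re-evaluated via memory_region_get_dirty_log_mask(), which consults
+ * global_dirty_log.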
*/ + memory_region_transaction_begin(); + memory_region_update_pending = true; + memory_region_transaction_commit(); + + MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse); +} + +static void memory_vm_change_state_handler(void *opaque, int running, + RunState state) +{ + if (running) { + memory_global_dirty_log_do_stop(); + + if (vmstate_change) { + qemu_del_vm_change_state_handler(vmstate_change); + vmstate_change = NULL; + } + } +} + +void memory_global_dirty_log_stop(void) +{ + if (!runstate_is_running()) { + if (vmstate_change) { + return; + } + vmstate_change = qemu_add_vm_change_state_handler( + memory_vm_change_state_handler, NULL); + return; + } + + memory_global_dirty_log_do_stop(); +} + +static void listener_add_address_space(MemoryListener *listener, + AddressSpace *as) +{ + FlatView *view; + FlatRange *fr; + + if (listener->begin) { + listener->begin(listener); + } + if (global_dirty_log) { + if (listener->log_global_start) { + listener->log_global_start(listener); + } + } + + view = address_space_get_flatview(as); + FOR_EACH_FLAT_RANGE(fr, view) { + MemoryRegionSection section = section_from_flat_range(fr, view); + + if (listener->region_add) { + listener->region_add(listener, &section); + } + if (fr->dirty_log_mask && listener->log_start) { + listener->log_start(listener, &section, 0, fr->dirty_log_mask); + } + } + if (listener->commit) { + listener->commit(listener); + } + flatview_unref(view); +} + +static void listener_del_address_space(MemoryListener *listener, + AddressSpace *as) +{ + FlatView *view; + FlatRange *fr; + + if (listener->begin) { + listener->begin(listener); + } + view = address_space_get_flatview(as); + FOR_EACH_FLAT_RANGE(fr, view) { + MemoryRegionSection section = section_from_flat_range(fr, view); + + if (fr->dirty_log_mask && listener->log_stop) { + listener->log_stop(listener, &section, fr->dirty_log_mask, 0); + } + if (listener->region_del) { + listener->region_del(listener, &section); + } + } + if (listener->commit) { + listener->commit(listener); + } + flatview_unref(view); +} + +void memory_listener_register(MemoryListener *listener, AddressSpace *as) +{ + MemoryListener *other = NULL; + + listener->address_space = as; + if (QTAILQ_EMPTY(&memory_listeners) + || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) { + QTAILQ_INSERT_TAIL(&memory_listeners, listener, link); + } else { + QTAILQ_FOREACH(other, &memory_listeners, link) { + if (listener->priority < other->priority) { + break; + } + } + QTAILQ_INSERT_BEFORE(other, listener, link); + } + + if (QTAILQ_EMPTY(&as->listeners) + || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) { + QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as); + } else { + QTAILQ_FOREACH(other, &as->listeners, link_as) { + if (listener->priority < other->priority) { + break; + } + } + QTAILQ_INSERT_BEFORE(other, listener, link_as); + } + + listener_add_address_space(listener, as); +} + +void memory_listener_unregister(MemoryListener *listener) +{ + if (!listener->address_space) { + return; + } + + listener_del_address_space(listener, listener->address_space); + QTAILQ_REMOVE(&memory_listeners, listener, link); + QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as); + listener->address_space = NULL; +} + +void address_space_remove_listeners(AddressSpace *as) +{ + while (!QTAILQ_EMPTY(&as->listeners)) { + memory_listener_unregister(QTAILQ_FIRST(&as->listeners)); + } +} + +void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name) +{ + memory_region_ref(root); + as->root =
root; + as->current_map = NULL; + as->ioeventfd_nb = 0; + as->ioeventfds = NULL; + QTAILQ_INIT(&as->listeners); + QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link); + as->name = g_strdup(name ? name : "anonymous"); + address_space_update_topology(as); + address_space_update_ioeventfds(as); +} + +static void do_address_space_destroy(AddressSpace *as) +{ + assert(QTAILQ_EMPTY(&as->listeners)); + + flatview_unref(as->current_map); + g_free(as->name); + g_free(as->ioeventfds); + memory_region_unref(as->root); +} + +void address_space_destroy(AddressSpace *as) +{ + MemoryRegion *root = as->root; + + /* Flush out anything from MemoryListeners listening in on this */ + memory_region_transaction_begin(); + as->root = NULL; + memory_region_transaction_commit(); + QTAILQ_REMOVE(&address_spaces, as, address_spaces_link); + + /* At this point, as->dispatch and as->current_map are dummy + * entries that the guest should never use. Wait for the old + * values to expire before freeing the data. + */ + as->root = root; + call_rcu(as, do_address_space_destroy, rcu); +} + +static const char *memory_region_type(MemoryRegion *mr) +{ + if (mr->alias) { + return memory_region_type(mr->alias); + } + if (memory_region_is_ram_device(mr)) { + return "ramd"; + } else if (memory_region_is_romd(mr)) { + return "romd"; + } else if (memory_region_is_rom(mr)) { + return "rom"; + } else if (memory_region_is_ram(mr)) { + return "ram"; + } else { + return "i/o"; + } +} + +typedef struct MemoryRegionList MemoryRegionList; + +struct MemoryRegionList { + const MemoryRegion *mr; + QTAILQ_ENTRY(MemoryRegionList) mrqueue; +}; + +typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead; + +#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \ + int128_sub((size), int128_one())) : 0) +#define MTREE_INDENT " " + +static void mtree_expand_owner(const char *label, Object *obj) +{ + DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE); + + qemu_printf(" %s:{%s", label, dev ? "dev" : "obj"); + if (dev && dev->id) { + qemu_printf(" id=%s", dev->id); + } else { + char *canonical_path = object_get_canonical_path(obj); + if (canonical_path) { + qemu_printf(" path=%s", canonical_path); + g_free(canonical_path); + } else { + qemu_printf(" type=%s", object_get_typename(obj)); + } + } + qemu_printf("}"); +} + +static void mtree_print_mr_owner(const MemoryRegion *mr) +{ + Object *owner = mr->owner; + Object *parent = memory_region_owner((MemoryRegion *)mr); + + if (!owner && !parent) { + qemu_printf(" orphan"); + return; + } + if (owner) { + mtree_expand_owner("owner", owner); + } + if (parent && parent != owner) { + mtree_expand_owner("parent", parent); + } +} + +static void mtree_print_mr(const MemoryRegion *mr, unsigned int level, + hwaddr base, + MemoryRegionListHead *alias_print_queue, + bool owner, bool display_disabled) +{ + MemoryRegionList *new_ml, *ml, *next_ml; + MemoryRegionListHead submr_print_queue; + const MemoryRegion *submr; + unsigned int i; + hwaddr cur_start, cur_end; + + if (!mr) { + return; + } + + cur_start = base + mr->addr; + cur_end = cur_start + MR_SIZE(mr->size); + + /* + * Try to detect overflow of memory region. This should never + * happen normally. When it happens, we dump something to warn the + * user who is observing this. + */ + if (cur_start < base || cur_end < cur_start) { + qemu_printf("[DETECTED OVERFLOW!] 
"); + } + + if (mr->alias) { + MemoryRegionList *ml; + bool found = false; + + /* check if the alias is already in the queue */ + QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) { + if (ml->mr == mr->alias) { + found = true; + } + } + + if (!found) { + ml = g_new(MemoryRegionList, 1); + ml->mr = mr->alias; + QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue); + } + if (mr->enabled || display_disabled) { + for (i = 0; i < level; i++) { + qemu_printf(MTREE_INDENT); + } + qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx + " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx + "-" TARGET_FMT_plx "%s", + cur_start, cur_end, + mr->priority, + mr->nonvolatile ? "nv-" : "", + memory_region_type((MemoryRegion *)mr), + memory_region_name(mr), + memory_region_name(mr->alias), + mr->alias_offset, + mr->alias_offset + MR_SIZE(mr->size), + mr->enabled ? "" : " [disabled]"); + if (owner) { + mtree_print_mr_owner(mr); + } + qemu_printf("\n"); + } + } else { + if (mr->enabled || display_disabled) { + for (i = 0; i < level; i++) { + qemu_printf(MTREE_INDENT); + } + qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx + " (prio %d, %s%s): %s%s", + cur_start, cur_end, + mr->priority, + mr->nonvolatile ? "nv-" : "", + memory_region_type((MemoryRegion *)mr), + memory_region_name(mr), + mr->enabled ? "" : " [disabled]"); + if (owner) { + mtree_print_mr_owner(mr); + } + qemu_printf("\n"); + } + } + + QTAILQ_INIT(&submr_print_queue); + + QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) { + new_ml = g_new(MemoryRegionList, 1); + new_ml->mr = submr; + QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) { + if (new_ml->mr->addr < ml->mr->addr || + (new_ml->mr->addr == ml->mr->addr && + new_ml->mr->priority > ml->mr->priority)) { + QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue); + new_ml = NULL; + break; + } + } + if (new_ml) { + QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue); + } + } + + QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) { + mtree_print_mr(ml->mr, level + 1, cur_start, + alias_print_queue, owner, display_disabled); + } + + QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) { + g_free(ml); + } +} + +struct FlatViewInfo { + int counter; + bool dispatch_tree; + bool owner; + AccelClass *ac; +}; + +static void mtree_print_flatview(gpointer key, gpointer value, + gpointer user_data) +{ + FlatView *view = key; + GArray *fv_address_spaces = value; + struct FlatViewInfo *fvi = user_data; + FlatRange *range = &view->ranges[0]; + MemoryRegion *mr; + int n = view->nr; + int i; + AddressSpace *as; + + qemu_printf("FlatView #%d\n", fvi->counter); + ++fvi->counter; + + for (i = 0; i < fv_address_spaces->len; ++i) { + as = g_array_index(fv_address_spaces, AddressSpace*, i); + qemu_printf(" AS \"%s\", root: %s", + as->name, memory_region_name(as->root)); + if (as->root->alias) { + qemu_printf(", alias %s", memory_region_name(as->root->alias)); + } + qemu_printf("\n"); + } + + qemu_printf(" Root memory region: %s\n", + view->root ? memory_region_name(view->root) : "(none)"); + + if (n <= 0) { + qemu_printf(MTREE_INDENT "No rendered FlatView\n\n"); + return; + } + + while (n--) { + mr = range->mr; + if (range->offset_in_region) { + qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx + " (prio %d, %s%s): %s @" TARGET_FMT_plx, + int128_get64(range->addr.start), + int128_get64(range->addr.start) + + MR_SIZE(range->addr.size), + mr->priority, + range->nonvolatile ? "nv-" : "", + range->readonly ? 
"rom" : memory_region_type(mr), + memory_region_name(mr), + range->offset_in_region); + } else { + qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx + " (prio %d, %s%s): %s", + int128_get64(range->addr.start), + int128_get64(range->addr.start) + + MR_SIZE(range->addr.size), + mr->priority, + range->nonvolatile ? "nv-" : "", + range->readonly ? "rom" : memory_region_type(mr), + memory_region_name(mr)); + } + if (fvi->owner) { + mtree_print_mr_owner(mr); + } + + if (fvi->ac) { + for (i = 0; i < fv_address_spaces->len; ++i) { + as = g_array_index(fv_address_spaces, AddressSpace*, i); + if (fvi->ac->has_memory(current_machine, as, + int128_get64(range->addr.start), + MR_SIZE(range->addr.size) + 1)) { + qemu_printf(" %s", fvi->ac->name); + } + } + } + qemu_printf("\n"); + range++; + } + +#if !defined(CONFIG_USER_ONLY) + if (fvi->dispatch_tree && view->root) { + mtree_print_dispatch(view->dispatch, view->root); + } +#endif + + qemu_printf("\n"); +} + +static gboolean mtree_info_flatview_free(gpointer key, gpointer value, + gpointer user_data) +{ + FlatView *view = key; + GArray *fv_address_spaces = value; + + g_array_unref(fv_address_spaces); + flatview_unref(view); + + return true; +} + +void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled) +{ + MemoryRegionListHead ml_head; + MemoryRegionList *ml, *ml2; + AddressSpace *as; + + if (flatview) { + FlatView *view; + struct FlatViewInfo fvi = { + .counter = 0, + .dispatch_tree = dispatch_tree, + .owner = owner, + }; + GArray *fv_address_spaces; + GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal); + AccelClass *ac = ACCEL_GET_CLASS(current_accel()); + + if (ac->has_memory) { + fvi.ac = ac; + } + + /* Gather all FVs in one table */ + QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { + view = address_space_get_flatview(as); + + fv_address_spaces = g_hash_table_lookup(views, view); + if (!fv_address_spaces) { + fv_address_spaces = g_array_new(false, false, sizeof(as)); + g_hash_table_insert(views, view, fv_address_spaces); + } + + g_array_append_val(fv_address_spaces, as); + } + + /* Print */ + g_hash_table_foreach(views, mtree_print_flatview, &fvi); + + /* Free */ + g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0); + g_hash_table_unref(views); + + return; + } + + QTAILQ_INIT(&ml_head); + + QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { + qemu_printf("address-space: %s\n", as->name); + mtree_print_mr(as->root, 1, 0, &ml_head, owner, disabled); + qemu_printf("\n"); + } + + /* print aliased regions */ + QTAILQ_FOREACH(ml, &ml_head, mrqueue) { + qemu_printf("memory-region: %s\n", memory_region_name(ml->mr)); + mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled); + qemu_printf("\n"); + } + + QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) { + g_free(ml); + } +} + +void memory_region_init_ram(MemoryRegion *mr, + struct Object *owner, + const char *name, + uint64_t size, + Error **errp) +{ + DeviceState *owner_dev; + Error *err = NULL; + + memory_region_init_ram_nomigrate(mr, owner, name, size, &err); + if (err) { + error_propagate(errp, err); + return; + } + /* This will assert if owner is neither NULL nor a DeviceState. + * We only want the owner here for the purposes of defining a + * unique name for migration. TODO: Ideally we should implement + * a naming scheme for Objects which are not DeviceStates, in + * which case we can relax this restriction. 
+ */ + owner_dev = DEVICE(owner); + vmstate_register_ram(mr, owner_dev); +} + +void memory_region_init_rom(MemoryRegion *mr, + struct Object *owner, + const char *name, + uint64_t size, + Error **errp) +{ + DeviceState *owner_dev; + Error *err = NULL; + + memory_region_init_rom_nomigrate(mr, owner, name, size, &err); + if (err) { + error_propagate(errp, err); + return; + } + /* This will assert if owner is neither NULL nor a DeviceState. + * We only want the owner here for the purposes of defining a + * unique name for migration. TODO: Ideally we should implement + * a naming scheme for Objects which are not DeviceStates, in + * which case we can relax this restriction. + */ + owner_dev = DEVICE(owner); + vmstate_register_ram(mr, owner_dev); +} + +void memory_region_init_rom_device(MemoryRegion *mr, + struct Object *owner, + const MemoryRegionOps *ops, + void *opaque, + const char *name, + uint64_t size, + Error **errp) +{ + DeviceState *owner_dev; + Error *err = NULL; + + memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque, + name, size, &err); + if (err) { + error_propagate(errp, err); + return; + } + /* This will assert if owner is neither NULL nor a DeviceState. + * We only want the owner here for the purposes of defining a + * unique name for migration. TODO: Ideally we should implement + * a naming scheme for Objects which are not DeviceStates, in + * which case we can relax this restriction. + */ + owner_dev = DEVICE(owner); + vmstate_register_ram(mr, owner_dev); +} + +static const TypeInfo memory_region_info = { + .parent = TYPE_OBJECT, + .name = TYPE_MEMORY_REGION, + .class_size = sizeof(MemoryRegionClass), + .instance_size = sizeof(MemoryRegion), + .instance_init = memory_region_initfn, + .instance_finalize = memory_region_finalize, +}; + +static const TypeInfo iommu_memory_region_info = { + .parent = TYPE_MEMORY_REGION, + .name = TYPE_IOMMU_MEMORY_REGION, + .class_size = sizeof(IOMMUMemoryRegionClass), + .instance_size = sizeof(IOMMUMemoryRegion), + .instance_init = iommu_memory_region_initfn, + .abstract = true, +}; + +static void memory_register_types(void) +{ + type_register_static(&memory_region_info); + type_register_static(&iommu_memory_region_info); +} + +type_init(memory_register_types) diff --git a/softmmu/memory_mapping.c b/softmmu/memory_mapping.c new file mode 100644 index 0000000000..18d0b8067c --- /dev/null +++ b/softmmu/memory_mapping.c @@ -0,0 +1,357 @@ +/* + * QEMU memory mapping + * + * Copyright Fujitsu, Corp. 2011, 2012 + * + * Authors: + * Wen Congyang + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" + +#include "cpu.h" +#include "sysemu/memory_mapping.h" +#include "exec/memory.h" +#include "exec/address-spaces.h" + +//#define DEBUG_GUEST_PHYS_REGION_ADD + +static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list, + MemoryMapping *mapping) +{ + MemoryMapping *p; + + QTAILQ_FOREACH(p, &list->head, next) { + if (p->phys_addr >= mapping->phys_addr) { + QTAILQ_INSERT_BEFORE(p, mapping, next); + return; + } + } + QTAILQ_INSERT_TAIL(&list->head, mapping, next); +} + +static void create_new_memory_mapping(MemoryMappingList *list, + hwaddr phys_addr, + hwaddr virt_addr, + ram_addr_t length) +{ + MemoryMapping *memory_mapping; + + memory_mapping = g_malloc(sizeof(MemoryMapping)); + memory_mapping->phys_addr = phys_addr; + memory_mapping->virt_addr = virt_addr; + memory_mapping->length = length; + list->last_mapping = memory_mapping; + list->num++; + memory_mapping_list_add_mapping_sorted(list, memory_mapping); +} + +static inline bool mapping_contiguous(MemoryMapping *map, + hwaddr phys_addr, + hwaddr virt_addr) +{ + return phys_addr == map->phys_addr + map->length && + virt_addr == map->virt_addr + map->length; +} + +/* + * Do [map->phys_addr, map->phys_addr + map->length) and + * [phys_addr, phys_addr + length) intersect? + */ +static inline bool mapping_have_same_region(MemoryMapping *map, + hwaddr phys_addr, + ram_addr_t length) +{ + return !(phys_addr + length < map->phys_addr || + phys_addr >= map->phys_addr + map->length); +} + +/* + * [map->phys_addr, map->phys_addr + map->length) and + * [phys_addr, phys_addr + length) intersect; return true if the virtual + * addresses in the intersection differ (i.e. the mappings conflict). + */ +static inline bool mapping_conflict(MemoryMapping *map, + hwaddr phys_addr, + hwaddr virt_addr) +{ + return virt_addr - map->virt_addr != phys_addr - map->phys_addr; +} + +/* + * [map->virt_addr, map->virt_addr + map->length) and + * [virt_addr, virt_addr + length) intersect, and the physical addresses + * in the intersection are the same.
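+ *
+ * For example (hypothetical numbers): a map covering virt
+ * [0x2000, 0x4000) merged with virt_addr=0x1000, length=0x2000 grows to
+ * cover virt [0x1000, 0x4000).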
+ */ +static inline void mapping_merge(MemoryMapping *map, + hwaddr virt_addr, + ram_addr_t length) +{ + if (virt_addr < map->virt_addr) { + map->length += map->virt_addr - virt_addr; + map->virt_addr = virt_addr; + } + + if ((virt_addr + length) > + (map->virt_addr + map->length)) { + map->length = virt_addr + length - map->virt_addr; + } +} + +void memory_mapping_list_add_merge_sorted(MemoryMappingList *list, + hwaddr phys_addr, + hwaddr virt_addr, + ram_addr_t length) +{ + MemoryMapping *memory_mapping, *last_mapping; + + if (QTAILQ_EMPTY(&list->head)) { + create_new_memory_mapping(list, phys_addr, virt_addr, length); + return; + } + + last_mapping = list->last_mapping; + if (last_mapping) { + if (mapping_contiguous(last_mapping, phys_addr, virt_addr)) { + last_mapping->length += length; + return; + } + } + + QTAILQ_FOREACH(memory_mapping, &list->head, next) { + if (mapping_contiguous(memory_mapping, phys_addr, virt_addr)) { + memory_mapping->length += length; + list->last_mapping = memory_mapping; + return; + } + + if (phys_addr + length < memory_mapping->phys_addr) { + /* create a new region before memory_mapping */ + break; + } + + if (mapping_have_same_region(memory_mapping, phys_addr, length)) { + if (mapping_conflict(memory_mapping, phys_addr, virt_addr)) { + continue; + } + + /* merge this region into memory_mapping */ + mapping_merge(memory_mapping, virt_addr, length); + list->last_mapping = memory_mapping; + return; + } + } + + /* this region cannot be merged into any existing memory mapping. */ + create_new_memory_mapping(list, phys_addr, virt_addr, length); +} + +void memory_mapping_list_free(MemoryMappingList *list) +{ + MemoryMapping *p, *q; + + QTAILQ_FOREACH_SAFE(p, &list->head, next, q) { + QTAILQ_REMOVE(&list->head, p, next); + g_free(p); + } + + list->num = 0; + list->last_mapping = NULL; +} + +void memory_mapping_list_init(MemoryMappingList *list) +{ + list->num = 0; + list->last_mapping = NULL; + QTAILQ_INIT(&list->head); +} + +void guest_phys_blocks_free(GuestPhysBlockList *list) +{ + GuestPhysBlock *p, *q; + + QTAILQ_FOREACH_SAFE(p, &list->head, next, q) { + QTAILQ_REMOVE(&list->head, p, next); + memory_region_unref(p->mr); + g_free(p); + } + list->num = 0; +} + +void guest_phys_blocks_init(GuestPhysBlockList *list) +{ + list->num = 0; + QTAILQ_INIT(&list->head); +} + +typedef struct GuestPhysListener { + GuestPhysBlockList *list; + MemoryListener listener; +} GuestPhysListener; + +static void guest_phys_blocks_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + GuestPhysListener *g; + uint64_t section_size; + hwaddr target_start, target_end; + uint8_t *host_addr; + GuestPhysBlock *predecessor; + + /* we only care about RAM */ + if (!memory_region_is_ram(section->mr) || + memory_region_is_ram_device(section->mr) || + memory_region_is_nonvolatile(section->mr)) { + return; + } + + g = container_of(listener, GuestPhysListener, listener); + section_size = int128_get64(section->size); + target_start = section->offset_within_address_space; + target_end = target_start + section_size; + host_addr = memory_region_get_ram_ptr(section->mr) + + section->offset_within_region; + predecessor = NULL; + + /* find continuity in guest physical address space */ + if (!QTAILQ_EMPTY(&g->list->head)) { + hwaddr predecessor_size; + + predecessor = QTAILQ_LAST(&g->list->head); + predecessor_size = predecessor->target_end - predecessor->target_start; + + /* the memory API guarantees monotonically increasing traversal */ + g_assert(predecessor->target_end <=
target_start); + + /* we want continuity in both guest-physical and host-virtual memory */ + if (predecessor->target_end < target_start || + predecessor->host_addr + predecessor_size != host_addr) { + predecessor = NULL; + } + } + + if (predecessor == NULL) { + /* isolated mapping, allocate it and add it to the list */ + GuestPhysBlock *block = g_malloc0(sizeof *block); + + block->target_start = target_start; + block->target_end = target_end; + block->host_addr = host_addr; + block->mr = section->mr; + memory_region_ref(section->mr); + + QTAILQ_INSERT_TAIL(&g->list->head, block, next); + ++g->list->num; + } else { + /* expand predecessor until @target_end; predecessor's start doesn't + * change + */ + predecessor->target_end = target_end; + } + +#ifdef DEBUG_GUEST_PHYS_REGION_ADD + fprintf(stderr, "%s: target_start=" TARGET_FMT_plx " target_end=" + TARGET_FMT_plx ": %s (count: %u)\n", __func__, target_start, + target_end, predecessor ? "joined" : "added", g->list->num); +#endif +} + +void guest_phys_blocks_append(GuestPhysBlockList *list) +{ + GuestPhysListener g = { 0 }; + + g.list = list; + g.listener.region_add = &guest_phys_blocks_region_add; + memory_listener_register(&g.listener, &address_space_memory); + memory_listener_unregister(&g.listener); +} + +static CPUState *find_paging_enabled_cpu(CPUState *start_cpu) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + if (cpu_paging_enabled(cpu)) { + return cpu; + } + } + + return NULL; +} + +void qemu_get_guest_memory_mapping(MemoryMappingList *list, + const GuestPhysBlockList *guest_phys_blocks, + Error **errp) +{ + CPUState *cpu, *first_paging_enabled_cpu; + GuestPhysBlock *block; + ram_addr_t offset, length; + + first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu); + if (first_paging_enabled_cpu) { + for (cpu = first_paging_enabled_cpu; cpu != NULL; + cpu = CPU_NEXT(cpu)) { + Error *err = NULL; + cpu_get_memory_mapping(cpu, list, &err); + if (err) { + error_propagate(errp, err); + return; + } + } + return; + } + + /* + * If the guest doesn't use paging, the virtual address is equal to physical + * address. + */ + QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) { + offset = block->target_start; + length = block->target_end - block->target_start; + create_new_memory_mapping(list, offset, offset, length); + } +} + +void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list, + const GuestPhysBlockList *guest_phys_blocks) +{ + GuestPhysBlock *block; + + QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) { + create_new_memory_mapping(list, block->target_start, 0, + block->target_end - block->target_start); + } +} + +void memory_mapping_filter(MemoryMappingList *list, int64_t begin, + int64_t length) +{ + MemoryMapping *cur, *next; + + QTAILQ_FOREACH_SAFE(cur, &list->head, next, next) { + if (cur->phys_addr >= begin + length || + cur->phys_addr + cur->length <= begin) { + QTAILQ_REMOVE(&list->head, cur, next); + g_free(cur); + list->num--; + continue; + } + + if (cur->phys_addr < begin) { + cur->length -= begin - cur->phys_addr; + if (cur->virt_addr) { + cur->virt_addr += begin - cur->phys_addr; + } + cur->phys_addr = begin; + } + + if (cur->phys_addr + cur->length > begin + length) { + cur->length -= cur->phys_addr + cur->length - begin - length; + } + } +} diff --git a/softmmu/qtest.c b/softmmu/qtest.c new file mode 100644 index 0000000000..5672b75c35 --- /dev/null +++ b/softmmu/qtest.c @@ -0,0 +1,820 @@ +/* + * Test Server + * + * Copyright IBM, Corp. 
2011 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "cpu.h" +#include "sysemu/qtest.h" +#include "sysemu/runstate.h" +#include "chardev/char-fe.h" +#include "exec/ioport.h" +#include "exec/memory.h" +#include "hw/irq.h" +#include "sysemu/accel.h" +#include "sysemu/cpus.h" +#include "qemu/config-file.h" +#include "qemu/option.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qemu/cutils.h" +#include "config-devices.h" +#ifdef CONFIG_PSERIES +#include "hw/ppc/spapr_rtas.h" +#endif + +#define MAX_IRQ 256 + +bool qtest_allowed; + +static DeviceState *irq_intercept_dev; +static FILE *qtest_log_fp; +static CharBackend qtest_chr; +static GString *inbuf; +static int irq_levels[MAX_IRQ]; +static qemu_timeval start_time; +static bool qtest_opened; +static void (*qtest_server_send)(void*, const char*); +static void *qtest_server_send_opaque; + +#define FMT_timeval "%ld.%06ld" + +/** + * QTest Protocol + * + * Line based protocol, request/response based. Server can send async messages + * so clients should always handle many async messages before the response + * comes in. + * + * Valid requests + * + * Clock management: + * + * The qtest client is completely in charge of the QEMU_CLOCK_VIRTUAL. qtest commands + * let you adjust the value of the clock (monotonically). All the commands + * return the current value of the clock in nanoseconds. + * + * > clock_step + * < OK VALUE + * + * Advance the clock to the next deadline. Useful when waiting for + * asynchronous events. + * + * > clock_step NS + * < OK VALUE + * + * Advance the clock by NS nanoseconds. + * + * > clock_set NS + * < OK VALUE + * + * Advance the clock to NS nanoseconds (do nothing if it's already past). + * + * PIO and memory access: + * + * > outb ADDR VALUE + * < OK + * + * > outw ADDR VALUE + * < OK + * + * > outl ADDR VALUE + * < OK + * + * > inb ADDR + * < OK VALUE + * + * > inw ADDR + * < OK VALUE + * + * > inl ADDR + * < OK VALUE + * + * > writeb ADDR VALUE + * < OK + * + * > writew ADDR VALUE + * < OK + * + * > writel ADDR VALUE + * < OK + * + * > writeq ADDR VALUE + * < OK + * + * > readb ADDR + * < OK VALUE + * + * > readw ADDR + * < OK VALUE + * + * > readl ADDR + * < OK VALUE + * + * > readq ADDR + * < OK VALUE + * + * > read ADDR SIZE + * < OK DATA + * + * > write ADDR SIZE DATA + * < OK + * + * > b64read ADDR SIZE + * < OK B64_DATA + * + * > b64write ADDR SIZE B64_DATA + * < OK + * + * > memset ADDR SIZE VALUE + * < OK + * + * ADDR, SIZE, VALUE are all integers parsed with strtoul() with a base of 0. + * For 'memset' a zero size is permitted and does nothing. + * + * DATA is an arbitrarily long hex number prefixed with '0x'. If it's smaller + * than the expected size, the value will be zero filled at the end of the data + * sequence. + * + * B64_DATA is an arbitrarily long base64 encoded string. + * If the sizes do not match, the data will be truncated. + * + * IRQ management: + * + * > irq_intercept_in QOM-PATH + * < OK + * + * > irq_intercept_out QOM-PATH + * < OK + * + * Attach to the gpio-in (resp. gpio-out) pins exported by the device at + * QOM-PATH. When the pin is triggered, one of the following async messages + * will be printed to the qtest stream: + * + * IRQ raise NUM + * IRQ lower NUM + * + * where NUM is an IRQ number. 
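+ *
+ * A possible exchange (the QOM path here is hypothetical):
+ *
+ * > irq_intercept_out /machine/unattached/device[0]
+ * < OK
+ * < IRQ raise 0
+ *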
+ *
+ * IRQ management:
+ *
+ * > irq_intercept_in QOM-PATH
+ * < OK
+ *
+ * > irq_intercept_out QOM-PATH
+ * < OK
+ *
+ * Attach to the gpio-in (resp. gpio-out) pins exported by the device at
+ * QOM-PATH.  When the pin is triggered, one of the following async messages
+ * will be printed to the qtest stream:
+ *
+ * IRQ raise NUM
+ * IRQ lower NUM
+ *
+ * where NUM is an IRQ number.  For the PC, interrupts can be intercepted
+ * simply with "irq_intercept_in ioapic" (note that IRQ0 comes out with
+ * NUM=0 even though it is remapped to GSI 2).
+ *
+ * Setting interrupt level:
+ *
+ * > set_irq_in QOM-PATH NAME NUM LEVEL
+ * < OK
+ *
+ * where NAME is the name of the irq/gpio list, NUM is an IRQ number and
+ * LEVEL is a signed integer IRQ level.
+ *
+ * Forcibly set the given interrupt pin to the given level.
+ *
+ */
+
+static int hex2nib(char ch)
+{
+    if (ch >= '0' && ch <= '9') {
+        return ch - '0';
+    } else if (ch >= 'a' && ch <= 'f') {
+        return 10 + (ch - 'a');
+    } else if (ch >= 'A' && ch <= 'F') {
+        return 10 + (ch - 'A');
+    } else {
+        return -1;
+    }
+}
+
+static void qtest_get_time(qemu_timeval *tv)
+{
+    qemu_gettimeofday(tv);
+    tv->tv_sec -= start_time.tv_sec;
+    tv->tv_usec -= start_time.tv_usec;
+    if (tv->tv_usec < 0) {
+        tv->tv_usec += 1000000;
+        tv->tv_sec -= 1;
+    }
+}
+
+static void qtest_send_prefix(CharBackend *chr)
+{
+    qemu_timeval tv;
+
+    if (!qtest_log_fp || !qtest_opened) {
+        return;
+    }
+
+    qtest_get_time(&tv);
+    fprintf(qtest_log_fp, "[S +" FMT_timeval "] ",
+            (long) tv.tv_sec, (long) tv.tv_usec);
+}
+
+static void GCC_FMT_ATTR(1, 2) qtest_log_send(const char *fmt, ...)
+{
+    va_list ap;
+
+    if (!qtest_log_fp || !qtest_opened) {
+        return;
+    }
+
+    qtest_send_prefix(NULL);
+
+    va_start(ap, fmt);
+    vfprintf(qtest_log_fp, fmt, ap);
+    va_end(ap);
+}
+
+static void qtest_server_char_be_send(void *opaque, const char *str)
+{
+    size_t len = strlen(str);
+    CharBackend *chr = (CharBackend *)opaque;
+    qemu_chr_fe_write_all(chr, (uint8_t *)str, len);
+    if (qtest_log_fp && qtest_opened) {
+        fprintf(qtest_log_fp, "%s", str);
+    }
+}
+
+static void qtest_send(CharBackend *chr, const char *str)
+{
+    qtest_server_send(qtest_server_send_opaque, str);
+}
+
+static void GCC_FMT_ATTR(2, 3) qtest_sendf(CharBackend *chr,
+                                           const char *fmt, ...)
+{
+    va_list ap;
+    gchar *buffer;
+
+    va_start(ap, fmt);
+    buffer = g_strdup_vprintf(fmt, ap);
+    qtest_send(chr, buffer);
+    g_free(buffer);
+    va_end(ap);
+}
+
+static void qtest_irq_handler(void *opaque, int n, int level)
+{
+    qemu_irq old_irq = *(qemu_irq *)opaque;
+    qemu_set_irq(old_irq, level);
+
+    if (irq_levels[n] != level) {
+        CharBackend *chr = &qtest_chr;
+        irq_levels[n] = level;
+        qtest_send_prefix(chr);
+        qtest_sendf(chr, "IRQ %s %d\n",
+                    level ? "raise" : "lower", n);
+    }
+}
+
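+/*
+ * Dispatch a single request line.  words[] is the NULL-terminated result
+ * of splitting the line on spaces; words[0] is the command name and the
+ * remaining entries are its arguments.  Every branch answers on the qtest
+ * stream with "OK ...", "FAIL ..." or "ERR ...".
+ */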
"raise" : "lower", n); + } +} + +static void qtest_process_command(CharBackend *chr, gchar **words) +{ + const gchar *command; + + g_assert(words); + + command = words[0]; + + if (qtest_log_fp) { + qemu_timeval tv; + int i; + + qtest_get_time(&tv); + fprintf(qtest_log_fp, "[R +" FMT_timeval "]", + (long) tv.tv_sec, (long) tv.tv_usec); + for (i = 0; words[i]; i++) { + fprintf(qtest_log_fp, " %s", words[i]); + } + fprintf(qtest_log_fp, "\n"); + } + + g_assert(command); + if (strcmp(words[0], "irq_intercept_out") == 0 + || strcmp(words[0], "irq_intercept_in") == 0) { + DeviceState *dev; + NamedGPIOList *ngl; + + g_assert(words[1]); + dev = DEVICE(object_resolve_path(words[1], NULL)); + if (!dev) { + qtest_send_prefix(chr); + qtest_send(chr, "FAIL Unknown device\n"); + return; + } + + if (irq_intercept_dev) { + qtest_send_prefix(chr); + if (irq_intercept_dev != dev) { + qtest_send(chr, "FAIL IRQ intercept already enabled\n"); + } else { + qtest_send(chr, "OK\n"); + } + return; + } + + QLIST_FOREACH(ngl, &dev->gpios, node) { + /* We don't support intercept of named GPIOs yet */ + if (ngl->name) { + continue; + } + if (words[0][14] == 'o') { + int i; + for (i = 0; i < ngl->num_out; ++i) { + qemu_irq *disconnected = g_new0(qemu_irq, 1); + qemu_irq icpt = qemu_allocate_irq(qtest_irq_handler, + disconnected, i); + + *disconnected = qdev_intercept_gpio_out(dev, icpt, + ngl->name, i); + } + } else { + qemu_irq_intercept_in(ngl->in, qtest_irq_handler, + ngl->num_in); + } + } + irq_intercept_dev = dev; + qtest_send_prefix(chr); + qtest_send(chr, "OK\n"); + } else if (strcmp(words[0], "set_irq_in") == 0) { + DeviceState *dev; + qemu_irq irq; + char *name; + int ret; + int num; + int level; + + g_assert(words[1] && words[2] && words[3] && words[4]); + + dev = DEVICE(object_resolve_path(words[1], NULL)); + if (!dev) { + qtest_send_prefix(chr); + qtest_send(chr, "FAIL Unknown device\n"); + return; + } + + if (strcmp(words[2], "unnamed-gpio-in") == 0) { + name = NULL; + } else { + name = words[2]; + } + + ret = qemu_strtoi(words[3], NULL, 0, &num); + g_assert(!ret); + ret = qemu_strtoi(words[4], NULL, 0, &level); + g_assert(!ret); + + irq = qdev_get_gpio_in_named(dev, name, num); + + qemu_set_irq(irq, level); + qtest_send_prefix(chr); + qtest_send(chr, "OK\n"); + } else if (strcmp(words[0], "outb") == 0 || + strcmp(words[0], "outw") == 0 || + strcmp(words[0], "outl") == 0) { + unsigned long addr; + unsigned long value; + int ret; + + g_assert(words[1] && words[2]); + ret = qemu_strtoul(words[1], NULL, 0, &addr); + g_assert(ret == 0); + ret = qemu_strtoul(words[2], NULL, 0, &value); + g_assert(ret == 0); + g_assert(addr <= 0xffff); + + if (words[0][3] == 'b') { + cpu_outb(addr, value); + } else if (words[0][3] == 'w') { + cpu_outw(addr, value); + } else if (words[0][3] == 'l') { + cpu_outl(addr, value); + } + qtest_send_prefix(chr); + qtest_send(chr, "OK\n"); + } else if (strcmp(words[0], "inb") == 0 || + strcmp(words[0], "inw") == 0 || + strcmp(words[0], "inl") == 0) { + unsigned long addr; + uint32_t value = -1U; + int ret; + + g_assert(words[1]); + ret = qemu_strtoul(words[1], NULL, 0, &addr); + g_assert(ret == 0); + g_assert(addr <= 0xffff); + + if (words[0][2] == 'b') { + value = cpu_inb(addr); + } else if (words[0][2] == 'w') { + value = cpu_inw(addr); + } else if (words[0][2] == 'l') { + value = cpu_inl(addr); + } + qtest_send_prefix(chr); + qtest_sendf(chr, "OK 0x%04x\n", value); + } else if (strcmp(words[0], "writeb") == 0 || + strcmp(words[0], "writew") == 0 || + strcmp(words[0], "writel") == 0 || 
+        uint64_t addr;
+        uint64_t value;
+        int ret;
+
+        g_assert(words[1] && words[2]);
+        ret = qemu_strtou64(words[1], NULL, 0, &addr);
+        g_assert(ret == 0);
+        ret = qemu_strtou64(words[2], NULL, 0, &value);
+        g_assert(ret == 0);
+
+        if (words[0][5] == 'b') {
+            uint8_t data = value;
+            address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED,
+                                &data, 1);
+        } else if (words[0][5] == 'w') {
+            uint16_t data = value;
+            tswap16s(&data);
+            address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED,
+                                &data, 2);
+        } else if (words[0][5] == 'l') {
+            uint32_t data = value;
+            tswap32s(&data);
+            address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED,
+                                &data, 4);
+        } else if (words[0][5] == 'q') {
+            uint64_t data = value;
+            tswap64s(&data);
+            address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED,
+                                &data, 8);
+        }
+        qtest_send_prefix(chr);
+        qtest_send(chr, "OK\n");
+    } else if (strcmp(words[0], "readb") == 0 ||
+               strcmp(words[0], "readw") == 0 ||
+               strcmp(words[0], "readl") == 0 ||
+               strcmp(words[0], "readq") == 0) {
+        uint64_t addr;
+        uint64_t value = UINT64_C(-1);
+        int ret;
+
+        g_assert(words[1]);
+        ret = qemu_strtou64(words[1], NULL, 0, &addr);
+        g_assert(ret == 0);
+
+        if (words[0][4] == 'b') {
+            uint8_t data;
+            address_space_read(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED,
+                               &data, 1);
+            value = data;
+        } else if (words[0][4] == 'w') {
+            uint16_t data;
+            address_space_read(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED,
+                               &data, 2);
+            value = tswap16(data);
+        } else if (words[0][4] == 'l') {
+            uint32_t data;
+            address_space_read(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED,
+                               &data, 4);
+            value = tswap32(data);
+        } else if (words[0][4] == 'q') {
+            address_space_read(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED,
+                               &value, 8);
+            tswap64s(&value);
+        }
+        qtest_send_prefix(chr);
+        qtest_sendf(chr, "OK 0x%016" PRIx64 "\n", value);
+    } else if (strcmp(words[0], "read") == 0) {
+        uint64_t addr, len, i;
+        uint8_t *data;
+        char *enc;
+        int ret;
+
+        g_assert(words[1] && words[2]);
+        ret = qemu_strtou64(words[1], NULL, 0, &addr);
+        g_assert(ret == 0);
+        ret = qemu_strtou64(words[2], NULL, 0, &len);
+        g_assert(ret == 0);
+        /* We'd send garbage to libqtest if len is 0 */
+        g_assert(len);
+
+        data = g_malloc(len);
+        address_space_read(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, data,
+                           len);
+
+        enc = g_malloc(2 * len + 1);
+        for (i = 0; i < len; i++) {
+            sprintf(&enc[i * 2], "%02x", data[i]);
+        }
+
+        qtest_send_prefix(chr);
+        qtest_sendf(chr, "OK 0x%s\n", enc);
+
+        g_free(data);
+        g_free(enc);
+    } else if (strcmp(words[0], "b64read") == 0) {
+        uint64_t addr, len;
+        uint8_t *data;
+        gchar *b64_data;
+        int ret;
+
+        g_assert(words[1] && words[2]);
+        ret = qemu_strtou64(words[1], NULL, 0, &addr);
+        g_assert(ret == 0);
+        ret = qemu_strtou64(words[2], NULL, 0, &len);
+        g_assert(ret == 0);
+
+        data = g_malloc(len);
+        address_space_read(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, data,
+                           len);
+        b64_data = g_base64_encode(data, len);
+        qtest_send_prefix(chr);
+        qtest_sendf(chr, "OK %s\n", b64_data);
+
+        g_free(data);
+        g_free(b64_data);
+    } else if (strcmp(words[0], "write") == 0) {
+        uint64_t addr, len, i;
+        uint8_t *data;
+        size_t data_len;
+        int ret;
+
+        g_assert(words[1] && words[2] && words[3]);
+        ret = qemu_strtou64(words[1], NULL, 0, &addr);
+        g_assert(ret == 0);
+        ret = qemu_strtou64(words[2], NULL, 0, &len);
+        g_assert(ret == 0);
+
+        data_len = strlen(words[3]);
+        if (data_len < 3) {
+            qtest_send(chr, "ERR invalid argument size\n");
+            return;
+        }
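+
+        /* DATA is a '0x'-prefixed hex string; bytes beyond the supplied
+         * digits are zero-filled, as documented above. */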
+        data = g_malloc(len);
+        for (i = 0; i < len; i++) {
+            if ((i * 2 + 4) <= data_len) {
+                data[i] = hex2nib(words[3][i * 2 + 2]) << 4;
+                data[i] |= hex2nib(words[3][i * 2 + 3]);
+            } else {
+                data[i] = 0;
+            }
+        }
+        address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, data,
+                            len);
+        g_free(data);
+
+        qtest_send_prefix(chr);
+        qtest_send(chr, "OK\n");
+    } else if (strcmp(words[0], "memset") == 0) {
+        uint64_t addr, len;
+        uint8_t *data;
+        unsigned long pattern;
+        int ret;
+
+        g_assert(words[1] && words[2] && words[3]);
+        ret = qemu_strtou64(words[1], NULL, 0, &addr);
+        g_assert(ret == 0);
+        ret = qemu_strtou64(words[2], NULL, 0, &len);
+        g_assert(ret == 0);
+        ret = qemu_strtoul(words[3], NULL, 0, &pattern);
+        g_assert(ret == 0);
+
+        if (len) {
+            data = g_malloc(len);
+            memset(data, pattern, len);
+            address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED,
+                                data, len);
+            g_free(data);
+        }
+
+        qtest_send_prefix(chr);
+        qtest_send(chr, "OK\n");
+    } else if (strcmp(words[0], "b64write") == 0) {
+        uint64_t addr, len;
+        uint8_t *data;
+        size_t data_len;
+        gsize out_len;
+        int ret;
+
+        g_assert(words[1] && words[2] && words[3]);
+        ret = qemu_strtou64(words[1], NULL, 0, &addr);
+        g_assert(ret == 0);
+        ret = qemu_strtou64(words[2], NULL, 0, &len);
+        g_assert(ret == 0);
+
+        data_len = strlen(words[3]);
+        if (data_len < 3) {
+            qtest_send(chr, "ERR invalid argument size\n");
+            return;
+        }
+
+        data = g_base64_decode_inplace(words[3], &out_len);
+        if (out_len != len) {
+            qtest_log_send("b64write: data length mismatch (told %"PRIu64", "
+                           "found %zu)\n",
+                           len, out_len);
+            out_len = MIN(out_len, len);
+        }
+
+        address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, data,
+                            len);
+
+        qtest_send_prefix(chr);
+        qtest_send(chr, "OK\n");
+    } else if (strcmp(words[0], "endianness") == 0) {
+        qtest_send_prefix(chr);
+#if defined(TARGET_WORDS_BIGENDIAN)
+        qtest_sendf(chr, "OK big\n");
+#else
+        qtest_sendf(chr, "OK little\n");
+#endif
+#ifdef CONFIG_PSERIES
+    } else if (strcmp(words[0], "rtas") == 0) {
+        uint64_t res, args, ret;
+        unsigned long nargs, nret;
+        int rc;
+
+        rc = qemu_strtoul(words[2], NULL, 0, &nargs);
+        g_assert(rc == 0);
+        rc = qemu_strtou64(words[3], NULL, 0, &args);
+        g_assert(rc == 0);
+        rc = qemu_strtoul(words[4], NULL, 0, &nret);
+        g_assert(rc == 0);
+        rc = qemu_strtou64(words[5], NULL, 0, &ret);
+        g_assert(rc == 0);
+        res = qtest_rtas_call(words[1], nargs, args, nret, ret);
+
+        qtest_send_prefix(chr);
+        qtest_sendf(chr, "OK %"PRIu64"\n", res);
+#endif
+    } else if (qtest_enabled() && strcmp(words[0], "clock_step") == 0) {
+        int64_t ns;
+
+        if (words[1]) {
+            int ret = qemu_strtoi64(words[1], NULL, 0, &ns);
+            g_assert(ret == 0);
+        } else {
+            ns = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
+                                            QEMU_TIMER_ATTR_ALL);
+        }
+        qtest_clock_warp(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns);
+        qtest_send_prefix(chr);
+        qtest_sendf(chr, "OK %"PRIi64"\n",
+                    (int64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+    } else if (strcmp(words[0], "module_load") == 0) {
+        g_assert(words[1] && words[2]);
+
+        qtest_send_prefix(chr);
+        if (module_load_one(words[1], words[2])) {
+            qtest_sendf(chr, "OK\n");
+        } else {
+            qtest_sendf(chr, "FAIL\n");
+        }
+    } else if (qtest_enabled() && strcmp(words[0], "clock_set") == 0) {
+        int64_t ns;
+        int ret;
+
+        g_assert(words[1]);
+        ret = qemu_strtoi64(words[1], NULL, 0, &ns);
+        g_assert(ret == 0);
+        qtest_clock_warp(ns);
+        qtest_send_prefix(chr);
+        qtest_sendf(chr, "OK %"PRIi64"\n",
+                    (int64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+    } else {
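+        /* Unknown command: report failure but leave the session open. */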
+        qtest_send_prefix(chr);
+        qtest_sendf(chr, "FAIL Unknown command '%s'\n", words[0]);
+    }
+}
+
+static void qtest_process_inbuf(CharBackend *chr, GString *inbuf)
+{
+    char *end;
+
+    while ((end = strchr(inbuf->str, '\n')) != NULL) {
+        size_t offset;
+        GString *cmd;
+        gchar **words;
+
+        offset = end - inbuf->str;
+
+        cmd = g_string_new_len(inbuf->str, offset);
+        g_string_erase(inbuf, 0, offset + 1);
+
+        words = g_strsplit(cmd->str, " ", 0);
+        qtest_process_command(chr, words);
+        g_strfreev(words);
+
+        g_string_free(cmd, TRUE);
+    }
+}
+
+static void qtest_read(void *opaque, const uint8_t *buf, int size)
+{
+    CharBackend *chr = opaque;
+
+    g_string_append_len(inbuf, (const gchar *)buf, size);
+    qtest_process_inbuf(chr, inbuf);
+}
+
+static int qtest_can_read(void *opaque)
+{
+    return 1024;
+}
+
+static void qtest_event(void *opaque, QEMUChrEvent event)
+{
+    int i;
+
+    switch (event) {
+    case CHR_EVENT_OPENED:
+        /*
+         * We used to call qemu_system_reset() here, hoping we could
+         * use the same process for multiple tests that way.  Never
+         * used.  Injects an extra reset even when it's not used, and
+         * that can mess up tests, e.g. -boot once.
+         */
+        for (i = 0; i < ARRAY_SIZE(irq_levels); i++) {
+            irq_levels[i] = 0;
+        }
+        qemu_gettimeofday(&start_time);
+        qtest_opened = true;
+        if (qtest_log_fp) {
+            fprintf(qtest_log_fp, "[I " FMT_timeval "] OPENED\n",
+                    (long) start_time.tv_sec, (long) start_time.tv_usec);
+        }
+        break;
+    case CHR_EVENT_CLOSED:
+        qtest_opened = false;
+        if (qtest_log_fp) {
+            qemu_timeval tv;
+            qtest_get_time(&tv);
+            fprintf(qtest_log_fp, "[I +" FMT_timeval "] CLOSED\n",
+                    (long) tv.tv_sec, (long) tv.tv_usec);
+        }
+        break;
+    default:
+        break;
+    }
+}
+
+void qtest_server_init(const char *qtest_chrdev, const char *qtest_log, Error **errp)
+{
+    Chardev *chr;
+
+    chr = qemu_chr_new("qtest", qtest_chrdev, NULL);
+
+    if (chr == NULL) {
+        error_setg(errp, "Failed to initialize device for qtest: \"%s\"",
+                   qtest_chrdev);
+        return;
+    }
+
+    if (qtest_log) {
+        if (strcmp(qtest_log, "none") != 0) {
+            qtest_log_fp = fopen(qtest_log, "w+");
+        }
+    } else {
+        qtest_log_fp = stderr;
+    }
+
+    qemu_chr_fe_init(&qtest_chr, chr, errp);
+    qemu_chr_fe_set_handlers(&qtest_chr, qtest_can_read, qtest_read,
+                             qtest_event, NULL, &qtest_chr, NULL, true);
+    qemu_chr_fe_set_echo(&qtest_chr, true);
+
+    inbuf = g_string_new("");
+
+    if (!qtest_server_send) {
+        qtest_server_set_send_handler(qtest_server_char_be_send, &qtest_chr);
+    }
+}
+
+void qtest_server_set_send_handler(void (*send)(void*, const char*),
+                                   void *opaque)
+{
+    qtest_server_send = send;
+    qtest_server_send_opaque = opaque;
+}
+
+bool qtest_driver(void)
+{
+    return qtest_chr.chr != NULL;
+}
+
+void qtest_server_inproc_recv(void *dummy, const char *buf)
+{
+    static GString *gstr;
+    if (!gstr) {
+        gstr = g_string_new(NULL);
+    }
+    g_string_append(gstr, buf);
+    if (gstr->str[gstr->len - 1] == '\n') {
+        qtest_process_inbuf(NULL, gstr);
+        g_string_truncate(gstr, 0);
+    }
+}
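For completeness, this is how the server is usually exercised from the test side: the libqtest helpers under tests/qtest/ speak the protocol documented above over the chardev passed via -qtest. A hypothetical minimal test follows; the machine type and addresses are placeholders, and qtest_init() picks the emulator binary from the QTEST_QEMU_BINARY environment variable:

#include "libqtest.h"

/* Illustrative sketch only: each helper below maps onto one protocol
 * line, e.g. qtest_outb() sends "outb 0x80 0x42" and expects "OK". */
static void smoke_test(void)
{
    QTestState *s = qtest_init("-machine pc");   /* placeholder machine */

    qtest_outb(s, 0x80, 0x42);                   /* PIO write */
    qtest_writel(s, 0x1000, 0xdeadbeef);         /* guest memory write */
    g_assert_cmphex(qtest_readl(s, 0x1000), ==, 0xdeadbeef);

    qtest_quit(s);
}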