*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qapi/error.h"
#include "exec/target_page.h"
#ifdef CONFIG_USER_ONLY
#include "qemu.h"
#else
+#include "hw/core/sysemu-cpu-ops.h"
#include "exec/address-spaces.h"
#endif
#include "sysemu/tcg.h"
#include "sysemu/kvm.h"
#include "sysemu/replay.h"
+#include "exec/cpu-common.h"
+#include "exec/exec-all.h"
#include "exec/translate-all.h"
#include "exec/log.h"
#include "hw/core/accel-cpu.h"
+#include "trace/trace-root.h"
+#include "qemu/accel.h"
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;
void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
+ /* cache the cpu class for the hotpath */
+ cpu->cc = CPU_GET_CLASS(cpu);
- cpu_list_add(cpu);
if (!accel_cpu_realizefn(cpu, errp)) {
return;
}
-#ifdef CONFIG_TCG
+
/* NB: errp parameter is unused currently */
if (tcg_enabled()) {
tcg_exec_realizefn(cpu, errp);
}
-#endif /* CONFIG_TCG */
+
+ /* Wait until cpu initialization is complete before exposing the cpu. */
+ cpu_list_add(cpu);
+
+ /* Plugin initialization must wait until the cpu_index is assigned. */
+ if (tcg_enabled()) {
+ qemu_plugin_vcpu_init_hook(cpu);
+ }
#ifdef CONFIG_USER_ONLY
- assert(cc->vmsd == NULL);
+ assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
+ qdev_get_vmsd(DEVICE(cpu))->unmigratable);
#else
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
}
- if (cc->vmsd != NULL) {
- vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
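+ /*
+ * legacy_vmsd is kept for targets not yet converted to the DeviceClass
+ * vmsd handled above; new targets should not set it.
+ */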
+ if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) {
+ vmstate_register(NULL, cpu->cpu_index, cpu->cc->sysemu_ops->legacy_vmsd, cpu);
}
#endif /* CONFIG_USER_ONLY */
}
void cpu_exec_unrealizefn(CPUState *cpu)
{
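/*
 * Undo cpu_exec_realizefn() in roughly reverse order: unregister the
 * migration state, tear down the TCG data, then drop the CPU from the
 * global list.
 */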
+#ifndef CONFIG_USER_ONLY
CPUClass *cc = CPU_GET_CLASS(cpu);
-#ifdef CONFIG_USER_ONLY
- assert(cc->vmsd == NULL);
-#else
- if (cc->vmsd != NULL) {
- vmstate_unregister(NULL, cc->vmsd, cpu);
+ if (cc->sysemu_ops->legacy_vmsd != NULL) {
+ vmstate_unregister(NULL, cc->sysemu_ops->legacy_vmsd, cpu);
}
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
}
#endif
-#ifdef CONFIG_TCG
- /* NB: errp parameter is unused currently */
if (tcg_enabled()) {
tcg_exec_unrealizefn(cpu);
}
-#endif /* CONFIG_TCG */
cpu_list_remove(cpu);
}
+/*
+ * This can't go in hw/core/cpu.c because that file is compiled only
+ * once for both user-mode and system builds.
+ */
+static Property cpu_common_props[] = {
+#ifdef CONFIG_USER_ONLY
+ /*
+ * Create a property for the user-only object, so users can
+ * adjust prctl(PR_SET_UNALIGN) from the command-line.
+ * Has no effect if the target does not support the feature.
+ */
+ DEFINE_PROP_BOOL("prctl-unalign-sigbus", CPUState,
+ prctl_unalign_sigbus, false),
+#else
+ /*
+ * Create a memory property for the softmmu CPU object, so users can
+ * wire up its memory. The default if no link is set up is to use
+ * the system address space.
+ */
+ DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
+ MemoryRegion *),
+#endif
+ DEFINE_PROP_END_OF_LIST(),
+};
+
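+ /*
+ * Getter/setter backing the "start-powered-off" QOM property registered
+ * in cpu_class_init_props() below.
+ */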
+static bool cpu_get_start_powered_off(Object *obj, Error **errp)
+{
+ CPUState *cpu = CPU(obj);
+ return cpu->start_powered_off;
+}
+
+static void cpu_set_start_powered_off(Object *obj, bool value, Error **errp)
+{
+ CPUState *cpu = CPU(obj);
+ cpu->start_powered_off = value;
+}
+
+void cpu_class_init_props(DeviceClass *dc)
+{
+ ObjectClass *oc = OBJECT_CLASS(dc);
+
+ device_class_set_props(dc, cpu_common_props);
+ /*
+ * We can't use DEFINE_PROP_BOOL in the Property array for this
+ * property, because we want this to be settable after realize.
+ */
+ object_class_property_add_bool(oc, "start-powered-off",
+ cpu_get_start_powered_off,
+ cpu_set_start_powered_off);
+}
+
void cpu_exec_initfn(CPUState *cpu)
{
cpu->as = NULL;
return cpu_type;
}
+void list_cpus(const char *optarg)
+{
+ /* XXX: implement xxx_cpu_list for targets that still lack it */
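+ /*
+ * cpu_list is a per-target macro, defined in the target's cpu.h, that
+ * prints the supported CPU models.
+ */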
+#if defined(cpu_list)
+ cpu_list();
+#endif
+}
+
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr)
{
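/* In user-only mode the mmap lock protects the page descriptors and TB lists. */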
mmap_lock();
- tb_invalidate_phys_page_range(addr, addr + 1);
+ tb_invalidate_phys_page(addr);
mmap_unlock();
}
-
-static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
-{
- tb_invalidate_phys_addr(pc);
-}
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
{
return;
}
ram_addr = memory_region_get_ram_addr(mr) + addr;
- tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
-}
-
-static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
-{
- /*
- * There may not be a virtual to physical translation for the pc
- * right now, but there may exist cached TB for this pc.
- * Flush the whole TB cache to force re-translation of such TBs.
- * This is heavyweight, but we're debugging anyway.
- */
- tb_flush(cpu);
+ tb_invalidate_phys_page(ram_addr);
}
#endif
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
CPUBreakpoint **breakpoint)
{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
CPUBreakpoint *bp;
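+ /*
+ * Give the target a chance to adjust the address, e.g. for targets
+ * whose GDB breakpoint addresses need remapping before use.
+ */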
+ if (cc->gdb_adjust_breakpoint) {
+ pc = cc->gdb_adjust_breakpoint(cpu, pc);
+ }
+
bp = g_malloc(sizeof(*bp));
bp->pc = pc;
QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
}
- breakpoint_invalidate(cpu, pc);
-
if (breakpoint) {
*breakpoint = bp;
}
+
+ trace_breakpoint_insert(cpu->cpu_index, pc, flags);
return 0;
}
/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
CPUBreakpoint *bp;
+ if (cc->gdb_adjust_breakpoint) {
+ pc = cc->gdb_adjust_breakpoint(cpu, pc);
+ }
+
QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
if (bp->pc == pc && bp->flags == flags) {
cpu_breakpoint_remove_by_ref(cpu, bp);
}
/* Remove a specific breakpoint by reference. */
-void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
+void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp)
{
- QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
+ QTAILQ_REMOVE(&cpu->breakpoints, bp, entry);
- breakpoint_invalidate(cpu, breakpoint->pc);
-
- g_free(breakpoint);
+ trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags);
+ g_free(bp);
}
/* Remove all matching breakpoints. */
cpu->singlestep_enabled = enabled;
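/*
 * KVM must be told about the change explicitly; under TCG the new
 * setting is picked up when the next TB is looked up, so no flush of
 * the translation cache is needed.
 */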
if (kvm_enabled()) {
kvm_update_guest_debug(cpu, 0);
- } else {
- /* must flush all the translated code to avoid inconsistencies */
- /* XXX: only flush what is necessary */
- tb_flush(cpu);
}
+ trace_breakpoint_singlestep(cpu->cpu_index, enabled);
}
}
fprintf(stderr, "\n");
cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP);
if (qemu_log_separate()) {
- FILE *logfile = qemu_log_lock();
- qemu_log("qemu: fatal: ");
- qemu_log_vprintf(fmt, ap2);
- qemu_log("\n");
- log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
- qemu_log_flush();
- qemu_log_unlock(logfile);
- qemu_log_close();
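+ /*
+ * qemu_log_trylock() returns NULL when no log file is available, so
+ * all writes below are guarded by the check.
+ */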
+ FILE *logfile = qemu_log_trylock();
+ if (logfile) {
+ fprintf(logfile, "qemu: fatal: ");
+ vfprintf(logfile, fmt, ap2);
+ fprintf(logfile, "\n");
+ cpu_dump_state(cpu, logfile, CPU_DUMP_FPU | CPU_DUMP_CCOP);
+ qemu_log_unlock(logfile);
+ }
}
va_end(ap2);
va_end(ap);
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
-int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
- void *ptr, target_ulong len, bool is_write)
+int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
+ void *ptr, size_t len, bool is_write)
{
int flags;
- target_ulong l, page;
+ vaddr l, page;
void * p;
uint8_t *buf = ptr;
bool target_words_bigendian(void)
{
-#if defined(TARGET_WORDS_BIGENDIAN)
+#if TARGET_BIG_ENDIAN
return true;
#else
return false;
/* NOTE: we can always assume that qemu_host_page_size >=
   TARGET_PAGE_SIZE */
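/* Initialize only once; qemu_real_host_page_size() queries the host. */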
if (qemu_host_page_size == 0) {
- qemu_host_page_size = qemu_real_host_page_size;
+ qemu_host_page_size = qemu_real_host_page_size();
}
if (qemu_host_page_size < TARGET_PAGE_SIZE) {
qemu_host_page_size = TARGET_PAGE_SIZE;