#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
-#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
+#elif defined(TARGET_X86_64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
-#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
+#elif defined(TARGET_I386)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
-/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
#endif
/* log support */
+#ifdef _WIN32
+static const char *logfilename = "qemu.log";
+#else
static const char *logfilename = "/tmp/qemu.log";
+#endif
FILE *logfile;
int loglevel;
static int log_append = 0;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
target_phys_addr_t base;
- CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
- CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
+ CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
+ CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
void *opaque[TARGET_PAGE_SIZE][2][4];
ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;
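The subpage_t change is pure const-correctness: "CPUReadMemoryFunc * const *"
is a pointer to a const pointer to function, which lets the dispatch tables
reference const-qualified handler arrays (such as subpage_read further down)
without a cast. A standalone sketch of the type, with hypothetical names:

#include <stdio.h>

typedef int ReadFunc(void *opaque, unsigned addr);

static int read_byte(void *opaque, unsigned addr)
{
    (void)opaque; (void)addr;
    return 0xab;
}

/* A const table of handler pointers: entries cannot be reassigned. */
static ReadFunc * const read_table[1] = { read_byte };

int main(void)
{
    /* "pointer to const pointer to function" -- the type the patch
       threads through subpage_t and cpu_register_io_memory(). */
    ReadFunc * const *table = read_table;
    printf("%x\n", table[0](NULL, 0));
    return 0;
}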
#if defined(CONFIG_USER_ONLY)
size_t len = sizeof(PageDesc) * L2_SIZE;
/* Don't use qemu_malloc because it may recurse. */
- p = mmap(0, len, PROT_READ | PROT_WRITE,
+ p = mmap(NULL, len, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
*lp = p;
if (h2g_valid(p)) {
return NULL;
p = *lp;
- if (!p)
- return 0;
+ if (!p) {
+ return NULL;
+ }
return p + (index & (L2_SIZE - 1));
}
exit(1);
}
}
-#elif defined(__FreeBSD__) || defined(__DragonFly__)
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
{
int flags;
void *addr = NULL;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
-#define CPU_COMMON_SAVE_VERSION 1
-
-static void cpu_common_save(QEMUFile *f, void *opaque)
+static void cpu_common_pre_save(void *opaque)
{
CPUState *env = opaque;
- cpu_synchronize_state(env, 0);
-
- qemu_put_be32s(f, &env->halted);
- qemu_put_be32s(f, &env->interrupt_request);
+ cpu_synchronize_state(env);
}
-static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
+static int cpu_common_pre_load(void *opaque)
{
CPUState *env = opaque;
- if (version_id != CPU_COMMON_SAVE_VERSION)
- return -EINVAL;
+ cpu_synchronize_state(env);
+ return 0;
+}
+
+static int cpu_common_post_load(void *opaque, int version_id)
+{
+ CPUState *env = opaque;
- qemu_get_be32s(f, &env->halted);
- qemu_get_be32s(f, &env->interrupt_request);
/* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
version_id is increased. */
env->interrupt_request &= ~0x01;
tlb_flush(env, 1);
- cpu_synchronize_state(env, 1);
return 0;
}
+
+static const VMStateDescription vmstate_cpu_common = {
+ .name = "cpu_common",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .pre_save = cpu_common_pre_save,
+ .pre_load = cpu_common_pre_load,
+ .post_load = cpu_common_post_load,
+ .fields = (VMStateField []) {
+ VMSTATE_UINT32(halted, CPUState),
+ VMSTATE_UINT32(interrupt_request, CPUState),
+ VMSTATE_END_OF_LIST()
+ }
+};
#endif
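The old cpu_common_save()/cpu_common_load() pair is replaced by a declarative
VMStateDescription: the fields are listed once and the VMState core does the
byte-order-safe marshalling, while the pre_save/pre_load/post_load hooks keep
the synchronize/flush side effects. The same conversion pattern applies to any
code still using register_savevm() with hand-written handlers; a minimal
sketch under that assumption (MyDevice and its fields are hypothetical):

/* Sketch only: assumes QEMU's hw/hw.h is available in-tree. */
#include "hw/hw.h"

typedef struct MyDevice {
    uint32_t level;
    uint32_t pending;
} MyDevice;

static const VMStateDescription vmstate_my_device = {
    .name = "my_device",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(level, MyDevice),
        VMSTATE_UINT32(pending, MyDevice),
        VMSTATE_END_OF_LIST()
    }
};

/* Registration then mirrors the call below:
   vmstate_register(instance_id, &vmstate_my_device, dev); */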
CPUState *qemu_get_cpu(int cpu)
}
env->cpu_index = cpu_index;
env->numa_node = 0;
- TAILQ_INIT(&env->breakpoints);
- TAILQ_INIT(&env->watchpoints);
+ QTAILQ_INIT(&env->breakpoints);
+ QTAILQ_INIT(&env->watchpoints);
*penv = env;
#if defined(CONFIG_USER_ONLY)
cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
- register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
- cpu_common_save, cpu_common_load, env);
+ vmstate_register(cpu_index, &vmstate_cpu_common, env);
register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
cpu_save, cpu_load, env);
#endif
/* keep all GDB-injected watchpoints in front */
if (flags & BP_GDB)
- TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
+ QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
else
- TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
+ QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
tlb_flush_page(env, addr);
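QTAILQ_* is QEMU's namespaced copy of the BSD <sys/queue.h> TAILQ macros, so
the head-versus-tail insertion trick can be tried standalone against the
system header. A minimal sketch, assuming a platform that ships <sys/queue.h>:

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

typedef struct Watchpoint {
    int vaddr;
    int is_gdb;
    TAILQ_ENTRY(Watchpoint) entry;
} Watchpoint;

static TAILQ_HEAD(, Watchpoint) watchpoints =
    TAILQ_HEAD_INITIALIZER(watchpoints);

static void insert_wp(int vaddr, int is_gdb)
{
    Watchpoint *wp = malloc(sizeof(*wp));
    wp->vaddr = vaddr;
    wp->is_gdb = is_gdb;
    /* GDB-injected watchpoints go in front so they match first. */
    if (is_gdb)
        TAILQ_INSERT_HEAD(&watchpoints, wp, entry);
    else
        TAILQ_INSERT_TAIL(&watchpoints, wp, entry);
}

int main(void)
{
    Watchpoint *wp;
    insert_wp(0x1000, 0);
    insert_wp(0x2000, 1);   /* listed first despite the later insert */
    TAILQ_FOREACH(wp, &watchpoints, entry)
        printf("%#x gdb=%d\n", wp->vaddr, wp->is_gdb);
    return 0;
}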
target_ulong len_mask = ~(len - 1);
CPUWatchpoint *wp;
- TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
if (addr == wp->vaddr && len_mask == wp->len_mask
&& flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
cpu_watchpoint_remove_by_ref(env, wp);
/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
- TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
+ QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
tlb_flush_page(env, watchpoint->vaddr);
{
CPUWatchpoint *wp, *next;
- TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
+ QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
if (wp->flags & mask)
cpu_watchpoint_remove_by_ref(env, wp);
}
/* keep all GDB-injected breakpoints in front */
if (flags & BP_GDB)
- TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
+ QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
else
- TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
+ QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
breakpoint_invalidate(env, pc);
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp;
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
if (bp->pc == pc && bp->flags == flags) {
cpu_breakpoint_remove_by_ref(env, bp);
return 0;
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
- TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
+ QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
breakpoint_invalidate(env, breakpoint->pc);
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp, *next;
- TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
+ QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
if (bp->flags & mask)
cpu_breakpoint_remove_by_ref(env, bp);
}
static char logfile_buf[4096];
setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
}
-#else
+#elif !defined(_WIN32)
+ /* Win32 doesn't support line-buffering and requires size >= 2 */
setvbuf(logfile, NULL, _IOLBF, 0);
#endif
log_append = 1;
static void cpu_unlink_tb(CPUState *env)
{
-#if defined(USE_NPTL)
/* FIXME: TB unchaining isn't SMP safe. For now just ignore the
problem and hope the cpu will stop of its own accord. For userspace
emulation this often isn't actually as bad as it sounds. Often
signals are used primarily to interrupt blocking syscalls. */
-#else
TranslationBlock *tb;
static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
tb = env->current_tb;
/* if the cpu is currently executing code, we must unlink it and
all the potentially executing TB */
- if (tb && !testandset(&interrupt_lock)) {
+ if (tb) {
+ spin_lock(&interrupt_lock);
env->current_tb = NULL;
tb_reset_jump_recursive(tb);
- resetlock(&interrupt_lock);
+ spin_unlock(&interrupt_lock);
}
-#endif
}
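Behavioural note: the removed testandset() variant skipped the unlink
entirely when the lock was already held, whereas spin_lock() waits, so a
pending interrupt can no longer silently lose the race. QEMU's spin_lock of
this era is itself a test-and-set busy loop; a simplified standalone sketch
using the GCC atomic builtins (not the exact QEMU implementation):

typedef volatile int spinlock_t;
#define SPIN_LOCK_UNLOCKED 0

static inline void spin_lock(spinlock_t *lock)
{
    /* atomically set to 1; loop while the previous value was 1 */
    while (__sync_lock_test_and_set(lock, 1)) {
        /* busy-wait until the holder releases */
    }
}

static inline void spin_unlock(spinlock_t *lock)
{
    __sync_lock_release(lock);   /* write 0 with release semantics */
}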
/* mask must never be zero, except for A20 change call */
/* Clone all break/watchpoints.
Note: Once we support ptrace with hw-debug register access, make sure
BP_CPU break/watchpoints are handled correctly on clone. */
- TAILQ_INIT(&env->breakpoints);
- TAILQ_INIT(&env->watchpoints);
+ QTAILQ_INIT(&env->breakpoints);
+ QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
}
- TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
wp->flags, NULL);
}
memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
-#ifdef CONFIG_KQEMU
- if (env->kqemu_enabled) {
- kqemu_flush(env, flush_global);
- }
-#endif
tlb_flush_count++;
}
tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
tlb_flush_jmp_cache(env, addr);
-
-#ifdef CONFIG_KQEMU
- if (env->kqemu_enabled) {
- kqemu_flush_page(env, addr);
- }
-#endif
}
/* update the TLBs so that writes to code in the virtual page 'addr'
if (length == 0)
return;
len = length >> TARGET_PAGE_BITS;
-#ifdef CONFIG_KQEMU
- /* XXX: should not depend on cpu context */
- env = first_cpu;
- if (env->kqemu_enabled) {
- ram_addr_t addr;
- addr = start;
- for(i = 0; i < len; i++) {
- kqemu_set_notdirty(env, addr);
- addr += TARGET_PAGE_SIZE;
- }
- }
-#endif
mask = ~dirty_flags;
p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
for(i = 0; i < len; i++)
code_address = address;
/* Make accesses to pages with watchpoints go via the
watchpoint trap routines. */
- TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
iotlb = io_mem_watch + paddr;
/* TODO: The memory case can be optimized by not trapping
} \
} while (0)
-/* register physical memory. 'size' must be a multiple of the target
- page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
+/* register physical memory.
+ For RAM, 'size' must be a multiple of the target page size.
+ If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
io memory page. The address used when calling the IO function is
the offset from the start of the region, plus region_offset. Both
start_addr and region_offset are rounded down to a page boundary
ram_addr_t orig_size = size;
void *subpage;
-#ifdef CONFIG_KQEMU
- /* XXX: should not depend on cpu context */
- env = first_cpu;
- if (env->kqemu_enabled) {
- kqemu_set_phys_mem(start_addr, size, phys_offset);
- }
-#endif
if (kvm_enabled())
kvm_set_phys_mem(start_addr, size, phys_offset);
kvm_uncoalesce_mmio_region(addr, size);
}
-#ifdef CONFIG_KQEMU
-/* XXX: better than nothing */
-static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
-{
- ram_addr_t addr;
- if ((last_ram_offset + size) > kqemu_phys_ram_size) {
- fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
- (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
- abort();
- }
- addr = last_ram_offset;
- last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
- return addr;
-}
-#endif
-
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
RAMBlock *new_block;
-#ifdef CONFIG_KQEMU
- if (kqemu_phys_ram_base) {
- return kqemu_ram_alloc(size);
- }
-#endif
-
size = TARGET_PAGE_ALIGN(size);
new_block = qemu_malloc(sizeof(*new_block));
+#if defined(TARGET_S390X) && defined(CONFIG_KVM)
+ /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
+ new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+#else
new_block->host = qemu_vmalloc(size);
+#endif
+#ifdef MADV_MERGEABLE
+ madvise(new_block->host, size, MADV_MERGEABLE);
+#endif
new_block->offset = last_ram_offset;
new_block->length = size;
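MADV_MERGEABLE opts the freshly allocated guest RAM into Linux KSM (kernel
samepage merging), so identical pages across guests can be deduplicated; the
ifdef keeps the call compile-safe on hosts that lack it. The pattern in
isolation (Linux-oriented sketch, error handling reduced to the essentials):

#include <stddef.h>
#include <sys/mman.h>

/* Allocate an anonymous region and mark it mergeable for KSM. */
static void *alloc_guest_ram(size_t size)
{
    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return NULL;
#ifdef MADV_MERGEABLE
    madvise(p, size, MADV_MERGEABLE);   /* advisory: failure is harmless */
#endif
    return p;
}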
RAMBlock **prevp;
RAMBlock *block;
-#ifdef CONFIG_KQEMU
- if (kqemu_phys_ram_base) {
- return kqemu_phys_ram_base + addr;
- }
-#endif
-
prev = NULL;
prevp = &ram_blocks;
block = ram_blocks;
RAMBlock *block;
uint8_t *host = ptr;
-#ifdef CONFIG_KQEMU
- if (kqemu_phys_ram_base) {
- return host - kqemu_phys_ram_base;
- }
-#endif
-
prev = NULL;
prevp = &ram_blocks;
block = ram_blocks;
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
-#if defined(TARGET_SPARC)
+#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
do_unassigned_access(addr, 0, 0, 0, 1);
#endif
return 0;
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
-#if defined(TARGET_SPARC)
+#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
do_unassigned_access(addr, 0, 0, 0, 2);
#endif
return 0;
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
-#if defined(TARGET_SPARC)
+#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
do_unassigned_access(addr, 0, 0, 0, 4);
#endif
return 0;
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
-#if defined(TARGET_SPARC)
+#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
-#if defined(TARGET_SPARC)
+#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
-#if defined(TARGET_SPARC)
+#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
-static CPUReadMemoryFunc *unassigned_mem_read[3] = {
+static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
unassigned_mem_readb,
unassigned_mem_readw,
unassigned_mem_readl,
};
-static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
+static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
unassigned_mem_writeb,
unassigned_mem_writew,
unassigned_mem_writel,
#endif
}
stb_p(qemu_get_ram_ptr(ram_addr), val);
-#ifdef CONFIG_KQEMU
- if (cpu_single_env->kqemu_enabled &&
- (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
- kqemu_modify_page(cpu_single_env, ram_addr);
-#endif
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
/* we remove the notdirty callback only if the code has been
#endif
}
stw_p(qemu_get_ram_ptr(ram_addr), val);
-#ifdef CONFIG_KQEMU
- if (cpu_single_env->kqemu_enabled &&
- (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
- kqemu_modify_page(cpu_single_env, ram_addr);
-#endif
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
/* we remove the notdirty callback only if the code has been
#endif
}
stl_p(qemu_get_ram_ptr(ram_addr), val);
-#ifdef CONFIG_KQEMU
- if (cpu_single_env->kqemu_enabled &&
- (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
- kqemu_modify_page(cpu_single_env, ram_addr);
-#endif
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
/* we remove the notdirty callback only if the code has been
tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
-static CPUReadMemoryFunc *error_mem_read[3] = {
+static CPUReadMemoryFunc * const error_mem_read[3] = {
NULL, /* never used */
NULL, /* never used */
NULL, /* never used */
};
-static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
+static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
notdirty_mem_writeb,
notdirty_mem_writew,
notdirty_mem_writel,
return;
}
vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
- TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
if ((vaddr == (wp->vaddr & len_mask) ||
(vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
wp->flags |= BP_WATCHPOINT_HIT;
stl_phys(addr, val);
}
-static CPUReadMemoryFunc *watch_mem_read[3] = {
+static CPUReadMemoryFunc * const watch_mem_read[3] = {
watch_mem_readb,
watch_mem_readw,
watch_mem_readl,
};
-static CPUWriteMemoryFunc *watch_mem_write[3] = {
+static CPUWriteMemoryFunc * const watch_mem_write[3] = {
watch_mem_writeb,
watch_mem_writew,
watch_mem_writel,
subpage_writelen(opaque, addr, value, 2);
}
-static CPUReadMemoryFunc *subpage_read[] = {
+static CPUReadMemoryFunc * const subpage_read[] = {
&subpage_readb,
&subpage_readw,
&subpage_readl,
};
-static CPUWriteMemoryFunc *subpage_write[] = {
+static CPUWriteMemoryFunc * const subpage_write[] = {
&subpage_writeb,
&subpage_writew,
&subpage_writel,
io_mem_used[i] = 1;
return i;
}
-
+    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
return -1;
}
value can be used with cpu_register_physical_memory(). (-1) is
returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
- CPUReadMemoryFunc **mem_read,
- CPUWriteMemoryFunc **mem_write,
+ CPUReadMemoryFunc * const *mem_read,
+ CPUWriteMemoryFunc * const *mem_write,
void *opaque)
{
int i, subwidth = 0;
return (io_index << IO_MEM_SHIFT) | subwidth;
}
-int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
- CPUWriteMemoryFunc **mem_write,
+int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
+ CPUWriteMemoryFunc * const *mem_write,
void *opaque)
{
return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
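Callers are unaffected by the constification except that they may now declare
their dispatch tables const. A hedged usage sketch for a hypothetical device
inside the QEMU tree (my_readb/my_writeb and the mapping address are
invented; the two register calls are the ones defined in this file):

static uint32_t my_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;   /* device register read would go here */
}

static void my_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* device register write would go here */
}

/* One handler per access width (b/w/l); the byte handler is reused
   for all three widths in this sketch. */
static CPUReadMemoryFunc * const my_read[3] = {
    my_readb, my_readb, my_readb,
};
static CPUWriteMemoryFunc * const my_write[3] = {
    my_writeb, my_writeb, my_writeb,
};

static void my_device_init(void)
{
    int io = cpu_register_io_memory(my_read, my_write, NULL);
    /* Map one page of MMIO at a hypothetical physical address. */
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, io);
}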
io_mem_watch = cpu_register_io_memory(watch_mem_read,
watch_mem_write, NULL);
-#ifdef CONFIG_KQEMU
- if (kqemu_phys_ram_base) {
- /* alloc dirty bits array */
- phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
- memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
- }
-#endif
}
#endif /* !defined(CONFIG_USER_ONLY) */
typedef struct MapClient {
void *opaque;
void (*callback)(void *opaque);
- LIST_ENTRY(MapClient) link;
+ QLIST_ENTRY(MapClient) link;
} MapClient;
-static LIST_HEAD(map_client_list, MapClient) map_client_list
- = LIST_HEAD_INITIALIZER(map_client_list);
+static QLIST_HEAD(map_client_list, MapClient) map_client_list
+ = QLIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
client->opaque = opaque;
client->callback = callback;
- LIST_INSERT_HEAD(&map_client_list, client, link);
+ QLIST_INSERT_HEAD(&map_client_list, client, link);
return client;
}
{
MapClient *client = (MapClient *)_client;
- LIST_REMOVE(client, link);
+ QLIST_REMOVE(client, link);
qemu_free(client);
}
{
MapClient *client;
- while (!LIST_EMPTY(&map_client_list)) {
- client = LIST_FIRST(&map_client_list);
+ while (!QLIST_EMPTY(&map_client_list)) {
+ client = QLIST_FIRST(&map_client_list);
client->callback(client->opaque);
cpu_unregister_map_client(client);
}
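The map-client list is the usual register/notify/unregister callback pattern,
now on QLIST (QEMU's renamed BSD LIST). The same shape standalone, again
against <sys/queue.h> with invented names:

#include <stdlib.h>
#include <sys/queue.h>

typedef struct Client {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(Client) link;
} Client;

static LIST_HEAD(, Client) clients = LIST_HEAD_INITIALIZER(clients);

static void *client_register(void *opaque, void (*cb)(void *))
{
    Client *c = malloc(sizeof(*c));
    c->opaque = opaque;
    c->callback = cb;
    LIST_INSERT_HEAD(&clients, c, link);
    return c;
}

static void client_unregister(void *handle)
{
    Client *c = handle;
    LIST_REMOVE(c, link);
    free(c);
}

/* Callbacks may re-register themselves, so always take the current head. */
static void clients_notify(void)
{
    while (!LIST_EMPTY(&clients)) {
        Client *c = LIST_FIRST(&clients);
        c->callback(c->opaque);
        client_unregister(c);
    }
}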