#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
-#include "hw/xen.h"
+#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
+#include "exec/cpu-all.h"
#include "exec/cputlb.h"
#include "translate-all.h"
int phys_ram_fd;
static int in_migration;
-RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
+RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
static MemoryRegion *system_memory;
static MemoryRegion *system_io;
/* 0 = Do not count executed instructions.
1 = Precise instruction counting.
2 = Adaptive rate instruction counting. */
-int use_icount = 0;
+int use_icount;
#if !defined(CONFIG_USER_ONLY)
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
+ qemu_mutex_init(&ram_list.mutex);
memory_map_init();
io_mem_init();
#endif
}
-#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
+#if !defined(CONFIG_USER_ONLY)
static int cpu_common_post_load(void *opaque, int version_id)
{
- CPUArchState *env = opaque;
+ CPUState *cpu = opaque;
/* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
version_id is increased. */
- env->interrupt_request &= ~0x01;
- tlb_flush(env, 1);
+ cpu->interrupt_request &= ~0x01;
+ tlb_flush(cpu->env_ptr, 1);
return 0;
}
.minimum_version_id_old = 1,
.post_load = cpu_common_post_load,
.fields = (VMStateField []) {
- VMSTATE_UINT32(halted, CPUArchState),
- VMSTATE_UINT32(interrupt_request, CPUArchState),
+ VMSTATE_UINT32(halted, CPUState),
+ VMSTATE_UINT32(interrupt_request, CPUState),
VMSTATE_END_OF_LIST()
}
};
+#else
+#define vmstate_cpu_common vmstate_dummy
#endif
-CPUArchState *qemu_get_cpu(int cpu)
+CPUState *qemu_get_cpu(int index)
{
CPUArchState *env = first_cpu;
+ CPUState *cpu = NULL;
while (env) {
- if (env->cpu_index == cpu)
+ cpu = ENV_GET_CPU(env);
+ if (cpu->cpu_index == index) {
break;
+ }
env = env->next_cpu;
}
- return env;
+ return env ? cpu : NULL;
+}
+
+void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
+{
+ CPUArchState *env = first_cpu;
+
+ while (env) {
+ func(ENV_GET_CPU(env), data);
+ env = env->next_cpu;
+ }
}
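
A minimal caller of the new iterator, for illustration (the callback and counter are hypothetical, not part of this patch):

    /* Count the registered CPUs by visiting each one. */
    static void count_cpu(CPUState *cpu, void *data)
    {
        int *count = data;

        (*count)++;
    }

    int count = 0;
    qemu_for_each_cpu(count_cpu, &count);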
void cpu_exec_init(CPUArchState *env)
{
-#ifndef CONFIG_USER_ONLY
CPUState *cpu = ENV_GET_CPU(env);
-#endif
+ CPUClass *cc = CPU_GET_CLASS(cpu);
CPUArchState **penv;
int cpu_index;
penv = &(*penv)->next_cpu;
cpu_index++;
}
- env->cpu_index = cpu_index;
- env->numa_node = 0;
+ cpu->cpu_index = cpu_index;
+ cpu->numa_node = 0;
QTAILQ_INIT(&env->breakpoints);
QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
#if defined(CONFIG_USER_ONLY)
cpu_list_unlock();
#endif
+ vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
- vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
cpu_save, cpu_load, env);
+ assert(cc->vmsd == NULL);
#endif
+ if (cc->vmsd != NULL) {
+ vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
+ }
}
#if defined(TARGET_HAS_ICE)
#endif
}
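
With cc->vmsd in place, a target can publish a per-CPU VMState description from its class_init instead of relying on CPU_SAVE_VERSION; a sketch under assumed names (foo_cpu_class_init and vmstate_foo_cpu are hypothetical):

    static void foo_cpu_class_init(ObjectClass *oc, void *data)
    {
        CPUClass *cc = CPU_CLASS(oc);

        /* Picked up and registered by cpu_exec_init() above. */
        cc->vmsd = &vmstate_foo_cpu;
    }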
-void cpu_reset_interrupt(CPUArchState *env, int mask)
-{
- env->interrupt_request &= ~mask;
-}
-
void cpu_exit(CPUArchState *env)
{
- env->exit_request = 1;
- cpu_unlink_tb(env);
+ CPUState *cpu = ENV_GET_CPU(env);
+
+ cpu->exit_request = 1;
+ cpu->tcg_exit_req = 1;
}
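
The two flags feed different consumers: exit_request is checked by the outer execution loop, while tcg_exit_req is polled on the translation-block exit path so generated code unwinds promptly. Roughly, on the consumer side (a sketch, not part of this hunk):

    if (unlikely(cpu->tcg_exit_req)) {
        cpu->tcg_exit_req = 0;
        /* leave generated code and return to the main loop */
    }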
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
CPUArchState *new_env = cpu_init(env->cpu_model_str);
CPUArchState *next_cpu = new_env->next_cpu;
- int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp;
CPUWatchpoint *wp;
memcpy(new_env, env, sizeof(CPUArchState));
- /* Preserve chaining and index. */
+ /* Preserve chaining. */
new_env->next_cpu = next_cpu;
- new_env->cpu_index = cpu_index;
/* Clone all break/watchpoints.
Note: Once we support ptrace with hw-debug register access, make sure
kvm_flush_coalesced_mmio_buffer();
}
+void qemu_mutex_lock_ramlist(void)
+{
+ qemu_mutex_lock(&ram_list.mutex);
+}
+
+void qemu_mutex_unlock_ramlist(void)
+{
+ qemu_mutex_unlock(&ram_list.mutex);
+}
+
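These accessors establish the locking discipline for every walker of ram_list; the intended pattern looks like this (hypothetical caller):

    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* blocks cannot be added or removed while the mutex is held */
    }
    qemu_mutex_unlock_ramlist();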
#if defined(__linux__) && !defined(TARGET_S390X)
#include <sys/vfs.h>
const char *path)
{
char *filename;
+ char *sanitized_name;
+ char *c;
void *area;
int fd;
#ifdef MAP_POPULATE
return NULL;
}
- if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
- return NULL;
+ /* Make name safe to use with mkstemp by replacing '/' with '_'. */
+ sanitized_name = g_strdup(block->mr->name);
+ for (c = sanitized_name; *c != '\0'; c++) {
+     if (*c == '/') {
+         *c = '_';
+     }
}
+ filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
+ sanitized_name);
+ g_free(sanitized_name);
+
fd = mkstemp(filename);
if (fd < 0) {
perror("unable to create backing store for hugepages");
- free(filename);
+ g_free(filename);
return NULL;
}
unlink(filename);
- free(filename);
+ g_free(filename);
memory = (memory+hpagesize-1) & ~(hpagesize-1);
RAMBlock *block, *next_block;
ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
- if (QLIST_EMPTY(&ram_list.blocks))
+ assert(size != 0); /* it would hand out the same offset multiple times */
+
+ if (QTAILQ_EMPTY(&ram_list.blocks))
return 0;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
ram_addr_t end, next = RAM_ADDR_MAX;
end = block->offset + block->length;
- QLIST_FOREACH(next_block, &ram_list.blocks, next) {
+ QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
if (next_block->offset >= end) {
next = MIN(next, next_block->offset);
}
RAMBlock *block;
ram_addr_t last = 0;
- QLIST_FOREACH(block, &ram_list.blocks, next)
+ QTAILQ_FOREACH(block, &ram_list.blocks, next)
last = MAX(last, block->offset + block->length);
return last;
RAMBlock *new_block, *block;
new_block = NULL;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (block->offset == addr) {
new_block = block;
break;
}
pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ /* This assumes the iothread lock is taken here too. */
+ qemu_mutex_lock_ramlist();
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
new_block->idstr);
abort();
}
}
+ qemu_mutex_unlock_ramlist();
}
static int memory_try_enable_merging(void *addr, size_t len)
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
MemoryRegion *mr)
{
- RAMBlock *new_block;
+ RAMBlock *block, *new_block;
size = TARGET_PAGE_ALIGN(size);
new_block = g_malloc0(sizeof(*new_block));
+ /* This assumes the iothread lock is taken here too. */
+ qemu_mutex_lock_ramlist();
new_block->mr = mr;
new_block->offset = find_ram_offset(size);
if (host) {
#if defined (__linux__) && !defined(TARGET_S390X)
new_block->host = file_ram_alloc(new_block, size, mem_path);
if (!new_block->host) {
- new_block->host = qemu_vmalloc(size);
+ new_block->host = qemu_anon_ram_alloc(size);
memory_try_enable_merging(new_block->host, size);
}
#else
xen_ram_alloc(new_block->offset, size, mr);
} else if (kvm_enabled()) {
/* some s390/kvm configurations have special constraints */
- new_block->host = kvm_vmalloc(size);
+ new_block->host = kvm_ram_alloc(size);
} else {
- new_block->host = qemu_vmalloc(size);
+ new_block->host = qemu_anon_ram_alloc(size);
}
memory_try_enable_merging(new_block->host, size);
}
}
new_block->length = size;
- QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
+ /* Keep the list sorted from biggest to smallest block. */
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ if (block->length < new_block->length) {
+ break;
+ }
+ }
+ if (block) {
+ QTAILQ_INSERT_BEFORE(block, new_block, next);
+ } else {
+ QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
+ }
+ ram_list.mru_block = NULL;
+
+ ram_list.version++;
+ qemu_mutex_unlock_ramlist();
ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
last_ram_offset() >> TARGET_PAGE_BITS);
{
RAMBlock *block;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ /* This assumes the iothread lock is taken here too. */
+ qemu_mutex_lock_ramlist();
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (addr == block->offset) {
- QLIST_REMOVE(block, next);
+ QTAILQ_REMOVE(&ram_list.blocks, block, next);
+ ram_list.mru_block = NULL;
+ ram_list.version++;
g_free(block);
- return;
+ break;
}
}
+ qemu_mutex_unlock_ramlist();
}
void qemu_ram_free(ram_addr_t addr)
{
RAMBlock *block;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ /* This assumes the iothread lock is taken here too. */
+ qemu_mutex_lock_ramlist();
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (addr == block->offset) {
- QLIST_REMOVE(block, next);
+ QTAILQ_REMOVE(&ram_list.blocks, block, next);
+ ram_list.mru_block = NULL;
+ ram_list.version++;
if (block->flags & RAM_PREALLOC_MASK) {
;
} else if (mem_path) {
munmap(block->host, block->length);
close(block->fd);
} else {
- qemu_vfree(block->host);
+ qemu_anon_ram_free(block->host, block->length);
}
#else
abort();
#endif
} else {
-#if defined(TARGET_S390X) && defined(CONFIG_KVM)
- munmap(block->host, block->length);
-#else
if (xen_enabled()) {
xen_invalidate_map_cache_entry(block->host);
} else {
- qemu_vfree(block->host);
+ qemu_anon_ram_free(block->host, block->length);
}
-#endif
}
g_free(block);
- return;
+ break;
}
}
+ qemu_mutex_unlock_ramlist();
}
int flags;
void *area, *vaddr;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
offset = addr - block->offset;
if (offset < block->length) {
vaddr = block->host + offset;
{
RAMBlock *block;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ /* The list is protected by the iothread lock here. */
+ block = ram_list.mru_block;
+ if (block && addr - block->offset < block->length) {
+ goto found;
+ }
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (addr - block->offset < block->length) {
- /* Move this entry to to start of the list. */
- if (block != QLIST_FIRST(&ram_list.blocks)) {
- QLIST_REMOVE(block, next);
- QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
- }
- if (xen_enabled()) {
- /* We need to check if the requested address is in the RAM
- * because we don't want to map the entire memory in QEMU.
- * In that case just map until the end of the page.
- */
- if (block->offset == 0) {
- return xen_map_cache(addr, 0, 0);
- } else if (block->host == NULL) {
- block->host =
- xen_map_cache(block->offset, block->length, 1);
- }
- }
- return block->host + (addr - block->offset);
+ goto found;
}
}
fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
abort();
- return NULL;
+found:
+ ram_list.mru_block = block;
+ if (xen_enabled()) {
+ /* We need to check if the requested address is in the RAM
+ * because we don't want to map the entire memory in QEMU.
+ * In that case just map until the end of the page.
+ */
+ if (block->offset == 0) {
+ return xen_map_cache(addr, 0, 0);
+ } else if (block->host == NULL) {
+ block->host =
+ xen_map_cache(block->offset, block->length, 1);
+ }
+ }
+ return block->host + (addr - block->offset);
}
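
A typical caller just translates a guest RAM offset into a host pointer and touches the backing memory directly (illustrative fragment; ram_addr, buf and len are assumed):

    void *host = qemu_get_ram_ptr(ram_addr);
    memcpy(host, buf, len);

Note the design change: the mru_block cache replaces the old move-to-front reordering, so hot lookups stay fast while the list itself remains sorted and stable for other readers.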
-/* Return a host pointer to ram allocated with qemu_ram_alloc.
- * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
+/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
+ * qemu_get_ram_ptr but do not touch ram_list.mru_block.
+ *
+ * ??? Is this still necessary?
*/
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
RAMBlock *block;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ /* The list is protected by the iothread lock here. */
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (addr - block->offset < block->length) {
if (xen_enabled()) {
/* We need to check if the requested address is in the RAM
} else {
RAMBlock *block;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (addr - block->offset < block->length) {
if (addr - block->offset + *size > block->length)
*size = block->length - addr + block->offset;
return 0;
}
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
/* This case happens when the block is not mapped. */
if (block->host == NULL) {
continue;
/* We re-entered the check after replacing the TB. Now raise
* the debug interrupt so that it will trigger after the
* current instruction. */
- cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
+ cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
return;
}
vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;