int phys_ram_fd;
static int in_migration;
-RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
+RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
static MemoryRegion *system_memory;
static MemoryRegion *system_io;
/* 0 = Do not count executed instructions.
1 = Precise instruction counting.
2 = Adaptive rate instruction counting. */
-int use_icount = 0;
+int use_icount;
#if !defined(CONFIG_USER_ONLY)
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
+ qemu_mutex_init(&ram_list.mutex);
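+ /* ram_list.mutex (initialized above) guards ram_list.blocks so the list
+  * can be walked without the iothread lock; it must exist before the first
+  * RAM block is registered (see qemu_mutex_lock_ramlist() below). */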
memory_map_init();
io_mem_init();
#endif
}
#endif
-CPUArchState *qemu_get_cpu(int cpu)
+CPUState *qemu_get_cpu(int index)
{
CPUArchState *env = first_cpu;
+ CPUState *cpu = NULL;
while (env) {
- if (env->cpu_index == cpu)
+ cpu = ENV_GET_CPU(env);
+ if (cpu->cpu_index == index) {
break;
+ }
env = env->next_cpu;
}
- return env;
+ return env ? cpu : NULL;
}
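
/* A minimal caller sketch (hypothetical helper, not part of this patch):
 * with the return type now CPUState, a lookup by index must handle a
 * NULL result when no CPU has that index. */
static void example_kick_cpu(int index)
{
    CPUState *cpu = qemu_get_cpu(index);

    if (cpu != NULL) {
        cpu->exit_request = 1;
    }
}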
void cpu_exec_init(CPUArchState *env)
{
-#ifndef CONFIG_USER_ONLY
CPUState *cpu = ENV_GET_CPU(env);
-#endif
CPUArchState **penv;
int cpu_index;
penv = &(*penv)->next_cpu;
cpu_index++;
}
- env->cpu_index = cpu_index;
- env->numa_node = 0;
+ cpu->cpu_index = cpu_index;
+ cpu->numa_node = 0;
QTAILQ_INIT(&env->breakpoints);
QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
void cpu_exit(CPUArchState *env)
{
- env->exit_request = 1;
- cpu_unlink_tb(env);
+ CPUState *cpu = ENV_GET_CPU(env);
+
+ cpu->exit_request = 1;
+ cpu->tcg_exit_req = 1;
}
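
/* A minimal sketch of the consumer side, assuming a TCG-style execution
 * loop that polls the flag; everything except tcg_exit_req is illustrative: */
static void example_tcg_exec_loop(CPUState *cpu)
{
    while (!cpu->tcg_exit_req) {
        /* look up or translate the next TB and execute it */
    }
    cpu->tcg_exit_req = 0; /* acknowledge the exit request */
}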
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
CPUArchState *new_env = cpu_init(env->cpu_model_str);
CPUArchState *next_cpu = new_env->next_cpu;
- int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp;
CPUWatchpoint *wp;
memcpy(new_env, env, sizeof(CPUArchState));
- /* Preserve chaining and index. */
+ /* Preserve chaining. */
new_env->next_cpu = next_cpu;
- new_env->cpu_index = cpu_index;
/* Clone all break/watchpoints.
Note: Once we support ptrace with hw-debug register access, make sure
kvm_flush_coalesced_mmio_buffer();
}
+void qemu_mutex_lock_ramlist(void)
+{
+ qemu_mutex_lock(&ram_list.mutex);
+}
+
+void qemu_mutex_unlock_ramlist(void)
+{
+ qemu_mutex_unlock(&ram_list.mutex);
+}
+
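/* A minimal sketch of the pattern these helpers enable, for walkers that
 * run without the iothread lock (hypothetical function): */
static ram_addr_t example_total_ram(void)
{
    RAMBlock *block;
    ram_addr_t total = 0;

    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        total += block->length;
    }
    qemu_mutex_unlock_ramlist();
    return total;
}
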
#if defined(__linux__) && !defined(TARGET_S390X)
#include <sys/vfs.h>
return NULL;
}
- if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
- return NULL;
- }
+ filename = g_strdup_printf("%s/qemu_back_mem.XXXXXX", path);
fd = mkstemp(filename);
if (fd < 0) {
perror("unable to create backing store for hugepages");
- free(filename);
+ g_free(filename);
return NULL;
}
unlink(filename);
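/* POSIX semantics: the open fd keeps the unlinked file alive, and the
 * kernel reclaims the hugepage backing store automatically on exit. */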
- free(filename);
+ g_free(filename);
memory = (memory+hpagesize-1) & ~(hpagesize-1);
RAMBlock *block, *next_block;
ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
- if (QLIST_EMPTY(&ram_list.blocks))
+ if (QTAILQ_EMPTY(&ram_list.blocks))
return 0;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
ram_addr_t end, next = RAM_ADDR_MAX;
end = block->offset + block->length;
- QLIST_FOREACH(next_block, &ram_list.blocks, next) {
+ QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
if (next_block->offset >= end) {
next = MIN(next, next_block->offset);
}
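
/* Worked example (illustrative values): with blocks at [0, 0x100000) and
 * [0x300000, 0x400000), a request for 0x100000 bytes sees a 0x200000-byte
 * gap at 0x100000 and an unbounded one past 0x400000; the smallest gap
 * that fits wins (mingap above), so the chosen offset is 0x100000. */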
RAMBlock *block;
ram_addr_t last = 0;
- QLIST_FOREACH(block, &ram_list.blocks, next)
+ QTAILQ_FOREACH(block, &ram_list.blocks, next)
last = MAX(last, block->offset + block->length);
return last;
RAMBlock *new_block, *block;
new_block = NULL;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (block->offset == addr) {
new_block = block;
break;
}
pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ /* This assumes the iothread lock is taken here too. */
+ qemu_mutex_lock_ramlist();
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
new_block->idstr);
abort();
}
}
+ qemu_mutex_unlock_ramlist();
}
static int memory_try_enable_merging(void *addr, size_t len)
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
MemoryRegion *mr)
{
- RAMBlock *new_block;
+ RAMBlock *block, *new_block;
size = TARGET_PAGE_ALIGN(size);
new_block = g_malloc0(sizeof(*new_block));
+ /* This assumes the iothread lock is taken here too. */
+ qemu_mutex_lock_ramlist();
new_block->mr = mr;
new_block->offset = find_ram_offset(size);
if (host) {
}
new_block->length = size;
- QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
+ /* Keep the list sorted from biggest to smallest block. */
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ if (block->length < new_block->length) {
+ break;
+ }
+ }
+ if (block) {
+ QTAILQ_INSERT_BEFORE(block, new_block, next);
+ } else {
+ QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
+ }
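+ /* The list therefore stays sorted biggest-to-smallest: registering 512 MB,
+  * 2 GB and 128 MB blocks in any order walks as 2 GB -> 512 MB -> 128 MB,
+  * putting the likeliest match first. */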
ram_list.mru_block = NULL;
+ ram_list.version++;
+ qemu_mutex_unlock_ramlist();
+
ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
last_ram_offset() >> TARGET_PAGE_BITS);
memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
{
RAMBlock *block;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ /* This assumes the iothread lock is taken here too. */
+ qemu_mutex_lock_ramlist();
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (addr == block->offset) {
- QLIST_REMOVE(block, next);
+ QTAILQ_REMOVE(&ram_list.blocks, block, next);
ram_list.mru_block = NULL;
+ ram_list.version++;
g_free(block);
- return;
+ break;
}
}
+ qemu_mutex_unlock_ramlist();
}
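
/* Hypothetical debug helper (not in the patch) for the invariant that the
 * sorted insertion in qemu_ram_alloc_from_ptr() maintains: */
static void example_assert_ram_list_sorted(void)
{
    RAMBlock *block;
    ram_addr_t prev = RAM_ADDR_MAX;

    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        assert(block->length <= prev);
        prev = block->length;
    }
    qemu_mutex_unlock_ramlist();
}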
void qemu_ram_free(ram_addr_t addr)
{
RAMBlock *block;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ /* This assumes the iothread lock is taken here too. */
+ qemu_mutex_lock_ramlist();
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (addr == block->offset) {
- QLIST_REMOVE(block, next);
+ QTAILQ_REMOVE(&ram_list.blocks, block, next);
ram_list.mru_block = NULL;
+ ram_list.version++;
if (block->flags & RAM_PREALLOC_MASK) {
;
} else if (mem_path) {
#endif
}
g_free(block);
- return;
+ break;
}
}
+ qemu_mutex_unlock_ramlist();
}
int flags;
void *area, *vaddr;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
offset = addr - block->offset;
if (offset < block->length) {
vaddr = block->host + offset;
{
RAMBlock *block;
+ /* The list is protected by the iothread lock here. */
block = ram_list.mru_block;
if (block && addr - block->offset < block->length) {
goto found;
}
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (addr - block->offset < block->length) {
goto found;
}
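
/* mru_block is a one-entry cache: a hit above skips the list walk, which
 * is why every insert/remove resets ram_list.mru_block to NULL. */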
{
RAMBlock *block;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ /* The list is protected by the iothread lock here. */
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (addr - block->offset < block->length) {
if (xen_enabled()) {
/* We need to check if the requested address is in the RAM
} else {
RAMBlock *block;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (addr - block->offset < block->length) {
if (addr - block->offset + *size > block->length)
*size = block->length - addr + block->offset;
return 0;
}
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
/* This case happens when the block is not mapped. */
if (block->host == NULL) {
continue;