void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
+ qemu_mutex_init(&ram_list.mutex);
memory_map_init();
io_mem_init();
#endif
}

@@ ... @@ void qemu_flush_coalesced_mmio_buffer(void)
kvm_flush_coalesced_mmio_buffer();
}
+void qemu_mutex_lock_ramlist(void)
+{
+ qemu_mutex_lock(&ram_list.mutex);
+}
+
+void qemu_mutex_unlock_ramlist(void)
+{
+ qemu_mutex_unlock(&ram_list.mutex);
+}
+
#if defined(__linux__) && !defined(TARGET_S390X)
#include <sys/vfs.h>
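
For orientation, this is roughly the shape of the RAMList structure these hunks rely on. The layout below is an assumption for illustration, not part of this patch:

    /* Assumed layout, for illustration only. */
    typedef struct RAMList {
        QemuMutex mutex;                 /* guards blocks and version */
        uint8_t *phys_dirty;             /* per-page dirty bitmap */
        RAMBlock *mru_block;             /* most-recently-used lookup cache */
        QTAILQ_HEAD(, RAMBlock) blocks;  /* all RAM blocks, biggest first */
        uint32_t version;                /* bumped on every list mutation */
    } RAMList;
    extern RAMList ram_list;
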
@@ ... @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
}
pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
+ /* This assumes the iothread lock is taken here too. */
+ qemu_mutex_lock_ramlist();
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
        new_block->idstr);
abort();
}
}
+ qemu_mutex_unlock_ramlist();
}
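
The "iothread lock is taken here too" comments encode a lock ordering: the ramlist mutex always nests inside the big QEMU (iothread) lock, so a thread holding only the ramlist mutex must never try to acquire the iothread lock. A minimal sketch of a well-ordered caller; my_hotplug_ram() is hypothetical, not something in this patch:

    /* Hypothetical caller illustrating the lock ordering; not QEMU code. */
    static void my_hotplug_ram(void)
    {
        qemu_mutex_lock_iothread();    /* outer lock, always taken first */
        qemu_mutex_lock_ramlist();     /* inner lock added by this patch */
        /* ... add or remove a RAMBlock here ... */
        qemu_mutex_unlock_ramlist();
        qemu_mutex_unlock_iothread();
    }
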
@@ ... @@ static int memory_try_enable_merging(void *addr, size_t len)
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
MemoryRegion *mr)
{
- RAMBlock *new_block;
+ RAMBlock *block, *new_block;
size = TARGET_PAGE_ALIGN(size);
new_block = g_malloc0(sizeof(*new_block));
+ /* This assumes the iothread lock is taken here too. */
+ qemu_mutex_lock_ramlist();
new_block->mr = mr;
new_block->offset = find_ram_offset(size);
if (host) {
...
}
new_block->length = size;
- QTAILQ_INSERT_HEAD(&ram_list.blocks, new_block, next);
+ /* Keep the list sorted from biggest to smallest block. */
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ if (block->length < new_block->length) {
+ break;
+ }
+ }
+ if (block) {
+ QTAILQ_INSERT_BEFORE(block, new_block, next);
+ } else {
+ QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
+ }
ram_list.mru_block = NULL;
+ ram_list.version++;
+ qemu_mutex_unlock_ramlist();
+
ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
last_ram_offset() >> TARGET_PAGE_BITS);
memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
       0, size >> TARGET_PAGE_BITS);
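
The insertion loop above keeps ram_list.blocks sorted from biggest to smallest, so linear scans tend to hit large (hot) blocks first. A standalone demo of the same policy using the BSD <sys/queue.h> TAILQ macros, which QTAILQ mirrors; all names are illustrative:

    /* Demo of the biggest-first insert policy; illustrative names only. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    struct blk {
        size_t length;
        TAILQ_ENTRY(blk) next;
    };
    static TAILQ_HEAD(, blk) blk_list = TAILQ_HEAD_INITIALIZER(blk_list);

    static void insert_sorted(struct blk *nb)
    {
        struct blk *b;

        TAILQ_FOREACH(b, &blk_list, next) {
            if (b->length < nb->length) {
                break;              /* first smaller block: insert before it */
            }
        }
        if (b) {
            TAILQ_INSERT_BEFORE(b, nb, next);
        } else {
            TAILQ_INSERT_TAIL(&blk_list, nb, next);
        }
    }

    int main(void)
    {
        size_t sizes[] = { 4096, 1 << 20, 65536 };
        struct blk *b;

        for (int i = 0; i < 3; i++) {
            b = malloc(sizeof(*b));
            b->length = sizes[i];
            insert_sorted(b);
        }
        TAILQ_FOREACH(b, &blk_list, next) {
            printf("%zu\n", b->length);  /* 1048576, 65536, 4096 */
        }
        return 0;
    }
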
@@ ... @@
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
RAMBlock *block;
+ /* This assumes the iothread lock is taken here too. */
+ qemu_mutex_lock_ramlist();
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (addr == block->offset) {
QTAILQ_REMOVE(&ram_list.blocks, block, next);
ram_list.mru_block = NULL;
+ ram_list.version++;
g_free(block);
- return;
+ break;
}
}
+ qemu_mutex_unlock_ramlist();
}
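
With the mutex in place, code running outside the iothread lock (the migration thread is the motivating case) can walk the block list safely. A minimal sketch; total_ram_bytes() is a hypothetical helper, not part of this patch:

    /* Hypothetical reader: safe traversal from a non-iothread context. */
    static uint64_t total_ram_bytes(void)
    {
        RAMBlock *block;
        uint64_t total = 0;

        qemu_mutex_lock_ramlist();
        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            total += block->length;
        }
        qemu_mutex_unlock_ramlist();
        return total;
    }
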
void qemu_ram_free(ram_addr_t addr)
{
RAMBlock *block;
+ /* This assumes the iothread lock is taken here too. */
+ qemu_mutex_lock_ramlist();
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (addr == block->offset) {
QTAILQ_REMOVE(&ram_list.blocks, block, next);
ram_list.mru_block = NULL;
+ ram_list.version++;
if (block->flags & RAM_PREALLOC_MASK) {
;
} else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
...
#endif
}
g_free(block);
- return;
+ break;
}
}
+ qemu_mutex_unlock_ramlist();
}
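
ram_list.version gives readers a cheap way to notice that a block was added or removed between two visits, so cached iteration state can be revalidated instead of trusting stale pointers. An illustrative pattern, not code from this patch:

    /* Illustrative: rescan whenever the list has been mutated. */
    static uint32_t last_seen_version;

    static bool ram_list_changed(void)
    {
        if (last_seen_version != ram_list.version) {
            last_seen_version = ram_list.version;
            return true;   /* caller should drop cached blocks and rescan */
        }
        return false;
    }
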
@@ ... @@
void *qemu_get_ram_ptr(ram_addr_t addr)
{
RAMBlock *block;
+ /* The list is protected by the iothread lock here. */
block = ram_list.mru_block;
if (block && addr - block->offset < block->length) {
goto found;
@@ ... @@
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
RAMBlock *block;
+ /* The list is protected by the iothread lock here. */
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (addr - block->offset < block->length) {
if (xen_enabled()) {
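
Both lookup paths above share the same shape: try ram_list.mru_block first, fall back to a full scan, and remember the hit. Condensed into one sketch; find_block() is a hypothetical helper:

    /* Condensed sketch of the MRU lookup fast path. */
    static RAMBlock *find_block(ram_addr_t addr)
    {
        RAMBlock *block = ram_list.mru_block;

        if (block && addr - block->offset < block->length) {
            return block;                 /* fast path: cache hit */
        }
        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                ram_list.mru_block = block;   /* remember for next time */
                return block;
            }
        }
        return NULL;
    }
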