#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
-#include "exec/cputlb.h"
#include "translate-all.h"
+#include "sysemu/replay.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/range.h"
+#ifndef _WIN32
+#include "qemu/mmap-alloc.h"
+#endif
//#define DEBUG_SUBPAGE
static void tcg_commit(MemoryListener *listener);
static MemoryRegion io_mem_watch;
+
+/**
+ * CPUAddressSpace: all the information a CPU needs about an AddressSpace
+ * @cpu: the CPU whose AddressSpace this is
+ * @as: the AddressSpace itself
+ * @memory_dispatch: its dispatch pointer (cached, RCU protected)
+ * @tcg_as_listener: listener for tracking changes to the AddressSpace
+ */
+struct CPUAddressSpace {
+ CPUState *cpu;
+ AddressSpace *as;
+ struct AddressSpaceDispatch *memory_dispatch;
+ MemoryListener tcg_as_listener;
+};
+
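
(Aside, not part of the patch: a minimal sketch of how the cached dispatch pointer is meant to be consumed, mirroring what iotlb_to_region() does further down. cpu_get_dispatch is a hypothetical name, and cpu_ases[0] reflects the current one-address-space-per-CPU assumption.)

    /* Illustrative sketch only: read a CPU's cached dispatch pointer. */
    static AddressSpaceDispatch *cpu_get_dispatch(CPUState *cpu)
    {
        CPUAddressSpace *cpuas = &cpu->cpu_ases[0];

        /* Updated by tcg_commit(); read under RCU (or the iothread lock). */
        return atomic_rcu_read(&cpuas->memory_dispatch);
    }
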
#endif
#if !defined(CONFIG_USER_ONLY)
return section;
}
-static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
-{
- if (memory_region_is_ram(mr)) {
- return !(is_write && mr->readonly);
- }
- if (memory_region_is_romd(mr)) {
- return !is_write;
- }
-
- return false;
-}
-
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
MemoryRegionSection *section;
- section = address_space_translate_internal(cpu->memory_dispatch,
+ section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
addr, xlat, plen, false);
assert(!section->mr->iommu_ops);
/* We only support one address space per cpu at the moment. */
assert(cpu->as == as);
- if (cpu->tcg_as_listener) {
- memory_listener_unregister(cpu->tcg_as_listener);
- } else {
- cpu->tcg_as_listener = g_new0(MemoryListener, 1);
+ if (cpu->cpu_ases) {
+ /* We've already registered the listener for our only AS */
+ return;
}
- cpu->tcg_as_listener->commit = tcg_commit;
- memory_listener_register(cpu->tcg_as_listener, as);
+
+ cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
+ cpu->cpu_ases[0].cpu = cpu;
+ cpu->cpu_ases[0].as = as;
+ cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
+ memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
}
#endif
#ifndef CONFIG_USER_ONLY
cpu->as = &address_space_memory;
cpu->thread_id = qemu_get_thread_id();
- cpu_reload_memory_map(cpu);
#endif
#if defined(CONFIG_USER_ONLY)
vfprintf(stderr, fmt, ap);
fprintf(stderr, "\n");
cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
- if (qemu_log_enabled()) {
+ if (qemu_log_separate()) {
qemu_log("qemu: fatal: ");
qemu_log_vprintf(fmt, ap2);
qemu_log("\n");
}
va_end(ap2);
va_end(ap);
+ replay_finish();
#if defined(CONFIG_USER_ONLY)
{
struct sigaction act;
block = atomic_rcu_read(&ram_list.mru_block);
if (block && addr - block->offset < block->max_length) {
- goto found;
+ return block;
}
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
if (addr - block->offset < block->max_length) {
static void phys_section_destroy(MemoryRegion *mr)
{
+    bool have_sub_page = mr->subpage; /* memory_region_unref() may free mr, so latch this first */
+
memory_region_unref(mr);
- if (mr->subpage) {
+ if (have_sub_page) {
subpage_t *subpage = container_of(mr, subpage_t, iomem);
object_unref(OBJECT(&subpage->iomem));
g_free(subpage);
return 0;
}
- if (fs.f_type != HUGETLBFS_MAGIC)
- fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
-
return fs.f_bsize;
}
const char *path,
Error **errp)
{
+ struct stat st;
char *filename;
char *sanitized_name;
char *c;
- void *area = NULL;
+ void *area;
int fd;
uint64_t hpagesize;
Error *local_err = NULL;
goto error;
}
- /* Make name safe to use with mkstemp by replacing '/' with '_'. */
- sanitized_name = g_strdup(memory_region_name(block->mr));
- for (c = sanitized_name; *c != '\0'; c++) {
- if (*c == '/')
- *c = '_';
- }
+ if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
+ /* Make name safe to use with mkstemp by replacing '/' with '_'. */
+ sanitized_name = g_strdup(memory_region_name(block->mr));
+ for (c = sanitized_name; *c != '\0'; c++) {
+ if (*c == '/') {
+ *c = '_';
+ }
+ }
+
+ filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
+ sanitized_name);
+ g_free(sanitized_name);
- filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
- sanitized_name);
- g_free(sanitized_name);
+ fd = mkstemp(filename);
+ if (fd >= 0) {
+ unlink(filename);
+ }
+ g_free(filename);
+ } else {
+ fd = open(path, O_RDWR | O_CREAT, 0644);
+ }
- fd = mkstemp(filename);
if (fd < 0) {
error_setg_errno(errp, errno,
"unable to create backing store for hugepages");
- g_free(filename);
goto error;
}
- unlink(filename);
- g_free(filename);
memory = ROUND_UP(memory, hpagesize);
perror("ftruncate");
}
- area = mmap(0, memory, PROT_READ | PROT_WRITE,
- (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
- fd, 0);
+ area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
if (area == MAP_FAILED) {
error_setg_errno(errp, errno,
"unable to map backing store for hugepages");
return area;
error:
- if (mem_prealloc) {
- error_report("%s", error_get_pretty(*errp));
- exit(1);
- }
return NULL;
}
#endif
return NULL;
}
+const char *qemu_ram_get_idstr(RAMBlock *rb)
+{
+ return rb->idstr;
+}
+
/* Called with iothread lock held. */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
assert(block);
- newsize = TARGET_PAGE_ALIGN(newsize);
+ newsize = HOST_PAGE_ALIGN(newsize);
if (block->used_length == newsize) {
return 0;
return -1;
}
- size = TARGET_PAGE_ALIGN(size);
+ size = HOST_PAGE_ALIGN(size);
new_block = g_malloc0(sizeof(*new_block));
new_block->mr = mr;
new_block->used_length = size;
ram_addr_t addr;
Error *local_err = NULL;
- size = TARGET_PAGE_ALIGN(size);
- max_size = TARGET_PAGE_ALIGN(max_size);
+ size = HOST_PAGE_ALIGN(size);
+ max_size = HOST_PAGE_ALIGN(max_size);
new_block = g_malloc0(sizeof(*new_block));
new_block->mr = mr;
new_block->resized = resized;
return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
-void qemu_ram_free_from_ptr(ram_addr_t addr)
-{
- RAMBlock *block;
-
- qemu_mutex_lock_ramlist();
- QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
- if (addr == block->offset) {
- QLIST_REMOVE_RCU(block, next);
- ram_list.mru_block = NULL;
- /* Write list before version */
- smp_wmb();
- ram_list.version++;
- g_free_rcu(block, rcu);
- break;
- }
- }
- qemu_mutex_unlock_ramlist();
-}
-
static void reclaim_ramblock(RAMBlock *block)
{
if (block->flags & RAM_PREALLOC) {
xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
} else if (block->fd >= 0) {
- munmap(block->host, block->max_length);
+ qemu_ram_munmap(block->host, block->max_length);
close(block->fd);
#endif
} else {
return fd;
}
+void qemu_set_ram_fd(ram_addr_t addr, int fd)
+{
+ RAMBlock *block;
+
+ rcu_read_lock();
+ block = qemu_get_ram_block(addr);
+ block->fd = fd;
+ rcu_read_unlock();
+}
+
void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
RAMBlock *block;
* or address_space_rw instead. For local memory (e.g. video ram) that the
* device owns, use memory_region_get_ram_ptr.
*
- * By the time this function returns, the returned pointer is not protected
- * by RCU anymore. If the caller is not within an RCU critical section and
- * does not hold the iothread lock, it must have other means of protecting the
- * pointer, such as a reference to the region that includes the incoming
- * ram_addr_t.
+ * Called within RCU critical section.
*/
void *qemu_get_ram_ptr(ram_addr_t addr)
{
- RAMBlock *block;
- void *ptr;
-
- rcu_read_lock();
- block = qemu_get_ram_block(addr);
+ RAMBlock *block = qemu_get_ram_block(addr);
if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the whole memory in QEMU.
         * In that case just map until the end of the page.
         */
if (block->offset == 0) {
- ptr = xen_map_cache(addr, 0, 0);
- goto unlock;
+ return xen_map_cache(addr, 0, 0);
}
block->host = xen_map_cache(block->offset, block->max_length, 1);
}
- ptr = ramblock_ptr(block, addr - block->offset);
-
-unlock:
- rcu_read_unlock();
- return ptr;
+ return ramblock_ptr(block, addr - block->offset);
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
* but takes a size argument.
*
- * By the time this function returns, the returned pointer is not protected
- * by RCU anymore. If the caller is not within an RCU critical section and
- * does not hold the iothread lock, it must have other means of protecting the
- * pointer, such as a reference to the region that includes the incoming
- * ram_addr_t.
+ * Called within RCU critical section.
*/
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
- void *ptr;
+ RAMBlock *block;
+ ram_addr_t offset_inside_block;
if (*size == 0) {
return NULL;
}
- if (xen_enabled()) {
- return xen_map_cache(addr, *size, 1);
- } else {
- RAMBlock *block;
- rcu_read_lock();
- QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
- if (addr - block->offset < block->max_length) {
- if (addr - block->offset + *size > block->max_length)
- *size = block->max_length - addr + block->offset;
- ptr = ramblock_ptr(block, addr - block->offset);
- rcu_read_unlock();
- return ptr;
- }
+
+ block = qemu_get_ram_block(addr);
+ offset_inside_block = addr - block->offset;
+ *size = MIN(*size, block->max_length - offset_inside_block);
+
+ if (xen_enabled() && block->host == NULL) {
+ /* We need to check if the requested address is in the RAM
+ * because we don't want to map the entire memory in QEMU.
+ * In that case just map the requested area.
+ */
+ if (block->offset == 0) {
+ return xen_map_cache(addr, *size, 1);
}
- fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
- abort();
+ block->host = xen_map_cache(block->offset, block->max_length, 1);
}
+
+ return ramblock_ptr(block, offset_inside_block);
}
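
(Aside, illustrative only: both RAM-pointer helpers above now assume the caller already holds the RCU read lock, since the lock/unlock pairs were hoisted out of them. A file-local caller would therefore bracket the call itself; copy_from_guest_ram is a hypothetical name.)

    /* Hypothetical caller sketch; not a QEMU API. */
    static void copy_from_guest_ram(ram_addr_t addr, void *dst, hwaddr len)
    {
        hwaddr l = len;
        void *src;

        rcu_read_lock();
        src = qemu_ram_ptr_length(addr, &l); /* clamps l to the RAMBlock */
        if (src) {
            memcpy(dst, src, l);
        }
        rcu_read_unlock();
    }
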
-/* Some of the softmmu routines need to translate from a host pointer
- * (typically a TLB entry) back to a ram offset.
+/*
+ * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
+ * in that RAMBlock.
+ *
+ * ptr: Host pointer to look up
+ * round_offset: If true round the result offset down to a page boundary
+ * *ram_addr: set to result ram_addr
+ * *offset: set to result offset within the RAMBlock
+ *
+ * Returns: RAMBlock (or NULL if not found)
*
* By the time this function returns, the returned pointer is not protected
 * by RCU anymore. If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
* ram_addr_t.
*/
-MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
+RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
+ ram_addr_t *ram_addr,
+ ram_addr_t *offset)
{
RAMBlock *block;
uint8_t *host = ptr;
- MemoryRegion *mr;
if (xen_enabled()) {
rcu_read_lock();
*ram_addr = xen_ram_addr_from_mapcache(ptr);
- mr = qemu_get_ram_block(*ram_addr)->mr;
+ block = qemu_get_ram_block(*ram_addr);
+ if (block) {
+ *offset = (host - block->host);
+ }
rcu_read_unlock();
- return mr;
+ return block;
}
rcu_read_lock();
return NULL;
found:
- *ram_addr = block->offset + (host - block->host);
- mr = block->mr;
+ *offset = (host - block->host);
+ if (round_offset) {
+ *offset &= TARGET_PAGE_MASK;
+ }
+ *ram_addr = block->offset + *offset;
rcu_read_unlock();
- return mr;
+ return block;
+}
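
(Aside, illustrative only: a sketch of a lookup through the new entry point with round_offset=true, so the returned ram_addr is rounded down to a TARGET_PAGE boundary. dump_host_ptr and host_ptr are hypothetical; the outer rcu_read_lock() supplies the protection the comment above asks the caller for.)

    /* Sketch: resolve a host pointer back to its RAMBlock. */
    static void dump_host_ptr(void *host_ptr)
    {
        ram_addr_t ram_addr, offset;
        RAMBlock *rb;

        rcu_read_lock();
        rb = qemu_ram_block_from_host(host_ptr, true, &ram_addr, &offset);
        if (rb) {
            /* ram_addr is page-aligned because round_offset was true */
            fprintf(stderr, "%p -> %s + 0x" RAM_ADDR_FMT "\n",
                    host_ptr, qemu_ram_get_idstr(rb), offset);
        }
        rcu_read_unlock();
    }
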
+
+/*
+ * Finds the named RAMBlock
+ *
+ * name: The name of RAMBlock to find
+ *
+ * Returns: RAMBlock (or NULL if not found)
+ */
+RAMBlock *qemu_ram_block_by_name(const char *name)
+{
+ RAMBlock *block;
+
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+ if (!strcmp(name, block->idstr)) {
+ return block;
+ }
+ }
+
+ return NULL;
+}
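
(Aside, illustrative only: the walk uses QLIST_FOREACH_RCU, so callers are expected to hold rcu_read_lock(). "pc.ram" below is just an example idstr.)

    rcu_read_lock();
    RAMBlock *rb = qemu_ram_block_by_name("pc.ram");
    if (rb) {
        fprintf(stderr, "found block %s\n", qemu_ram_get_idstr(rb));
    }
    rcu_read_unlock();
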
+
+/* Some of the softmmu routines need to translate from a host pointer
+ (typically a TLB entry) back to a ram offset. */
+MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
+{
+ RAMBlock *block;
+ ram_addr_t offset; /* Not used */
+
+ block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
+
+ if (!block) {
+ return NULL;
+ }
+
+ return block->mr;
}
+/* Called within RCU critical section. */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
uint64_t val, unsigned size)
{
/* we remove the notdirty callback only if the code has been
flushed */
if (!cpu_physical_memory_is_clean(ram_addr)) {
- CPUArchState *env = current_cpu->env_ptr;
- tlb_set_dirty(env, current_cpu->mem_io_vaddr);
+ tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
}
}
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
{
- AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
+ CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
+ AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
MemoryRegionSection *sections = d->map.sections;
return sections[index & ~TARGET_PAGE_MASK].mr;
static void tcg_commit(MemoryListener *listener)
{
- CPUState *cpu;
+ CPUAddressSpace *cpuas;
+ AddressSpaceDispatch *d;
/* since each CPU stores ram addresses in its TLB cache, we must
reset the modified entries */
- /* XXX: slow ! */
- CPU_FOREACH(cpu) {
- /* FIXME: Disentangle the cpu.h circular files deps so we can
- directly get the right CPU from listener. */
- if (cpu->tcg_as_listener != listener) {
- continue;
- }
- cpu_reload_memory_map(cpu);
- }
+ cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
+ cpu_reloading_memory_map();
+ /* The CPU and TLB are protected by the iothread lock.
+ * We reload the dispatch pointer now because cpu_reloading_memory_map()
+ * may have split the RCU critical section.
+ */
+ d = atomic_rcu_read(&cpuas->as->dispatch);
+ cpuas->memory_dispatch = d;
+ tlb_flush(cpuas->cpu, 1);
}
void address_space_init_dispatch(AddressSpace *as)
return release_lock;
}
-MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
- uint8_t *buf, int len, bool is_write)
+/* Called within RCU critical section. */
+static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs,
+ const uint8_t *buf,
+ int len, hwaddr addr1,
+ hwaddr l, MemoryRegion *mr)
{
- hwaddr l;
uint8_t *ptr;
uint64_t val;
- hwaddr addr1;
- MemoryRegion *mr;
MemTxResult result = MEMTX_OK;
bool release_lock = false;
- rcu_read_lock();
- while (len > 0) {
- l = len;
- mr = address_space_translate(as, addr, &addr1, &l, is_write);
-
- if (is_write) {
- if (!memory_access_is_direct(mr, is_write)) {
- release_lock |= prepare_mmio_access(mr);
- l = memory_access_size(mr, l, addr1);
- /* XXX: could force current_cpu to NULL to avoid
- potential bugs */
- switch (l) {
- case 8:
- /* 64 bit write access */
- val = ldq_p(buf);
- result |= memory_region_dispatch_write(mr, addr1, val, 8,
- attrs);
- break;
- case 4:
- /* 32 bit write access */
- val = ldl_p(buf);
- result |= memory_region_dispatch_write(mr, addr1, val, 4,
- attrs);
- break;
- case 2:
- /* 16 bit write access */
- val = lduw_p(buf);
- result |= memory_region_dispatch_write(mr, addr1, val, 2,
- attrs);
- break;
- case 1:
- /* 8 bit write access */
- val = ldub_p(buf);
- result |= memory_region_dispatch_write(mr, addr1, val, 1,
- attrs);
- break;
- default:
- abort();
- }
- } else {
- addr1 += memory_region_get_ram_addr(mr);
- /* RAM case */
- ptr = qemu_get_ram_ptr(addr1);
- memcpy(ptr, buf, l);
- invalidate_and_set_dirty(mr, addr1, l);
+ for (;;) {
+ if (!memory_access_is_direct(mr, true)) {
+ release_lock |= prepare_mmio_access(mr);
+ l = memory_access_size(mr, l, addr1);
+ /* XXX: could force current_cpu to NULL to avoid
+ potential bugs */
+ switch (l) {
+ case 8:
+ /* 64 bit write access */
+ val = ldq_p(buf);
+ result |= memory_region_dispatch_write(mr, addr1, val, 8,
+ attrs);
+ break;
+ case 4:
+ /* 32 bit write access */
+ val = ldl_p(buf);
+ result |= memory_region_dispatch_write(mr, addr1, val, 4,
+ attrs);
+ break;
+ case 2:
+ /* 16 bit write access */
+ val = lduw_p(buf);
+ result |= memory_region_dispatch_write(mr, addr1, val, 2,
+ attrs);
+ break;
+ case 1:
+ /* 8 bit write access */
+ val = ldub_p(buf);
+ result |= memory_region_dispatch_write(mr, addr1, val, 1,
+ attrs);
+ break;
+ default:
+ abort();
}
} else {
- if (!memory_access_is_direct(mr, is_write)) {
- /* I/O case */
- release_lock |= prepare_mmio_access(mr);
- l = memory_access_size(mr, l, addr1);
- switch (l) {
- case 8:
- /* 64 bit read access */
- result |= memory_region_dispatch_read(mr, addr1, &val, 8,
- attrs);
- stq_p(buf, val);
- break;
- case 4:
- /* 32 bit read access */
- result |= memory_region_dispatch_read(mr, addr1, &val, 4,
- attrs);
- stl_p(buf, val);
- break;
- case 2:
- /* 16 bit read access */
- result |= memory_region_dispatch_read(mr, addr1, &val, 2,
- attrs);
- stw_p(buf, val);
- break;
- case 1:
- /* 8 bit read access */
- result |= memory_region_dispatch_read(mr, addr1, &val, 1,
- attrs);
- stb_p(buf, val);
- break;
- default:
- abort();
- }
- } else {
- /* RAM case */
- ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
- memcpy(buf, ptr, l);
- }
+ addr1 += memory_region_get_ram_addr(mr);
+ /* RAM case */
+ ptr = qemu_get_ram_ptr(addr1);
+ memcpy(ptr, buf, l);
+ invalidate_and_set_dirty(mr, addr1, l);
}
        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

len -= l;
buf += l;
addr += l;
+
+ if (!len) {
+ break;
+ }
+
+ l = len;
+ mr = address_space_translate(as, addr, &addr1, &l, true);
}
- rcu_read_unlock();
return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
const uint8_t *buf, int len)
{
- return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
+ hwaddr l;
+ hwaddr addr1;
+ MemoryRegion *mr;
+ MemTxResult result = MEMTX_OK;
+
+ if (len > 0) {
+ rcu_read_lock();
+ l = len;
+ mr = address_space_translate(as, addr, &addr1, &l, true);
+ result = address_space_write_continue(as, addr, attrs, buf, len,
+ addr1, l, mr);
+ rcu_read_unlock();
+ }
+
+ return result;
}
-MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
- uint8_t *buf, int len)
+/* Called within RCU critical section. */
+MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, uint8_t *buf,
+ int len, hwaddr addr1, hwaddr l,
+ MemoryRegion *mr)
{
- return address_space_rw(as, addr, attrs, buf, len, false);
+ uint8_t *ptr;
+ uint64_t val;
+ MemTxResult result = MEMTX_OK;
+ bool release_lock = false;
+
+ for (;;) {
+ if (!memory_access_is_direct(mr, false)) {
+ /* I/O case */
+ release_lock |= prepare_mmio_access(mr);
+ l = memory_access_size(mr, l, addr1);
+ switch (l) {
+ case 8:
+ /* 64 bit read access */
+ result |= memory_region_dispatch_read(mr, addr1, &val, 8,
+ attrs);
+ stq_p(buf, val);
+ break;
+ case 4:
+ /* 32 bit read access */
+ result |= memory_region_dispatch_read(mr, addr1, &val, 4,
+ attrs);
+ stl_p(buf, val);
+ break;
+ case 2:
+ /* 16 bit read access */
+ result |= memory_region_dispatch_read(mr, addr1, &val, 2,
+ attrs);
+ stw_p(buf, val);
+ break;
+ case 1:
+ /* 8 bit read access */
+ result |= memory_region_dispatch_read(mr, addr1, &val, 1,
+ attrs);
+ stb_p(buf, val);
+ break;
+ default:
+ abort();
+ }
+ } else {
+ /* RAM case */
+ ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
+ memcpy(buf, ptr, l);
+ }
+
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
+ release_lock = false;
+ }
+
+ len -= l;
+ buf += l;
+ addr += l;
+
+ if (!len) {
+ break;
+ }
+
+ l = len;
+ mr = address_space_translate(as, addr, &addr1, &l, false);
+ }
+
+ return result;
+}
+
+MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, uint8_t *buf, int len)
+{
+ hwaddr l;
+ hwaddr addr1;
+ MemoryRegion *mr;
+ MemTxResult result = MEMTX_OK;
+
+ if (len > 0) {
+ rcu_read_lock();
+ l = len;
+ mr = address_space_translate(as, addr, &addr1, &l, false);
+ result = address_space_read_continue(as, addr, attrs, buf, len,
+ addr1, l, mr);
+ rcu_read_unlock();
+ }
+
+ return result;
}
+MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
+ uint8_t *buf, int len, bool is_write)
+{
+ if (is_write) {
+ return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
+ } else {
+ return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
+ }
+}
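
(Aside, illustrative only: with the split in place, address_space_rw() is a thin dispatcher, and direction-aware callers can use the write/read entry points directly. poke_guest_word and the address 0x1000 are made up for the example.)

    /* Sketch: a direction-known caller skips the dispatcher. */
    static void poke_guest_word(void)
    {
        uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };
        MemTxResult r;

        /* 0x1000 is an arbitrary example guest physical address */
        r = address_space_write(&address_space_memory, 0x1000,
                                MEMTXATTRS_UNSPECIFIED, buf, sizeof(buf));
        if (r != MEMTX_OK) {
            /* some part of the access faulted on the bus/device */
            fprintf(stderr, "write failed: %d\n", (int)r);
        }
    }
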
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
int len, int is_write)
void cpu_exec_init_all(void)
{
qemu_mutex_init(&ram_list.mutex);
- memory_map_init();
io_mem_init();
+ memory_map_init();
qemu_mutex_init(&map_client_list_lock);
}
hwaddr l, xlat, base;
MemoryRegion *mr, *this_mr;
ram_addr_t raddr;
+ void *ptr;
if (len == 0) {
return NULL;
}
memory_region_ref(mr);
- rcu_read_unlock();
*plen = done;
- return qemu_ram_ptr_length(raddr + base, plen);
+ ptr = qemu_ram_ptr_length(raddr + base, plen);
+ rcu_read_unlock();
+
+ return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
}
return 0;
}
+
+/*
+ * Allows code that needs to deal with migration bitmaps etc to still be built
+ * target independent.
+ */
+size_t qemu_target_page_bits(void)
+{
+ return TARGET_PAGE_BITS;
+}
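
(Aside, illustrative only: target-independent code, e.g. migration dirty-bitmap sizing, can use the accessor instead of referencing TARGET_PAGE_BITS directly. dirty_bitmap_bits is a hypothetical helper.)

    /* Sketch: bits needed for a one-bit-per-target-page dirty bitmap. */
    static size_t dirty_bitmap_bits(uint64_t ram_bytes)
    {
        return ram_bytes >> qemu_target_page_bits();
    }
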
+
#endif
/*