* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
+#include "qapi/error.h"
#ifndef _WIN32
-#include <sys/mman.h>
#endif
-#include "qemu-common.h"
+#include "qemu/cutils.h"
#include "cpu.h"
+#include "exec/exec-all.h"
#include "tcg.h"
-#include "hw/hw.h"
+#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
+#include "hw/xen/xen.h"
#endif
-#include "hw/qdev.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
-#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
+#if defined(CONFIG_USER_ONLY)
+#include "qemu.h"
+#else /* !CONFIG_USER_ONLY */
+#include "hw/hw.h"
#include "exec/memory.h"
+#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
-#if defined(CONFIG_USER_ONLY)
-#include <qemu.h>
-#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/ram_addr.h"
#include "exec/log.h"
+#include "migration/vmstate.h"
+
#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
struct AddressSpaceDispatch {
struct rcu_head rcu;
+ MemoryRegionSection *mru_section;
/* This is a multi-level map on the physical address space.
* The bottom level has pointers to MemoryRegionSections.
*/
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
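+ /* Carry the previous allocation size across dispatch rebuilds so a new
+ * map starts near its final size instead of regrowing from 16. */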
+ static unsigned alloc_hint = 16;
if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
- map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
+ map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
+ alloc_hint = map->nodes_nb_alloc;
}
}
}
}
+static inline bool section_covers_addr(const MemoryRegionSection *section,
+ hwaddr addr)
+{
+ /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
+ * the section must cover the entire address space.
+ */
+ return section->size.hi ||
+ range_covers_byte(section->offset_within_address_space,
+ section->size.lo, addr);
+}
+
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
Node *nodes, MemoryRegionSection *sections)
{
lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
}
- if (sections[lp.ptr].size.hi ||
- range_covers_byte(sections[lp.ptr].offset_within_address_space,
- sections[lp.ptr].size.lo, addr)) {
+ if (section_covers_addr(&sections[lp.ptr], addr)) {
return &sections[lp.ptr];
} else {
return &sections[PHYS_SECTION_UNASSIGNED];
hwaddr addr,
bool resolve_subpage)
{
- MemoryRegionSection *section;
+ MemoryRegionSection *section = atomic_read(&d->mru_section);
subpage_t *subpage;
+ bool update;
- section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
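+ /* Fast path: reuse the cached most-recently-used section if it still
+ * covers addr; otherwise fall back to the full page-table walk. */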
+ if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
+ section_covers_addr(section, addr)) {
+ update = false;
+ } else {
+ section = phys_page_find(d->phys_map, addr, d->map.nodes,
+ d->map.sections);
+ update = true;
+ }
if (resolve_subpage && section->mr->subpage) {
subpage = container_of(section->mr, subpage_t, iomem);
section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
}
+ if (update) {
+ atomic_set(&d->mru_section, section);
+ }
return section;
}
}
#endif
-#ifndef CONFIG_USER_ONLY
-static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
-
-static int cpu_get_free_index(Error **errp)
-{
- int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
-
- if (cpu >= MAX_CPUMASK_BITS) {
- error_setg(errp, "Trying to use more CPUs than max of %d",
- MAX_CPUMASK_BITS);
- return -1;
- }
-
- bitmap_set(cpu_index_map, cpu, 1);
- return cpu;
-}
-
-void cpu_exec_exit(CPUState *cpu)
-{
- if (cpu->cpu_index == -1) {
- /* cpu_index was never allocated by this @cpu or was already freed. */
- return;
- }
-
- bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
- cpu->cpu_index = -1;
-}
-#else
-
-static int cpu_get_free_index(Error **errp)
+static int cpu_get_free_index(void)
{
CPUState *some_cpu;
int cpu_index = 0;
void cpu_exec_exit(CPUState *cpu)
{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
+ cpu_list_lock();
+ if (cpu->node.tqe_prev == NULL) {
+ /* there is nothing to undo since cpu_exec_init() hasn't been called */
+ cpu_list_unlock();
+ return;
+ }
+
+ QTAILQ_REMOVE(&cpus, cpu, node);
+ cpu->node.tqe_prev = NULL;
+ cpu->cpu_index = UNASSIGNED_CPU_INDEX;
+ cpu_list_unlock();
+
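+ /* Undo the vmstate registrations done in cpu_exec_init(). */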
+ if (cc->vmsd != NULL) {
+ vmstate_unregister(NULL, cc->vmsd, cpu);
+ }
+ if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
+ vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
+ }
}
-#endif
void cpu_exec_init(CPUState *cpu, Error **errp)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
- int cpu_index;
- Error *local_err = NULL;
+ CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
+ Error *local_err ATTRIBUTE_UNUSED = NULL;
cpu->as = NULL;
cpu->num_ases = 0;
object_ref(OBJECT(cpu->memory));
#endif
-#if defined(CONFIG_USER_ONLY)
cpu_list_lock();
-#endif
- cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
- if (local_err) {
- error_propagate(errp, local_err);
-#if defined(CONFIG_USER_ONLY)
- cpu_list_unlock();
-#endif
- return;
+ if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
+ cpu->cpu_index = cpu_get_free_index();
+ assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
}
QTAILQ_INSERT_TAIL(&cpus, cpu, node);
-#if defined(CONFIG_USER_ONLY)
cpu_list_unlock();
-#endif
+
+#ifndef CONFIG_USER_ONLY
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
- vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
+ vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
}
if (cc->vmsd != NULL) {
- vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
+ vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
}
+#endif
}
#if defined(CONFIG_USER_ONLY)
if (memory_region_is_ram(section->mr)) {
/* Normal RAM. */
- iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
- + xlat;
+ iotlb = memory_region_get_ram_addr(section->mr) + xlat;
if (!section->readonly) {
iotlb |= PHYS_SECTION_NOTDIRTY;
} else {
}
#ifdef __linux__
-
-#include <sys/vfs.h>
-
-#define HUGETLBFS_MAGIC 0x958458f6
-
-static long gethugepagesize(const char *path, Error **errp)
-{
- struct statfs fs;
- int ret;
-
- do {
- ret = statfs(path, &fs);
- } while (ret != 0 && errno == EINTR);
-
- if (ret != 0) {
- error_setg_errno(errp, errno, "failed to get page size of file %s",
- path);
- return 0;
- }
-
- return fs.f_bsize;
-}
-
static void *file_ram_alloc(RAMBlock *block,
ram_addr_t memory,
const char *path,
Error **errp)
{
- struct stat st;
+ bool unlink_on_error = false;
char *filename;
char *sanitized_name;
char *c;
- void *area;
- int fd;
- uint64_t hpagesize;
- Error *local_err = NULL;
-
- hpagesize = gethugepagesize(path, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- goto error;
- }
- block->mr->align = hpagesize;
-
- if (memory < hpagesize) {
- error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
- "or larger than huge page size 0x%" PRIx64,
- memory, hpagesize);
- goto error;
- }
+ void *area = MAP_FAILED;
+ int fd = -1;
+ int64_t page_size;
if (kvm_enabled() && !kvm_has_sync_mmu()) {
error_setg(errp,
"host lacks kvm mmu notifiers, -mem-path unsupported");
- goto error;
+ return NULL;
}
- if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
- /* Make name safe to use with mkstemp by replacing '/' with '_'. */
- sanitized_name = g_strdup(memory_region_name(block->mr));
- for (c = sanitized_name; *c != '\0'; c++) {
- if (*c == '/') {
- *c = '_';
- }
+ for (;;) {
+ fd = open(path, O_RDWR);
+ if (fd >= 0) {
+ /* @path names an existing file, use it */
+ break;
}
+ if (errno == ENOENT) {
+ /* @path names a file that doesn't exist, create it */
+ fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
+ if (fd >= 0) {
+ unlink_on_error = true;
+ break;
+ }
+ } else if (errno == EISDIR) {
+ /* @path names a directory, create a file there */
+ /* Make name safe to use with mkstemp by replacing '/' with '_'. */
+ sanitized_name = g_strdup(memory_region_name(block->mr));
+ for (c = sanitized_name; *c != '\0'; c++) {
+ if (*c == '/') {
+ *c = '_';
+ }
+ }
- filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
- sanitized_name);
- g_free(sanitized_name);
+ filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
+ sanitized_name);
+ g_free(sanitized_name);
- fd = mkstemp(filename);
- if (fd >= 0) {
- unlink(filename);
+ fd = mkstemp(filename);
+ if (fd >= 0) {
+ unlink(filename);
+ g_free(filename);
+ break;
+ }
+ g_free(filename);
}
- g_free(filename);
- } else {
- fd = open(path, O_RDWR | O_CREAT, 0644);
+ if (errno != EEXIST && errno != EINTR) {
+ error_setg_errno(errp, errno,
+ "can't open backing store %s for guest RAM",
+ path);
+ goto error;
+ }
+ /*
+ * Try again on EINTR and EEXIST. The latter happens when
+ * something else creates the file between our two open().
+ */
}
- if (fd < 0) {
- error_setg_errno(errp, errno,
- "unable to create backing store for hugepages");
+ page_size = qemu_fd_getpagesize(fd);
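+ /* Align to the larger of the file's page size (the huge page size on
+ * hugetlbfs) and QEMU_VMALLOC_ALIGN. */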
+ block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
+
+ if (memory < page_size) {
+ error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
+ "or larger than page size 0x%" PRIx64,
+ memory, page_size);
goto error;
}
- memory = ROUND_UP(memory, hpagesize);
+ memory = ROUND_UP(memory, page_size);
/*
* ftruncate is not supported by hugetlbfs in older
perror("ftruncate");
}
- area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
+ area = qemu_ram_mmap(fd, memory, block->mr->align,
+ block->flags & RAM_SHARED);
if (area == MAP_FAILED) {
error_setg_errno(errp, errno,
- "unable to map backing store for hugepages");
- close(fd);
+ "unable to map backing store for guest RAM");
goto error;
}
if (mem_prealloc) {
- os_mem_prealloc(fd, area, memory);
+ os_mem_prealloc(fd, area, memory, errp);
+ if (errp && *errp) {
+ goto error;
+ }
}
block->fd = fd;
return area;
error:
+ if (area != MAP_FAILED) {
+ qemu_ram_munmap(area, memory);
+ }
+ if (unlink_on_error) {
+ unlink(path);
+ }
+ if (fd != -1) {
+ close(fd);
+ }
return NULL;
}
#endif
}
}
-/* Called within an RCU critical section, or while the ramlist lock
- * is held.
- */
-static RAMBlock *find_ram_block(ram_addr_t addr)
-{
- RAMBlock *block;
-
- QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
- if (block->offset == addr) {
- return block;
- }
- }
-
- return NULL;
-}
-
const char *qemu_ram_get_idstr(RAMBlock *rb)
{
return rb->idstr;
}
/* Called with iothread lock held. */
-void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
+void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
- RAMBlock *new_block, *block;
+ RAMBlock *block;
- rcu_read_lock();
- new_block = find_ram_block(addr);
assert(new_block);
assert(!new_block->idstr[0]);
}
pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
+ rcu_read_lock();
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
- if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
+ if (block != new_block &&
+ !strcmp(block->idstr, new_block->idstr)) {
fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
new_block->idstr);
abort();
}
/* Called with iothread lock held. */
-void qemu_ram_unset_idstr(ram_addr_t addr)
+void qemu_ram_unset_idstr(RAMBlock *block)
{
- RAMBlock *block;
-
/* FIXME: arch_init.c assumes that this is not called throughout
* migration. Ignore the problem since hot-unplug during migration
* does not work anyway.
*/
-
- rcu_read_lock();
- block = find_ram_block(addr);
if (block) {
memset(block->idstr, 0, sizeof(block->idstr));
}
- rcu_read_unlock();
}
static int memory_try_enable_merging(void *addr, size_t len)
* resize callback to update device state and/or add assertions to detect
* misuse, if necessary.
*/
-int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
+int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
- RAMBlock *block = find_ram_block(base);
-
assert(block);
newsize = HOST_PAGE_ALIGN(newsize);
if (err) {
error_propagate(errp, err);
qemu_mutex_unlock_ramlist();
+ return;
}
} else {
new_block->host = phys_mem_alloc(new_block->max_length,
"cannot set up guest memory '%s'",
memory_region_name(new_block->mr));
qemu_mutex_unlock_ramlist();
+ return;
}
memory_try_enable_merging(new_block->host, new_block->max_length);
}
g_free(block);
}
-void qemu_ram_free(ram_addr_t addr)
+void qemu_ram_free(RAMBlock *block)
{
- RAMBlock *block;
+ if (!block) {
+ return;
+ }
qemu_mutex_lock_ramlist();
- QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
- if (addr == block->offset) {
- QLIST_REMOVE_RCU(block, next);
- ram_list.mru_block = NULL;
- /* Write list before version */
- smp_wmb();
- ram_list.version++;
- call_rcu(block, reclaim_ramblock, rcu);
- break;
- }
- }
+ QLIST_REMOVE_RCU(block, next);
+ ram_list.mru_block = NULL;
+ /* Write list before version */
+ smp_wmb();
+ ram_list.version++;
+ call_rcu(block, reclaim_ramblock, rcu);
qemu_mutex_unlock_ramlist();
}
}
#endif /* !_WIN32 */
-int qemu_get_ram_fd(ram_addr_t addr)
-{
- RAMBlock *block;
- int fd;
-
- rcu_read_lock();
- block = qemu_get_ram_block(addr);
- fd = block->fd;
- rcu_read_unlock();
- return fd;
-}
-
-void qemu_set_ram_fd(ram_addr_t addr, int fd)
-{
- RAMBlock *block;
-
- rcu_read_lock();
- block = qemu_get_ram_block(addr);
- block->fd = fd;
- rcu_read_unlock();
-}
-
-void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
-{
- RAMBlock *block;
- void *ptr;
-
- rcu_read_lock();
- block = qemu_get_ram_block(addr);
- ptr = ramblock_ptr(block, 0);
- rcu_read_unlock();
- return ptr;
-}
-
/* Return a host pointer to ram allocated with qemu_ram_alloc.
* This should not be used for general purpose DMA. Use address_space_map
* or address_space_rw instead. For local memory (e.g. video ram) that the
*
* Called within RCU critical section.
*/
-void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
+void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
RAMBlock *block = ram_block;
if (block == NULL) {
block = qemu_get_ram_block(addr);
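+ /* A NULL block means addr is a ram_addr_t; make it relative to the block found. */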
+ addr -= block->offset;
}
if (xen_enabled() && block->host == NULL) {
block->host = xen_map_cache(block->offset, block->max_length, 1);
}
- return ramblock_ptr(block, addr - block->offset);
+ return ramblock_ptr(block, addr);
}
-/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
+/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
* but takes a size argument.
*
* Called within RCU critical section.
hwaddr *size)
{
RAMBlock *block = ram_block;
- ram_addr_t offset_inside_block;
if (*size == 0) {
return NULL;
}
if (block == NULL) {
block = qemu_get_ram_block(addr);
+ addr -= block->offset;
}
- offset_inside_block = addr - block->offset;
- *size = MIN(*size, block->max_length - offset_inside_block);
+ *size = MIN(*size, block->max_length - addr);
if (xen_enabled() && block->host == NULL) {
/* We need to check if the requested address is in the RAM
block->host = xen_map_cache(block->offset, block->max_length, 1);
}
- return ramblock_ptr(block, offset_inside_block);
+ return ramblock_ptr(block, addr);
}
/*
* ram_addr_t.
*/
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
- ram_addr_t *ram_addr,
ram_addr_t *offset)
{
RAMBlock *block;
uint8_t *host = ptr;
if (xen_enabled()) {
+ ram_addr_t ram_addr;
rcu_read_lock();
- *ram_addr = xen_ram_addr_from_mapcache(ptr);
- block = qemu_get_ram_block(*ram_addr);
+ ram_addr = xen_ram_addr_from_mapcache(ptr);
+ block = qemu_get_ram_block(ram_addr);
if (block) {
- *offset = (host - block->host);
+ *offset = ram_addr - block->offset;
}
rcu_read_unlock();
return block;
if (round_offset) {
*offset &= TARGET_PAGE_MASK;
}
- *ram_addr = block->offset + *offset;
rcu_read_unlock();
return block;
}
/* Some of the softmmu routines need to translate from a host pointer
(typically a TLB entry) back to a ram offset. */
-MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
+ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
RAMBlock *block;
- ram_addr_t offset; /* Not used */
-
- block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
+ ram_addr_t offset;
+ block = qemu_ram_block_from_host(ptr, false, &offset);
if (!block) {
- return NULL;
+ return RAM_ADDR_INVALID;
}
- return block->mr;
+ return block->offset + offset;
}
/* Called within RCU critical section. */
}
switch (size) {
case 1:
- stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
+ stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
break;
case 2:
- stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
+ stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
break;
case 4:
- stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
+ stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
break;
default:
abort();
target_ulong pc, cs_base;
target_ulong vaddr;
CPUWatchpoint *wp;
- int cpu_flags;
+ uint32_t cpu_flags;
if (cpu->watchpoint_hit) {
/* We re-entered the check after replacing the TB. Now raise
} else {
cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
- cpu_resume_from_signal(cpu, NULL);
+ cpu_loop_exit_noexc(cpu);
}
}
} else {
hwaddr length)
{
uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
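+ /* Turn the region-relative offset into a global ram_addr_t for dirty tracking. */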
+ addr += memory_region_get_ram_addr(mr);
+
/* No early return if dirty_log_mask is or becomes 0, because
* cpu_physical_memory_set_dirty_range will still call
* xen_modified_memory.
abort();
}
} else {
- addr1 += memory_region_get_ram_addr(mr);
/* RAM case */
- ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
+ ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
memcpy(ptr, buf, l);
invalidate_and_set_dirty(mr, addr1, l);
}
}
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr(mr->ram_block,
- memory_region_get_ram_addr(mr) + addr1);
+ ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
memcpy(buf, ptr, l);
}
memory_region_is_romd(mr))) {
l = memory_access_size(mr, l, addr1);
} else {
- addr1 += memory_region_get_ram_addr(mr);
/* ROM/RAM case */
- ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
+ ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
switch (type) {
case WRITE_DATA:
memcpy(ptr, buf, l);
hwaddr done = 0;
hwaddr l, xlat, base;
MemoryRegion *mr, *this_mr;
- ram_addr_t raddr;
void *ptr;
if (len == 0) {
}
base = xlat;
- raddr = memory_region_get_ram_addr(mr);
for (;;) {
len -= l;
memory_region_ref(mr);
*plen = done;
- ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
+ ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
rcu_read_unlock();
return ptr;
MemoryRegion *mr;
ram_addr_t addr1;
- mr = qemu_ram_addr_from_host(buffer, &addr1);
+ mr = memory_region_from_host(buffer, &addr1);
assert(mr != NULL);
if (is_write) {
invalidate_and_set_dirty(mr, addr1, access_len);
#endif
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr(mr->ram_block,
- (memory_region_get_ram_addr(mr)
- & TARGET_PAGE_MASK)
- + addr1);
+ ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
val = ldl_le_p(ptr);
#endif
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr(mr->ram_block,
- (memory_region_get_ram_addr(mr)
- & TARGET_PAGE_MASK)
- + addr1);
+ ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
val = ldq_le_p(ptr);
#endif
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr(mr->ram_block,
- (memory_region_get_ram_addr(mr)
- & TARGET_PAGE_MASK)
- + addr1);
+ ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
val = lduw_le_p(ptr);
r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
} else {
- addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
- ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
+ ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
stl_p(ptr, val);
dirty_log_mask = memory_region_get_dirty_log_mask(mr);
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
- cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
+ cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
+ 4, dirty_log_mask);
r = MEMTX_OK;
}
if (result) {
r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
} else {
/* RAM case */
- addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
- ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
+ ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
stl_le_p(ptr, val);
r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
} else {
/* RAM case */
- addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
- ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
+ ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
stw_le_p(ptr, val);