return section;
}
+/* Called from RCU critical section */
+IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
+ bool is_write)
+{
+ IOMMUTLBEntry iotlb = {0};
+ MemoryRegionSection *section;
+ MemoryRegion *mr;
+
+ for (;;) {
+ AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
+ section = address_space_lookup_region(d, addr, false);
+ addr = addr - section->offset_within_address_space
+ + section->offset_within_region;
+ mr = section->mr;
+
+ if (!mr->iommu_ops) {
+ break;
+ }
+
+ iotlb = mr->iommu_ops->translate(mr, addr, is_write);
+ if (!(iotlb.perm & (1 << is_write))) {
+ iotlb.target_as = NULL;
+ break;
+ }
+
+ addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
+ | (addr & iotlb.addr_mask));
+ as = iotlb.target_as;
+ }
+
+ return iotlb;
+}
+
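/*
 * Illustrative caller sketch, not part of the patch: how a device-side
 * consumer might use the new helper. lookup_for_device() is a
 * hypothetical name; only address_space_get_iotlb_entry() and the
 * IOMMUTLBEntry fields used above are real. The caller supplies the
 * RCU critical section.
 */
static bool lookup_for_device(AddressSpace *as, hwaddr iova, bool is_write,
                              hwaddr *out_pa)
{
    IOMMUTLBEntry entry;

    rcu_read_lock();
    entry = address_space_get_iotlb_entry(as, iova, is_write);
    rcu_read_unlock();

    if (!entry.target_as) {
        /* Not translated by an IOMMU, or the access lacks permission. */
        return false;
    }
    /* translated_addr is page-masked; re-apply the offset bits. */
    *out_pa = entry.translated_addr | (iova & entry.addr_mask);
    return true;
}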
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
MemoryRegionSection *section;
- AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
+ AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
section = address_space_translate_internal(d, addr, xlat, plen, false);
assert(!section->mr->iommu_ops);
return section;
}
-#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
- tb_invalidate_phys_page_range(pc, pc + 1, 0);
-}
-#else
-static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
-{
- MemTxAttrs attrs;
- hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
- int asidx = cpu_asidx_from_attrs(cpu, attrs);
- if (phys != -1) {
- tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
- phys | (pc & ~TARGET_PAGE_MASK));
- }
+ /* Flush the whole TB cache as this will not have race conditions
+ * even if we don't have proper locking yet.
+ * Ideally we would just invalidate the TBs for the
+ * specified PC.
+ */
+ tb_flush(cpu);
}
-#endif
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
fprintf(stderr, "\n");
cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
if (qemu_log_separate()) {
+ qemu_log_lock();
qemu_log("qemu: fatal: ");
qemu_log_vprintf(fmt, ap2);
qemu_log("\n");
log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
qemu_log_flush();
+ qemu_log_unlock();
qemu_log_close();
}
va_end(ap2);
}
#ifdef __linux__
+static int64_t get_file_size(int fd)
+{
+ int64_t size = lseek(fd, 0, SEEK_END);
+ if (size < 0) {
+ return -errno;
+ }
+ return size;
+}
+
static void *file_ram_alloc(RAMBlock *block,
ram_addr_t memory,
const char *path,
char *c;
void *area = MAP_FAILED;
int fd = -1;
+ int64_t file_size;
if (kvm_enabled() && !kvm_has_sync_mmu()) {
error_setg(errp,
}
#endif
+ file_size = get_file_size(fd);
+
if (memory < block->page_size) {
error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
"or larger than page size 0x%zx",
goto error;
}
+ if (file_size > 0 && file_size < memory) {
+ error_setg(errp, "backing store %s size 0x%" PRIx64
+ " does not match 'size' option 0x" RAM_ADDR_FMT,
+ path, file_size, memory);
+ goto error;
+ }
+
memory = ROUND_UP(memory, block->page_size);
/*
* hosts, so don't bother bailing out on errors.
* If anything goes wrong with it under other filesystems,
* mmap will fail.
+ *
+ * Do not truncate the non-empty backend file to avoid corrupting
+ * the existing data in the file. Disabling shrinking is not
+ * enough. For example, the current vNVDIMM implementation stores
+ * the guest NVDIMM labels at the end of the backend file. If the
+ * backend file is later extended, QEMU will not be able to find
+ * those labels. Therefore, extending the non-empty backend file
+ * is disabled as well.
*/
- if (ftruncate(fd, memory)) {
+ if (!file_size && ftruncate(fd, memory)) {
perror("ftruncate");
}
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
uint64_t val, unsigned size)
{
+ bool locked = false;
+
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
+ locked = true;
+ tb_lock();
tb_invalidate_phys_page_fast(ram_addr, size);
}
switch (size) {
default:
abort();
}
+
+ if (locked) {
+ tb_unlock();
+ }
+
/* Set both VGA and migration bits for simplicity and to remove
* the notdirty callback faster.
*/
continue;
}
cpu->watchpoint_hit = wp;
+
+ /* The tb_lock will be reset when cpu_loop_exit or
+ * cpu_loop_exit_noexc longjmp back into the cpu_exec
+ * main loop.
+ */
+ tb_lock();
tb_check_watchpoint(cpu);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
cpu->exception_index = EXCP_DEBUG;
* may have split the RCU critical section.
*/
d = atomic_rcu_read(&cpuas->as->dispatch);
- cpuas->memory_dispatch = d;
+ atomic_rcu_set(&cpuas->memory_dispatch, d);
tlb_flush(cpuas->cpu, 1);
}
cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
}
if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
+ tb_lock();
tb_invalidate_phys_range(addr, addr + length);
+ tb_unlock();
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
}
cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
return true;
}
+static hwaddr
+address_space_extend_translation(AddressSpace *as, hwaddr addr, hwaddr target_len,
+ MemoryRegion *mr, hwaddr base, hwaddr len,
+ bool is_write)
+{
+ hwaddr done = 0;
+ hwaddr xlat;
+ MemoryRegion *this_mr;
+
+ for (;;) {
+ target_len -= len;
+ addr += len;
+ done += len;
+ if (target_len == 0) {
+ return done;
+ }
+
+ len = target_len;
+ this_mr = address_space_translate(as, addr, &xlat, &len, is_write);
+ if (this_mr != mr || xlat != base + done) {
+ return done;
+ }
+ }
+}
+
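/*
 * Worked example (illustrative): suppose the caller already translated a
 * 4K chunk at `base` and wants 12K in total. If the next page also
 * translates to `mr` at base + 4K, the loop above extends the result to
 * 8K; if the third page then maps to a different region or to a
 * non-contiguous xlat, the check fails and 8K is returned, so the caller
 * maps only the contiguous prefix.
 */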
/* Map a physical memory region into a host virtual address.
* May map a subset of the requested range, given by and returned in *plen.
* May return NULL if resources needed to perform the mapping are exhausted.
bool is_write)
{
hwaddr len = *plen;
- hwaddr done = 0;
- hwaddr l, xlat, base;
- MemoryRegion *mr, *this_mr;
+ hwaddr l, xlat;
+ MemoryRegion *mr;
void *ptr;
if (len == 0) {
return bounce.buffer;
}
- base = xlat;
-
- for (;;) {
- len -= l;
- addr += l;
- done += l;
- if (len == 0) {
- break;
- }
-
- l = len;
- this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
- if (this_mr != mr || xlat != base + done) {
- break;
- }
- }
memory_region_ref(mr);
- *plen = done;
- ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
+ *plen = address_space_extend_translation(as, addr, len, mr, xlat, l, is_write);
+ ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen);
rcu_read_unlock();
return ptr;
return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
-/* warning: addr must be aligned */
-static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs,
- MemTxResult *result,
- enum device_endian endian)
-{
- uint8_t *ptr;
- uint64_t val;
- MemoryRegion *mr;
- hwaddr l = 4;
- hwaddr addr1;
- MemTxResult r;
- bool release_lock = false;
-
- rcu_read_lock();
- mr = address_space_translate(as, addr, &addr1, &l, false);
- if (l < 4 || !memory_access_is_direct(mr, false)) {
- release_lock |= prepare_mmio_access(mr);
+#define ARG1_DECL AddressSpace *as
+#define ARG1 as
+#define SUFFIX
+#define TRANSLATE(...) address_space_translate(as, __VA_ARGS__)
+#define IS_DIRECT(mr, is_write) memory_access_is_direct(mr, is_write)
+#define MAP_RAM(mr, ofs) qemu_map_ram_ptr((mr)->ram_block, ofs)
+#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
+#define RCU_READ_LOCK(...) rcu_read_lock()
+#define RCU_READ_UNLOCK(...) rcu_read_unlock()
+#include "memory_ldst.inc.c"
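/*
 * Illustrative usage, not part of the patch: the template generates the
 * same public accessors the removed open-coded versions provided
 * (address_space_ldl, address_space_ldq_le, stl_phys, ...), so existing
 * callers compile unchanged. A minimal sketch:
 */
static uint32_t example_read_le32(AddressSpace *as, hwaddr addr)
{
    MemTxResult res;
    uint32_t val = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, &res);

    /* The fallback value on a failed transaction is arbitrary here. */
    return res == MEMTX_OK ? val : 0;
}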
- /* I/O case */
- r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
-#if defined(TARGET_WORDS_BIGENDIAN)
- if (endian == DEVICE_LITTLE_ENDIAN) {
- val = bswap32(val);
- }
-#else
- if (endian == DEVICE_BIG_ENDIAN) {
- val = bswap32(val);
- }
-#endif
- } else {
- /* RAM case */
- ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
- switch (endian) {
- case DEVICE_LITTLE_ENDIAN:
- val = ldl_le_p(ptr);
- break;
- case DEVICE_BIG_ENDIAN:
- val = ldl_be_p(ptr);
- break;
- default:
- val = ldl_p(ptr);
- break;
- }
- r = MEMTX_OK;
- }
- if (result) {
- *result = r;
- }
- if (release_lock) {
- qemu_mutex_unlock_iothread();
- }
- rcu_read_unlock();
- return val;
-}
-
-uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result)
-{
- return address_space_ldl_internal(as, addr, attrs, result,
- DEVICE_NATIVE_ENDIAN);
-}
-
-uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result)
-{
- return address_space_ldl_internal(as, addr, attrs, result,
- DEVICE_LITTLE_ENDIAN);
-}
-
-uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result)
+int64_t address_space_cache_init(MemoryRegionCache *cache,
+ AddressSpace *as,
+ hwaddr addr,
+ hwaddr len,
+ bool is_write)
{
- return address_space_ldl_internal(as, addr, attrs, result,
- DEVICE_BIG_ENDIAN);
-}
-
-uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
-{
- return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
-{
- return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
-{
- return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-/* warning: addr must be aligned */
-static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs,
- MemTxResult *result,
- enum device_endian endian)
-{
- uint8_t *ptr;
- uint64_t val;
- MemoryRegion *mr;
- hwaddr l = 8;
- hwaddr addr1;
- MemTxResult r;
- bool release_lock = false;
-
- rcu_read_lock();
- mr = address_space_translate(as, addr, &addr1, &l,
- false);
- if (l < 8 || !memory_access_is_direct(mr, false)) {
- release_lock |= prepare_mmio_access(mr);
-
- /* I/O case */
- r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
-#if defined(TARGET_WORDS_BIGENDIAN)
- if (endian == DEVICE_LITTLE_ENDIAN) {
- val = bswap64(val);
- }
-#else
- if (endian == DEVICE_BIG_ENDIAN) {
- val = bswap64(val);
- }
-#endif
- } else {
- /* RAM case */
- ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
- switch (endian) {
- case DEVICE_LITTLE_ENDIAN:
- val = ldq_le_p(ptr);
- break;
- case DEVICE_BIG_ENDIAN:
- val = ldq_be_p(ptr);
- break;
- default:
- val = ldq_p(ptr);
- break;
- }
- r = MEMTX_OK;
- }
- if (result) {
- *result = r;
- }
- if (release_lock) {
- qemu_mutex_unlock_iothread();
- }
- rcu_read_unlock();
- return val;
-}
-
-uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result)
-{
- return address_space_ldq_internal(as, addr, attrs, result,
- DEVICE_NATIVE_ENDIAN);
-}
-
-uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result)
-{
- return address_space_ldq_internal(as, addr, attrs, result,
- DEVICE_LITTLE_ENDIAN);
-}
-
-uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result)
-{
- return address_space_ldq_internal(as, addr, attrs, result,
- DEVICE_BIG_ENDIAN);
-}
-
-uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
-{
- return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
-{
- return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
-{
- return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-/* XXX: optimize */
-uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result)
-{
- uint8_t val;
- MemTxResult r;
-
- r = address_space_rw(as, addr, attrs, &val, 1, 0);
- if (result) {
- *result = r;
- }
- return val;
-}
-
-uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
-{
- return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-/* warning: addr must be aligned */
-static inline uint32_t address_space_lduw_internal(AddressSpace *as,
- hwaddr addr,
- MemTxAttrs attrs,
- MemTxResult *result,
- enum device_endian endian)
-{
- uint8_t *ptr;
- uint64_t val;
- MemoryRegion *mr;
- hwaddr l = 2;
- hwaddr addr1;
- MemTxResult r;
- bool release_lock = false;
-
- rcu_read_lock();
- mr = address_space_translate(as, addr, &addr1, &l,
- false);
- if (l < 2 || !memory_access_is_direct(mr, false)) {
- release_lock |= prepare_mmio_access(mr);
-
- /* I/O case */
- r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
-#if defined(TARGET_WORDS_BIGENDIAN)
- if (endian == DEVICE_LITTLE_ENDIAN) {
- val = bswap16(val);
- }
-#else
- if (endian == DEVICE_BIG_ENDIAN) {
- val = bswap16(val);
- }
-#endif
- } else {
- /* RAM case */
- ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
- switch (endian) {
- case DEVICE_LITTLE_ENDIAN:
- val = lduw_le_p(ptr);
- break;
- case DEVICE_BIG_ENDIAN:
- val = lduw_be_p(ptr);
- break;
- default:
- val = lduw_p(ptr);
- break;
- }
- r = MEMTX_OK;
- }
- if (result) {
- *result = r;
- }
- if (release_lock) {
- qemu_mutex_unlock_iothread();
- }
- rcu_read_unlock();
- return val;
-}
-
-uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result)
-{
- return address_space_lduw_internal(as, addr, attrs, result,
- DEVICE_NATIVE_ENDIAN);
-}
-
-uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result)
-{
- return address_space_lduw_internal(as, addr, attrs, result,
- DEVICE_LITTLE_ENDIAN);
-}
-
-uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result)
-{
- return address_space_lduw_internal(as, addr, attrs, result,
- DEVICE_BIG_ENDIAN);
-}
-
-uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
-{
- return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
-{
- return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
-{
- return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-/* warning: addr must be aligned. The ram page is not masked as dirty
- and the code inside is not invalidated. It is useful if the dirty
- bits are used to track modified PTEs */
-void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result)
-{
- uint8_t *ptr;
- MemoryRegion *mr;
- hwaddr l = 4;
- hwaddr addr1;
- MemTxResult r;
- uint8_t dirty_log_mask;
- bool release_lock = false;
-
- rcu_read_lock();
- mr = address_space_translate(as, addr, &addr1, &l,
- true);
- if (l < 4 || !memory_access_is_direct(mr, true)) {
- release_lock |= prepare_mmio_access(mr);
-
- r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
- } else {
- ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
- stl_p(ptr, val);
-
- dirty_log_mask = memory_region_get_dirty_log_mask(mr);
- dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
- cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
- 4, dirty_log_mask);
- r = MEMTX_OK;
- }
- if (result) {
- *result = r;
- }
- if (release_lock) {
- qemu_mutex_unlock_iothread();
- }
- rcu_read_unlock();
-}
-
-void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
-{
- address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-/* warning: addr must be aligned */
-static inline void address_space_stl_internal(AddressSpace *as,
- hwaddr addr, uint32_t val,
- MemTxAttrs attrs,
- MemTxResult *result,
- enum device_endian endian)
-{
- uint8_t *ptr;
- MemoryRegion *mr;
- hwaddr l = 4;
- hwaddr addr1;
- MemTxResult r;
- bool release_lock = false;
-
- rcu_read_lock();
- mr = address_space_translate(as, addr, &addr1, &l,
- true);
- if (l < 4 || !memory_access_is_direct(mr, true)) {
- release_lock |= prepare_mmio_access(mr);
-
-#if defined(TARGET_WORDS_BIGENDIAN)
- if (endian == DEVICE_LITTLE_ENDIAN) {
- val = bswap32(val);
- }
-#else
- if (endian == DEVICE_BIG_ENDIAN) {
- val = bswap32(val);
- }
-#endif
- r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
- } else {
- /* RAM case */
- ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
- switch (endian) {
- case DEVICE_LITTLE_ENDIAN:
- stl_le_p(ptr, val);
- break;
- case DEVICE_BIG_ENDIAN:
- stl_be_p(ptr, val);
- break;
- default:
- stl_p(ptr, val);
- break;
- }
- invalidate_and_set_dirty(mr, addr1, 4);
- r = MEMTX_OK;
- }
- if (result) {
- *result = r;
- }
- if (release_lock) {
- qemu_mutex_unlock_iothread();
- }
- rcu_read_unlock();
-}
-
-void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result)
-{
- address_space_stl_internal(as, addr, val, attrs, result,
- DEVICE_NATIVE_ENDIAN);
-}
-
-void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result)
-{
- address_space_stl_internal(as, addr, val, attrs, result,
- DEVICE_LITTLE_ENDIAN);
-}
-
-void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result)
-{
- address_space_stl_internal(as, addr, val, attrs, result,
- DEVICE_BIG_ENDIAN);
-}
-
-void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
-{
- address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
-{
- address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
-{
- address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-/* XXX: optimize */
-void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result)
-{
- uint8_t v = val;
- MemTxResult r;
-
- r = address_space_rw(as, addr, attrs, &v, 1, 1);
- if (result) {
- *result = r;
- }
-}
-
-void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
-{
- address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-/* warning: addr must be aligned */
-static inline void address_space_stw_internal(AddressSpace *as,
- hwaddr addr, uint32_t val,
- MemTxAttrs attrs,
- MemTxResult *result,
- enum device_endian endian)
-{
- uint8_t *ptr;
+ hwaddr l, xlat;
MemoryRegion *mr;
- hwaddr l = 2;
- hwaddr addr1;
- MemTxResult r;
- bool release_lock = false;
+ void *ptr;
- rcu_read_lock();
- mr = address_space_translate(as, addr, &addr1, &l, true);
- if (l < 2 || !memory_access_is_direct(mr, true)) {
- release_lock |= prepare_mmio_access(mr);
+ assert(len > 0);
-#if defined(TARGET_WORDS_BIGENDIAN)
- if (endian == DEVICE_LITTLE_ENDIAN) {
- val = bswap16(val);
- }
-#else
- if (endian == DEVICE_BIG_ENDIAN) {
- val = bswap16(val);
- }
-#endif
- r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
- } else {
- /* RAM case */
- ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
- switch (endian) {
- case DEVICE_LITTLE_ENDIAN:
- stw_le_p(ptr, val);
- break;
- case DEVICE_BIG_ENDIAN:
- stw_be_p(ptr, val);
- break;
- default:
- stw_p(ptr, val);
- break;
- }
- invalidate_and_set_dirty(mr, addr1, 2);
- r = MEMTX_OK;
- }
- if (result) {
- *result = r;
- }
- if (release_lock) {
- qemu_mutex_unlock_iothread();
+ l = len;
+ mr = address_space_translate(as, addr, &xlat, &l, is_write);
+ if (!memory_access_is_direct(mr, is_write)) {
+ return -EINVAL;
}
- rcu_read_unlock();
-}
-
-void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result)
-{
- address_space_stw_internal(as, addr, val, attrs, result,
- DEVICE_NATIVE_ENDIAN);
-}
-void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result)
-{
- address_space_stw_internal(as, addr, val, attrs, result,
- DEVICE_LITTLE_ENDIAN);
-}
+ l = address_space_extend_translation(as, addr, len, mr, xlat, l, is_write);
+ ptr = qemu_ram_ptr_length(mr->ram_block, xlat, &l);
-void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result)
-{
- address_space_stw_internal(as, addr, val, attrs, result,
- DEVICE_BIG_ENDIAN);
-}
+ cache->xlat = xlat;
+ cache->is_write = is_write;
+ cache->mr = mr;
+ cache->ptr = ptr;
+ cache->len = l;
+ memory_region_ref(cache->mr);
-void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
-{
- address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
-{
- address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
+ return l;
}
-void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
+void address_space_cache_invalidate(MemoryRegionCache *cache,
+ hwaddr addr,
+ hwaddr access_len)
{
- address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
+ assert(cache->is_write);
+ invalidate_and_set_dirty(cache->mr, addr + cache->xlat, access_len);
}
-/* XXX: optimize */
-void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
- MemTxAttrs attrs, MemTxResult *result)
+void address_space_cache_destroy(MemoryRegionCache *cache)
{
- MemTxResult r;
- val = tswap64(val);
- r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
- if (result) {
- *result = r;
+ if (!cache->mr) {
+ return;
}
-}
-void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
- MemTxAttrs attrs, MemTxResult *result)
-{
- MemTxResult r;
- val = cpu_to_le64(val);
- r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
- if (result) {
- *result = r;
- }
-}
-void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
- MemTxAttrs attrs, MemTxResult *result)
-{
- MemTxResult r;
- val = cpu_to_be64(val);
- r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
- if (result) {
- *result = r;
+ if (xen_enabled()) {
+ xen_invalidate_map_cache_entry(cache->ptr);
}
+ memory_region_unref(cache->mr);
}
-void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
-{
- address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
-{
- address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
-{
- address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
-}
+/* Called from RCU critical section. This function has the same
+ * semantics as address_space_translate, but it only works on a
+ * predefined range of a MemoryRegion that was mapped with
+ * address_space_cache_init.
+ */
+static inline MemoryRegion *address_space_translate_cached(
+ MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat,
+ hwaddr *plen, bool is_write)
+{
+ assert(addr < cache->len && *plen <= cache->len - addr);
+ *xlat = addr + cache->xlat;
+ return cache->mr;
+}
+
+#define ARG1_DECL MemoryRegionCache *cache
+#define ARG1 cache
+#define SUFFIX _cached
+#define TRANSLATE(...) address_space_translate_cached(cache, __VA_ARGS__)
+#define IS_DIRECT(mr, is_write) true
+#define MAP_RAM(mr, ofs) (cache->ptr + (ofs - cache->xlat))
+#define INVALIDATE(mr, ofs, len) ((void)0)
+#define RCU_READ_LOCK() ((void)0)
+#define RCU_READ_UNLOCK() ((void)0)
+#include "memory_ldst.inc.c"
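/*
 * Lifecycle sketch, not part of the patch: cache a window of guest RAM,
 * access it through the generated _cached accessors, then tear it down.
 * Assumes the template emits stl_le_phys_cached() as the SUFFIX wiring
 * suggests; the window size and offsets are arbitrary.
 */
static void example_cached_store(AddressSpace *as, hwaddr base)
{
    MemoryRegionCache cache;
    int64_t len;

    len = address_space_cache_init(&cache, as, base, 0x1000, true);
    if (len < 0) {
        /* Not backed by direct RAM; the cache was not initialized. */
        return;
    }
    if (len >= 0x10 + 4) {
        stl_le_phys_cached(&cache, 0x10, 0x12345678);
        /* INVALIDATE is a no-op for the cached variants, so dirty
         * tracking and TB invalidation are the caller's job:
         */
        address_space_cache_invalidate(&cache, 0x10, 4);
    }
    address_space_cache_destroy(&cache);
}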
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,