#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
-#include "sysemu/sysemu.h"
+#include "sysemu/runstate.h"
#include "sysemu/tcg.h"
#include "sysemu/accel.h"
#include "hw/boards.h"
bool romd_mode;
bool readonly;
bool nonvolatile;
- int has_coalesced_range;
};
#define FOR_EACH_FLAT_RANGE(var, view) \
#endif
}
-static bool memory_region_wrong_endianness(MemoryRegion *mr)
+static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
-#ifdef TARGET_WORDS_BIGENDIAN
- return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
-#else
- return mr->ops->endianness == DEVICE_BIG_ENDIAN;
-#endif
-}
-
-static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
-{
- if (memory_region_wrong_endianness(mr)) {
- switch (size) {
- case 1:
+ if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
+ switch (op & MO_SIZE) {
+ case MO_8:
break;
- case 2:
+ case MO_16:
*data = bswap16(*data);
break;
- case 4:
+ case MO_32:
*data = bswap32(*data);
break;
- case 8:
+ case MO_64:
*data = bswap64(*data);
break;
default:
- abort();
+ g_assert_not_reached();
}
}
}
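
An illustrative aside (not part of the patch): devend_memop() maps a
DEVICE_*_ENDIAN value onto MemOp byte-swap bits, so an access needs a
swap exactly when its MO_BSWAP bits disagree with the device's. As a
standalone sketch, with a hypothetical helper name:

    /* Hypothetical helper, for illustration only. */
    static inline bool access_needs_bswap(MemOp op, enum device_endian e)
    {
        /* devend_memop() yields MO_BE, MO_LE, or the target's order. */
        return (op & MO_BSWAP) != devend_memop(e);
    }
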
tmp = mr->ops->read(mr->opaque, addr, size);
if (mr->subpage) {
trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
- } else if (mr == &io_mem_notdirty) {
- /* Accesses to code which has previously been translated into a TB show
- * up in the MMIO path, as accesses to the io_mem_notdirty
- * MemoryRegion. */
- trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
} else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
if (mr->subpage) {
trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
- } else if (mr == &io_mem_notdirty) {
- /* Accesses to code which has previously been translated into a TB show
- * up in the MMIO path, as accesses to the io_mem_notdirty
- * MemoryRegion. */
- trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
} else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
if (mr->subpage) {
trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
- } else if (mr == &io_mem_notdirty) {
- /* Accesses to code which has previously been translated into a TB show
- * up in the MMIO path, as accesses to the io_mem_notdirty
- * MemoryRegion. */
- trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
} else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
if (mr->subpage) {
trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
- } else if (mr == &io_mem_notdirty) {
- /* Accesses to code which has previously been translated into a TB show
- * up in the MMIO path, as accesses to the io_mem_notdirty
- * MemoryRegion. */
- trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
} else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
fr.romd_mode = mr->romd_mode;
fr.readonly = readonly;
fr.nonvolatile = nonvolatile;
- fr.has_coalesced_range = 0;
/* Render the region itself into any gaps left by the current view. */
for (i = 0; i < view->nr && int128_nz(remain); ++i) {
{
FlatView *view;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
do {
view = address_space_to_flatview(as);
/* If somebody has replaced as->current_map concurrently,
* flatview_ref returns false.
*/
} while (!flatview_ref(view));
- rcu_read_unlock();
return view;
}
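
For context, RCU_READ_LOCK_GUARD() is a scope guard: it calls
rcu_read_lock() immediately and arranges for rcu_read_unlock() to run
automatically when the enclosing scope is left, so early-return paths
are covered without explicit unlock calls. A minimal sketch of the
pattern (lookup() is a placeholder):

    void reader(void)
    {
        RCU_READ_LOCK_GUARD();   /* rcu_read_lock() happens here */
        if (!lookup()) {
            return;              /* unlock runs automatically */
        }
        /* ... */
    }                            /* ... and here, at end of scope */
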
flatview_unref(view);
}
-static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
+/*
+ * Notify memory listeners about coalesced IO change events for the
+ * range `cmr'.  Only the part of `cmr' that intersects the given
+ * FlatRange is reported.
+ */
+static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
+ CoalescedMemoryRange *cmr, bool add)
{
- if (!fr->has_coalesced_range) {
+ AddrRange tmp;
+
+ tmp = addrrange_shift(cmr->addr,
+ int128_sub(fr->addr.start,
+ int128_make64(fr->offset_in_region)));
+ if (!addrrange_intersects(tmp, fr->addr)) {
return;
}
+ tmp = addrrange_intersection(tmp, fr->addr);
- if (--fr->has_coalesced_range > 0) {
- return;
+ if (add) {
+ MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
+ int128_get64(tmp.start),
+ int128_get64(tmp.size));
+ } else {
+ MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
+ int128_get64(tmp.start),
+ int128_get64(tmp.size));
}
+}
+
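
To see what the shift does, a worked example with made-up numbers:
suppose fr->addr.start is 0x3000, fr->offset_in_region is 0x1000, and
cmr->addr covers region offsets 0x1800..0x18ff. Shifting by
0x3000 - 0x1000 = 0x2000 translates that to the absolute range
0x3800..0x38ff, which is then clipped to the FlatRange before the
listeners are notified.
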
+static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
+{
+    CoalescedMemoryRange *cmr;
+
- MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
- int128_get64(fr->addr.start),
- int128_get64(fr->addr.size));
+ QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
+ flat_range_coalesced_io_notify(fr, as, cmr, false);
+ }
}
static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
MemoryRegion *mr = fr->mr;
CoalescedMemoryRange *cmr;
- AddrRange tmp;
if (QTAILQ_EMPTY(&mr->coalesced)) {
return;
}
- if (fr->has_coalesced_range++) {
- return;
- }
-
QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
- tmp = addrrange_shift(cmr->addr,
- int128_sub(fr->addr.start,
- int128_make64(fr->offset_in_region)));
- if (!addrrange_intersects(tmp, fr->addr)) {
- continue;
- }
- tmp = addrrange_intersection(tmp, fr->addr);
- MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
- int128_get64(tmp.start),
- int128_get64(tmp.size));
+ flat_range_coalesced_io_notify(fr, as, cmr, true);
}
}
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
hwaddr addr,
uint64_t *pval,
- unsigned size,
+ MemOp op,
MemTxAttrs attrs)
{
+ unsigned size = memop_size(op);
MemTxResult r;
if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
}
r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
- adjust_endianness(mr, pval, size);
+ adjust_endianness(mr, pval, op);
return r;
}
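
On the caller side, the old byte size is wrapped with size_memop(); a
hedged sketch of a 4-byte read in target endianness (the same
size_memop(size) | MO_TE idiom the ioeventfd hunks below use):

    uint64_t val;
    MemTxResult r = memory_region_dispatch_read(mr, addr, &val,
                                                size_memop(4) | MO_TE,
                                                MEMTXATTRS_UNSPECIFIED);
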
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
hwaddr addr,
uint64_t data,
- unsigned size,
+ MemOp op,
MemTxAttrs attrs)
{
+ unsigned size = memop_size(op);
+
if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
unassigned_mem_write(mr, addr, data, size);
return MEMTX_DECODE_ERROR;
}
- adjust_endianness(mr, &data, size);
+ adjust_endianness(mr, &data, op);
if ((!kvm_eventfds_enabled()) &&
memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
-static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
+static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
+ Error **errp)
{
IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
IOMMUNotifier *iommu_notifier;
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
+ int ret = 0;
IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
flags |= iommu_notifier->notifier_flags;
}
if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
- imrc->notify_flag_changed(iommu_mr,
- iommu_mr->iommu_notify_flags,
- flags);
+ ret = imrc->notify_flag_changed(iommu_mr,
+ iommu_mr->iommu_notify_flags,
+ flags, errp);
}
- iommu_mr->iommu_notify_flags = flags;
+ if (!ret) {
+ iommu_mr->iommu_notify_flags = flags;
+ }
+ return ret;
}
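
Under the new contract, a notify_flag_changed() implementation returns
0 on success, or a negative errno after setting *errp; the caller
commits the new flags only on success. A hypothetical device-side
implementation:

    static int my_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu_mr,
                                            IOMMUNotifierFlag old_flags,
                                            IOMMUNotifierFlag new_flags,
                                            Error **errp)
    {
        if (new_flags & IOMMU_NOTIFIER_MAP) {
            error_setg(errp, "MAP notifiers are not supported");
            return -EINVAL;
        }
        return 0;
    }
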
-void memory_region_register_iommu_notifier(MemoryRegion *mr,
- IOMMUNotifier *n)
+int memory_region_register_iommu_notifier(MemoryRegion *mr,
+ IOMMUNotifier *n, Error **errp)
{
IOMMUMemoryRegion *iommu_mr;
+ int ret;
if (mr->alias) {
- memory_region_register_iommu_notifier(mr->alias, n);
- return;
+ return memory_region_register_iommu_notifier(mr->alias, n, errp);
}
/* We need to register for at least one bitfield */
n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
- memory_region_update_iommu_notify_flags(iommu_mr);
+ ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
+ if (ret) {
+ QLIST_REMOVE(n, node);
+ }
+ return ret;
}
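
Callers correspondingly gain an error path; a minimal usage sketch
(vdev->n stands in for wherever the caller keeps its IOMMUNotifier):

    Error *err = NULL;

    if (memory_region_register_iommu_notifier(mr, &vdev->n, &err)) {
        error_report_err(err);
        /* undo any setup done before registering */
    }
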
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
}
}
-void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
-{
- IOMMUNotifier *notifier;
-
- IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
- memory_region_iommu_replay(iommu_mr, notifier);
- }
-}
-
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
IOMMUNotifier *n)
{
}
QLIST_REMOVE(n, node);
iommu_mr = IOMMU_MEMORY_REGION(mr);
- memory_region_update_iommu_notify_flags(iommu_mr);
+ memory_region_update_iommu_notify_flags(iommu_mr, NULL);
}
void memory_region_notify_one(IOMMUNotifier *notifier,
IOMMUTLBEntry *entry)
{
IOMMUNotifierFlag request_flags;
+ hwaddr entry_end = entry->iova + entry->addr_mask;
/*
 * Skip the notification if it does not overlap with the
 * registered range.
 */
- if (notifier->start > entry->iova + entry->addr_mask ||
- notifier->end < entry->iova) {
+ if (notifier->start > entry_end || notifier->end < entry->iova) {
return;
}
+ assert(entry->iova >= notifier->start && entry_end <= notifier->end);
+
if (entry->perm & IOMMU_RW) {
request_flags = IOMMU_NOTIFIER_MAP;
} else {
hwaddr size,
unsigned client)
{
+ DirtyBitmapSnapshot *snapshot;
assert(mr->ram_block);
memory_region_sync_dirty_bitmap(mr);
- return cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
+ snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
+ memory_global_after_dirty_log_sync();
+ return snapshot;
}
bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
{
int fd;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
while (mr->alias) {
mr = mr->alias;
}
fd = mr->ram_block->fd;
- rcu_read_unlock();
return fd;
}
void *ptr;
uint64_t offset = 0;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
while (mr->alias) {
offset += mr->alias_offset;
mr = mr->alias;
}
assert(mr->ram_block);
ptr = qemu_map_ram_ptr(mr->ram_block, offset);
- rcu_read_unlock();
return ptr;
}
qemu_ram_resize(mr->ram_block, newsize, errp);
}
-static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
+/*
+ * Notify the affected memory listeners about a newly added or removed
+ * CoalescedMemoryRange `cmr'.
+ */
+static void memory_region_update_coalesced_range(MemoryRegion *mr,
+ CoalescedMemoryRange *cmr,
+ bool add)
{
+ AddressSpace *as;
FlatView *view;
FlatRange *fr;
- view = address_space_get_flatview(as);
- FOR_EACH_FLAT_RANGE(fr, view) {
- if (fr->mr == mr) {
- flat_range_coalesced_io_del(fr, as);
- flat_range_coalesced_io_add(fr, as);
- }
- }
- flatview_unref(view);
-}
-
-static void memory_region_update_coalesced_range(MemoryRegion *mr)
-{
- AddressSpace *as;
-
QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
- memory_region_update_coalesced_range_as(mr, as);
+ view = address_space_get_flatview(as);
+ FOR_EACH_FLAT_RANGE(fr, view) {
+ if (fr->mr == mr) {
+ flat_range_coalesced_io_notify(fr, as, cmr, add);
+ }
+ }
+ flatview_unref(view);
}
}
cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
- memory_region_update_coalesced_range(mr);
+ memory_region_update_coalesced_range(mr, cmr, true);
memory_region_set_flush_coalesced(mr);
}
void memory_region_clear_coalescing(MemoryRegion *mr)
{
CoalescedMemoryRange *cmr;
- bool updated = false;
+
+ if (QTAILQ_EMPTY(&mr->coalesced)) {
+ return;
+ }
qemu_flush_coalesced_mmio_buffer();
mr->flush_coalesced_mmio = false;
while (!QTAILQ_EMPTY(&mr->coalesced)) {
cmr = QTAILQ_FIRST(&mr->coalesced);
QTAILQ_REMOVE(&mr->coalesced, cmr, link);
+ memory_region_update_coalesced_range(mr, cmr, false);
g_free(cmr);
- updated = true;
- }
-
- if (updated) {
- memory_region_update_coalesced_range(mr);
}
}
}
if (size) {
- adjust_endianness(mr, &mrfd.data, size);
+ adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
}
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
unsigned i;
if (size) {
- adjust_endianness(mr, &mrfd.data, size);
+ adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
}
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
hwaddr addr, uint64_t size)
{
MemoryRegionSection ret;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
ret = memory_region_find_rcu(mr, addr, size);
if (ret.mr) {
memory_region_ref(ret.mr);
}
- rcu_read_unlock();
return ret;
}
{
MemoryRegion *mr;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
mr = memory_region_find_rcu(container, addr, 1).mr;
- rcu_read_unlock();
return mr && mr != container;
}
memory_region_sync_dirty_bitmap(NULL);
}
+void memory_global_after_dirty_log_sync(void)
+{
+ MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
+}
+
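
The new hook lets a listener run code once every global dirty-log sync
has completed; a sketch of how one might wire it up (names are
hypothetical):

    static void my_log_global_after_sync(MemoryListener *listener)
    {
        /* runs after the dirty bitmap has been synced and snapshotted */
    }

    static MemoryListener my_listener = {
        .log_global_after_sync = my_log_global_after_sync,
    };
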
static VMChangeStateEntry *vmstate_change;
void memory_global_dirty_log_start(void)