#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
+#include "hw/misc/mmio_interface.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
//#define DEBUG_UNASSIGNED
static QTAILQ_HEAD(, AddressSpace) address_spaces
= QTAILQ_HEAD_INITIALIZER(address_spaces);
+static GHashTable *flat_views;
+
typedef struct AddrRange AddrRange;
/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
do { \
- MemoryRegionSection mrs = section_from_flat_range(fr, as); \
+ MemoryRegionSection mrs = section_from_flat_range(fr, \
+ address_space_to_flatview(as)); \
MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args); \
} while(0)
}
typedef struct FlatRange FlatRange;
-typedef struct FlatView FlatView;
/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
FlatRange *ranges;
unsigned nr;
unsigned nr_allocated;
+ struct AddressSpaceDispatch *dispatch;
+ MemoryRegion *root;
};
typedef struct AddressSpaceOps AddressSpaceOps;
for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
static inline MemoryRegionSection
-section_from_flat_range(FlatRange *fr, AddressSpace *as)
+section_from_flat_range(FlatRange *fr, FlatView *fv)
{
return (MemoryRegionSection) {
.mr = fr->mr,
- .address_space = as,
+ .fv = fv,
.offset_within_region = fr->offset_in_region,
.size = fr->addr.size,
.offset_within_address_space = int128_get64(fr->addr.start),
&& a->readonly == b->readonly;
}
-static void flatview_init(FlatView *view)
+static FlatView *flatview_new(MemoryRegion *mr_root)
{
+ FlatView *view;
+
+ view = g_new0(FlatView, 1);
view->ref = 1;
- view->ranges = NULL;
- view->nr = 0;
- view->nr_allocated = 0;
+ view->root = mr_root;
+ memory_region_ref(mr_root);
+ trace_flatview_new(view, mr_root);
+
+ return view;
}
/* Insert a range into a given position. Caller is responsible for maintaining
{
int i;
+ trace_flatview_destroy(view, view->root);
+ if (view->dispatch) {
+ address_space_dispatch_free(view->dispatch);
+ }
for (i = 0; i < view->nr; i++) {
memory_region_unref(view->ranges[i].mr);
}
g_free(view->ranges);
+ memory_region_unref(view->root);
g_free(view);
}
-static void flatview_ref(FlatView *view)
+static bool flatview_ref(FlatView *view)
{
- atomic_inc(&view->ref);
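+    /* Returns false if the refcount has already dropped to zero and
+     * the view is being destroyed; callers must then look up a fresh
+     * view and retry.
+     */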
+ return atomic_fetch_inc_nonzero(&view->ref) > 0;
}
static void flatview_unref(FlatView *view)
{
if (atomic_fetch_dec(&view->ref) == 1) {
- flatview_destroy(view);
+ trace_flatview_destroy_rcu(view, view->root);
+ assert(view->root);
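+        /* Free the view only after an RCU grace period, so concurrent
+         * readers of as->current_map are not left with a dangling map.
+         */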
+ call_rcu(view, flatview_destroy, rcu);
}
}
+FlatView *address_space_to_flatview(AddressSpace *as)
+{
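+    /* The caller must hold the RCU read lock (or the BQL) while the
+     * returned view is in use.
+     */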
+ return atomic_rcu_read(&as->current_map);
+}
+
+AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
+{
+ return fv->dispatch;
+}
+
+AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
+{
+ return flatview_to_dispatch(address_space_to_flatview(as));
+}
+
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
return int128_eq(addrrange_end(r1->addr), r2->addr.start)
unsigned size,
unsigned access_size_min,
unsigned access_size_max,
- MemTxResult (*access)(MemoryRegion *mr,
- hwaddr addr,
- uint64_t *value,
- unsigned size,
- unsigned shift,
- uint64_t mask,
- MemTxAttrs attrs),
+ MemTxResult (*access_fn)
+ (MemoryRegion *mr,
+ hwaddr addr,
+ uint64_t *value,
+ unsigned size,
+ unsigned shift,
+ uint64_t mask,
+ MemTxAttrs attrs),
MemoryRegion *mr,
MemTxAttrs attrs)
{
access_mask = -1ULL >> (64 - access_size * 8);
if (memory_region_big_endian(mr)) {
for (i = 0; i < size; i += access_size) {
- r |= access(mr, addr + i, value, access_size,
+ r |= access_fn(mr, addr + i, value, access_size,
(size - access_size - i) * 8, access_mask, attrs);
}
} else {
for (i = 0; i < size; i += access_size) {
- r |= access(mr, addr + i, value, access_size, i * 8,
+ r |= access_fn(mr, addr + i, value, access_size, i * 8,
access_mask, attrs);
}
}
}
}
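+/* Follow aliases and fully-covering single children down from an
+ * address space root to the region that actually determines the
+ * rendered view, so that equivalent roots can share one FlatView.
+ * Returns NULL when nothing is enabled.
+ */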
+static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
+{
+ while (mr->enabled) {
+ if (mr->alias) {
+ if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
+ /* The alias is included in its entirety. Use it as
+ * the "real" root, so that we can share more FlatViews.
+ */
+ mr = mr->alias;
+ continue;
+ }
+ } else if (!mr->terminates) {
+ unsigned int found = 0;
+ MemoryRegion *child, *next = NULL;
+ QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
+ if (child->enabled) {
+ if (++found > 1) {
+ next = NULL;
+ break;
+ }
+ if (!child->addr && int128_ge(mr->size, child->size)) {
+ /* A child is included in its entirety. If it's the only
+                     * enabled one, use it in the hope of finding an alias
+                     * down the way. This will also let us share FlatViews.
+ */
+ next = child;
+ }
+ }
+ }
+ if (found == 0) {
+ return NULL;
+ }
+ if (next) {
+ mr = next;
+ continue;
+ }
+ }
+
+ return mr;
+ }
+
+ return NULL;
+}
+
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
+ int i;
FlatView *view;
- view = g_new(FlatView, 1);
- flatview_init(view);
+ view = flatview_new(mr);
if (mr) {
render_memory_region(view, mr, int128_zero(),
}
flatview_simplify(view);
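+    /* Build the dispatch tree once per FlatView; it is then shared by
+     * every address space that resolves to this view.
+     */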
+ view->dispatch = address_space_dispatch_new(view);
+ for (i = 0; i < view->nr; i++) {
+ MemoryRegionSection mrs =
+ section_from_flat_range(&view->ranges[i], view);
+ flatview_add_to_dispatch(view, &mrs);
+ }
+ address_space_dispatch_compact(view->dispatch);
+ g_hash_table_replace(flat_views, mr, view);
+
return view;
}
fds_new[inew]))) {
fd = &fds_old[iold];
section = (MemoryRegionSection) {
- .address_space = as,
+ .fv = address_space_to_flatview(as),
.offset_within_address_space = int128_get64(fd->addr.start),
.size = fd->addr.size,
};
fds_old[iold]))) {
fd = &fds_new[inew];
section = (MemoryRegionSection) {
- .address_space = as,
+ .fv = address_space_to_flatview(as),
.offset_within_address_space = int128_get64(fd->addr.start),
.size = fd->addr.size,
};
FlatView *view;
rcu_read_lock();
- view = atomic_rcu_read(&as->current_map);
- flatview_ref(view);
+ do {
+ view = address_space_to_flatview(as);
+ /* If somebody has replaced as->current_map concurrently,
+ * flatview_ref returns false.
+ */
+ } while (!flatview_ref(view));
rcu_read_unlock();
return view;
}
}
}
+static void flatviews_init(void)
+{
+ static FlatView *empty_view;
+
+ if (flat_views) {
+ return;
+ }
+
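+    /* flat_views maps a flatview root MemoryRegion to its rendered
+     * FlatView; the table owns one reference per view, dropped by the
+     * flatview_unref destroy notifier.
+     */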
+ flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
+ (GDestroyNotify) flatview_unref);
+ if (!empty_view) {
+ empty_view = generate_memory_topology(NULL);
+ /* We keep it alive forever in the global variable. */
+ flatview_ref(empty_view);
+ } else {
+ g_hash_table_replace(flat_views, NULL, empty_view);
+ flatview_ref(empty_view);
+ }
+}
+
-static void address_space_update_topology(AddressSpace *as)
+static void flatviews_reset(void)
{
- FlatView *old_view = address_space_get_flatview(as);
- FlatView *new_view = generate_memory_topology(as->root);
+ AddressSpace *as;
+
+ if (flat_views) {
+ g_hash_table_unref(flat_views);
+ flat_views = NULL;
+ }
+ flatviews_init();
+
+ /* Render unique FVs */
+ QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
+ MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
+
+ if (g_hash_table_lookup(flat_views, physmr)) {
+ continue;
+ }
+
+ generate_memory_topology(physmr);
+ }
+}
+
+static void address_space_set_flatview(AddressSpace *as)
+{
+ FlatView *old_view = address_space_to_flatview(as);
+ MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
+ FlatView *new_view = g_hash_table_lookup(flat_views, physmr);
+
-    address_space_update_topology_pass(as, old_view, new_view, false);
-    address_space_update_topology_pass(as, old_view, new_view, true);
+
+    assert(new_view);
+
+ if (old_view == new_view) {
+ return;
+ }
+
+ if (old_view) {
+ flatview_ref(old_view);
+ }
+
+ flatview_ref(new_view);
+
+ if (!QTAILQ_EMPTY(&as->listeners)) {
+ FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;
+
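+        /* With no previous map, diff against an empty dummy view so
+         * that both passes report every range in new_view as added.
+         */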
+ if (!old_view2) {
+ old_view2 = &tmpview;
+ }
+ address_space_update_topology_pass(as, old_view2, new_view, false);
+ address_space_update_topology_pass(as, old_view2, new_view, true);
+ }
/* Writes are protected by the BQL. */
atomic_rcu_set(&as->current_map, new_view);
- call_rcu(old_view, flatview_unref, rcu);
+ if (old_view) {
+ flatview_unref(old_view);
+ }
/* Note that all the old MemoryRegions are still alive up to this
* point. This relieves most MemoryListeners from the need to
* outside the iothread mutex, in which case precise reference
* counting is necessary.
*/
- flatview_unref(old_view);
+ if (old_view) {
+ flatview_unref(old_view);
+ }
+}
-    address_space_update_ioeventfds(as);
+
+static void address_space_update_topology(AddressSpace *as)
+{
+ MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
+
+ flatviews_init();
+ if (!g_hash_table_lookup(flat_views, physmr)) {
+ generate_memory_topology(physmr);
+ }
+ address_space_set_flatview(as);
}
void memory_region_transaction_begin(void)
--memory_region_transaction_depth;
if (!memory_region_transaction_depth) {
if (memory_region_update_pending) {
+ flatviews_reset();
+
MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
- address_space_update_topology(as);
+ address_space_set_flatview(as);
+ address_space_update_ioeventfds(as);
}
memory_region_update_pending = false;
MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
return escaped;
}
-void memory_region_init(MemoryRegion *mr,
- Object *owner,
- const char *name,
- uint64_t size)
+static void memory_region_do_init(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size)
{
- object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
mr->size = int128_make64(size);
if (size == UINT64_MAX) {
mr->size = int128_2_64();
}
}
+void memory_region_init(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size)
+{
+ object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
+ memory_region_do_init(mr, owner, name, size);
+}
+
static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
NULL, NULL, &error_abort);
}
+static void iommu_memory_region_initfn(Object *obj)
+{
+ MemoryRegion *mr = MEMORY_REGION(obj);
+
+ mr->is_iommu = true;
+}
+
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
unsigned size)
{
mr->terminates = true;
}
-void memory_region_init_ram(MemoryRegion *mr,
- Object *owner,
- const char *name,
- uint64_t size,
- Error **errp)
+void memory_region_init_ram_nomigrate(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ Error **errp)
{
memory_region_init(mr, owner, name, size);
mr->ram = true;
struct Object *owner,
const char *name,
uint64_t size,
+ uint64_t align,
bool share,
const char *path,
Error **errp)
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
+ mr->align = align;
mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
+
+void memory_region_init_ram_from_fd(MemoryRegion *mr,
+ struct Object *owner,
+ const char *name,
+ uint64_t size,
+ bool share,
+ int fd,
+ Error **errp)
+{
+ memory_region_init(mr, owner, name, size);
+ mr->ram = true;
+ mr->terminates = true;
+ mr->destructor = memory_region_destructor_ram;
+ mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
+ mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+}
#endif
void memory_region_init_ram_ptr(MemoryRegion *mr,
mr->alias_offset = offset;
}
-void memory_region_init_rom(MemoryRegion *mr,
- struct Object *owner,
- const char *name,
- uint64_t size,
- Error **errp)
+void memory_region_init_rom_nomigrate(MemoryRegion *mr,
+ struct Object *owner,
+ const char *name,
+ uint64_t size,
+ Error **errp)
{
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
-void memory_region_init_rom_device(MemoryRegion *mr,
- Object *owner,
- const MemoryRegionOps *ops,
- void *opaque,
- const char *name,
- uint64_t size,
- Error **errp)
+void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
+ Object *owner,
+ const MemoryRegionOps *ops,
+ void *opaque,
+ const char *name,
+ uint64_t size,
+ Error **errp)
{
assert(ops);
memory_region_init(mr, owner, name, size);
mr->ram_block = qemu_ram_alloc(size, mr, errp);
}
-void memory_region_init_iommu(MemoryRegion *mr,
+void memory_region_init_iommu(void *_iommu_mr,
+ size_t instance_size,
+ const char *mrtypename,
Object *owner,
- const MemoryRegionIOMMUOps *ops,
const char *name,
uint64_t size)
{
- memory_region_init(mr, owner, name, size);
- mr->iommu_ops = ops,
+ struct IOMMUMemoryRegion *iommu_mr;
+ struct MemoryRegion *mr;
+
+ object_initialize(_iommu_mr, instance_size, mrtypename);
+ mr = MEMORY_REGION(_iommu_mr);
+ memory_region_do_init(mr, owner, name, size);
+ iommu_mr = IOMMU_MEMORY_REGION(mr);
mr->terminates = true; /* then re-forwards */
- QLIST_INIT(&mr->iommu_notify);
- mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
+ QLIST_INIT(&iommu_mr->iommu_notify);
+ iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}
static void memory_region_finalize(Object *obj)
return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
-static void memory_region_update_iommu_notify_flags(MemoryRegion *mr)
+static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
{
IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
IOMMUNotifier *iommu_notifier;
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
- IOMMU_NOTIFIER_FOREACH(iommu_notifier, mr) {
+ IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
flags |= iommu_notifier->notifier_flags;
}
- if (flags != mr->iommu_notify_flags &&
- mr->iommu_ops->notify_flag_changed) {
- mr->iommu_ops->notify_flag_changed(mr, mr->iommu_notify_flags,
- flags);
+ if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
+ imrc->notify_flag_changed(iommu_mr,
+ iommu_mr->iommu_notify_flags,
+ flags);
}
- mr->iommu_notify_flags = flags;
+ iommu_mr->iommu_notify_flags = flags;
}
void memory_region_register_iommu_notifier(MemoryRegion *mr,
IOMMUNotifier *n)
{
+ IOMMUMemoryRegion *iommu_mr;
+
if (mr->alias) {
memory_region_register_iommu_notifier(mr->alias, n);
return;
}
/* We need to register for at least one bitfield */
+ iommu_mr = IOMMU_MEMORY_REGION(mr);
assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
assert(n->start <= n->end);
- QLIST_INSERT_HEAD(&mr->iommu_notify, n, node);
- memory_region_update_iommu_notify_flags(mr);
+ QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
+ memory_region_update_iommu_notify_flags(iommu_mr);
}
-uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
+uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
- assert(memory_region_is_iommu(mr));
- if (mr->iommu_ops && mr->iommu_ops->get_min_page_size) {
- return mr->iommu_ops->get_min_page_size(mr);
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
+
+ if (imrc->get_min_page_size) {
+ return imrc->get_min_page_size(iommu_mr);
}
return TARGET_PAGE_SIZE;
}
-void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
- bool is_write)
+void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
+ MemoryRegion *mr = MEMORY_REGION(iommu_mr);
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
hwaddr addr, granularity;
IOMMUTLBEntry iotlb;
/* If the IOMMU has its own replay callback, override */
- if (mr->iommu_ops->replay) {
- mr->iommu_ops->replay(mr, n);
+ if (imrc->replay) {
+ imrc->replay(iommu_mr, n);
return;
}
- granularity = memory_region_iommu_get_min_page_size(mr);
+ granularity = memory_region_iommu_get_min_page_size(iommu_mr);
for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
- iotlb = mr->iommu_ops->translate(mr, addr, is_write);
+ iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
if (iotlb.perm != IOMMU_NONE) {
n->notify(n, &iotlb);
}
}
}
-void memory_region_iommu_replay_all(MemoryRegion *mr)
+void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
{
IOMMUNotifier *notifier;
- IOMMU_NOTIFIER_FOREACH(notifier, mr) {
- memory_region_iommu_replay(mr, notifier, false);
+ IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
+ memory_region_iommu_replay(iommu_mr, notifier);
}
}
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
IOMMUNotifier *n)
{
+ IOMMUMemoryRegion *iommu_mr;
+
if (mr->alias) {
memory_region_unregister_iommu_notifier(mr->alias, n);
return;
}
QLIST_REMOVE(n, node);
- memory_region_update_iommu_notify_flags(mr);
+ iommu_mr = IOMMU_MEMORY_REGION(mr);
+ memory_region_update_iommu_notify_flags(iommu_mr);
}
void memory_region_notify_one(IOMMUNotifier *notifier,
     * Skip the notification if it does not overlap
     * with the registered range.
*/
- if (notifier->start > entry->iova + entry->addr_mask + 1 ||
+ if (notifier->start > entry->iova + entry->addr_mask ||
notifier->end < entry->iova) {
return;
}
}
}
-void memory_region_notify_iommu(MemoryRegion *mr,
+void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
IOMMUTLBEntry entry)
{
IOMMUNotifier *iommu_notifier;
- assert(memory_region_is_iommu(mr));
+ assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
- IOMMU_NOTIFIER_FOREACH(iommu_notifier, mr) {
+ IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
memory_region_notify_one(iommu_notifier, &entry);
}
}
memory_region_get_ram_addr(mr) + addr, size, client);
}
+DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
+ hwaddr addr,
+ hwaddr size,
+ unsigned client)
+{
+ assert(mr->ram_block);
+ return cpu_physical_memory_snapshot_and_clear_dirty(
+ memory_region_get_ram_addr(mr) + addr, size, client);
+}
+
+bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
+                                      DirtyBitmapSnapshot *snap,
+                                      hwaddr addr, hwaddr size)
+{
+ assert(mr->ram_block);
+ return cpu_physical_memory_snapshot_get_dirty(snap,
+ memory_region_get_ram_addr(mr) + addr, size);
+}
void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
view = address_space_get_flatview(as);
FOR_EACH_FLAT_RANGE(fr, view) {
if (fr->mr == mr) {
- MemoryRegionSection mrs = section_from_flat_range(fr, as);
+ MemoryRegionSection mrs = section_from_flat_range(fr, view);
listener->log_sync(listener, &mrs);
}
}
return fd;
}
-void memory_region_set_fd(MemoryRegion *mr, int fd)
-{
- rcu_read_lock();
- while (mr->alias) {
- mr = mr->alias;
- }
- mr->ram_block->fd = fd;
- rcu_read_unlock();
-}
-
void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
void *ptr;
FOR_EACH_FLAT_RANGE(fr, view) {
if (fr->mr == mr) {
section = (MemoryRegionSection) {
- .address_space = as,
+ .fv = view,
.offset_within_address_space = int128_get64(fr->addr.start),
.size = fr->addr.size,
};
}
}
-void memory_region_set_global_locking(MemoryRegion *mr)
-{
- mr->global_locking = true;
-}
-
void memory_region_clear_global_locking(MemoryRegion *mr)
{
mr->global_locking = false;
}
range = addrrange_make(int128_make64(addr), int128_make64(size));
- view = atomic_rcu_read(&as->current_map);
+ view = address_space_to_flatview(as);
fr = flatview_lookup(view, range);
if (!fr) {
return ret;
}
ret.mr = fr->mr;
- ret.address_space = as;
+ ret.fv = view;
range = addrrange_intersection(range, fr->addr);
ret.offset_within_region = fr->offset_in_region;
ret.offset_within_region += int128_get64(int128_sub(range.start,
view = address_space_get_flatview(as);
FOR_EACH_FLAT_RANGE(fr, view) {
if (fr->dirty_log_mask) {
- MemoryRegionSection mrs = section_from_flat_range(fr, as);
+ MemoryRegionSection mrs = section_from_flat_range(fr, view);
+
listener->log_sync(listener, &mrs);
}
}
}
}
+static VMChangeStateEntry *vmstate_change;
+
void memory_global_dirty_log_start(void)
{
+ if (vmstate_change) {
+ qemu_del_vm_change_state_handler(vmstate_change);
+ vmstate_change = NULL;
+ }
+
global_dirty_log = true;
MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
memory_region_transaction_commit();
}
-void memory_global_dirty_log_stop(void)
+static void memory_global_dirty_log_do_stop(void)
{
global_dirty_log = false;
MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}
+static void memory_vm_change_state_handler(void *opaque, int running,
+ RunState state)
+{
+ if (running) {
+ memory_global_dirty_log_do_stop();
+
+ if (vmstate_change) {
+ qemu_del_vm_change_state_handler(vmstate_change);
+ vmstate_change = NULL;
+ }
+ }
+}
+
+void memory_global_dirty_log_stop(void)
+{
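+    /* If the VM is not running, postpone the log_global_stop
+     * notification until it next resumes, via a vm change state
+     * handler.
+     */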
+ if (!runstate_is_running()) {
+ if (vmstate_change) {
+ return;
+ }
+ vmstate_change = qemu_add_vm_change_state_handler(
+ memory_vm_change_state_handler, NULL);
+ return;
+ }
+
+ memory_global_dirty_log_do_stop();
+}
+
static void listener_add_address_space(MemoryListener *listener,
AddressSpace *as)
{
view = address_space_get_flatview(as);
FOR_EACH_FLAT_RANGE(fr, view) {
- MemoryRegionSection section = {
- .mr = fr->mr,
- .address_space = as,
- .offset_within_region = fr->offset_in_region,
- .size = fr->addr.size,
- .offset_within_address_space = int128_get64(fr->addr.start),
- .readonly = fr->readonly,
- };
- if (fr->dirty_log_mask && listener->log_start) {
-            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
- }
+ MemoryRegionSection section = section_from_flat_range(fr, view);
+
if (listener->region_add) {
listener->region_add(listener, §ion);
}
+ if (fr->dirty_log_mask && listener->log_start) {
+            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
+ }
}
if (listener->commit) {
listener->commit(listener);
listener->address_space = NULL;
}
+bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
+{
+ void *host;
+ unsigned size = 0;
+ unsigned offset = 0;
+ Object *new_interface;
+
+ if (!mr || !mr->ops->request_ptr) {
+ return false;
+ }
+
+ /*
+     * Avoid an update if the request_ptr callback calls
+     * memory_region_invalidate_mmio_ptr, which is likely when we use
+     * a cache.
+ */
+ memory_region_transaction_begin();
+
+ host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);
+
+ if (!host || !size) {
+ memory_region_transaction_commit();
+ return false;
+ }
+
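+    /* Wrap the returned host pointer in a transient "mmio_interface"
+     * device, which maps it as a read-only RAM subregion of mr.
+     */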
+ new_interface = object_new("mmio_interface");
+ qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
+ qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
+ qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
+ qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
+ qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
+ object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);
+
+ memory_region_transaction_commit();
+ return true;
+}
+
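+/* A small static pool keeps the common invalidation path allocation-free;
+ * slots are claimed with an atomic compare-and-swap, and we fall back to
+ * a heap-allocated request when every slot is busy.
+ */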
+typedef struct MMIOPtrInvalidate {
+ MemoryRegion *mr;
+ hwaddr offset;
+ unsigned size;
+ int busy;
+ int allocated;
+} MMIOPtrInvalidate;
+
+#define MAX_MMIO_INVALIDATE 10
+static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];
+
+static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
+ run_on_cpu_data data)
+{
+ MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
+ MemoryRegion *mr = invalidate_data->mr;
+ hwaddr offset = invalidate_data->offset;
+ unsigned size = invalidate_data->size;
+ MemoryRegionSection section = memory_region_find(mr, offset, size);
+
+ qemu_mutex_lock_iothread();
+
+ /* Reset dirty so this doesn't happen later. */
+ cpu_physical_memory_test_and_clear_dirty(offset, size, 1);
+
+ if (section.mr != mr) {
+        /* memory_region_find adds a ref on section.mr */
+ memory_region_unref(section.mr);
+ if (MMIO_INTERFACE(section.mr->owner)) {
+            /* We found the interface; just drop it. */
+ object_property_set_bool(section.mr->owner, false, "realized",
+ NULL);
+ object_unref(section.mr->owner);
+ object_unparent(section.mr->owner);
+ }
+ }
+
+ qemu_mutex_unlock_iothread();
+
+ if (invalidate_data->allocated) {
+ g_free(invalidate_data);
+ } else {
+ invalidate_data->busy = 0;
+ }
+}
+
+void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
+ unsigned size)
+{
+ size_t i;
+ MMIOPtrInvalidate *invalidate_data = NULL;
+
+ for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
+ if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
+ invalidate_data = &mmio_ptr_invalidate_list[i];
+ break;
+ }
+ }
+
+ if (!invalidate_data) {
+ invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
+ invalidate_data->allocated = 1;
+ }
+
+ invalidate_data->mr = mr;
+ invalidate_data->offset = offset;
+ invalidate_data->size = size;
+
+ async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
+ RUN_ON_CPU_HOST_PTR(invalidate_data));
+}
+
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
memory_region_ref(root);
- memory_region_transaction_begin();
- as->ref_count = 1;
as->root = root;
- as->malloced = false;
- as->current_map = g_new(FlatView, 1);
- flatview_init(as->current_map);
+ as->current_map = NULL;
as->ioeventfd_nb = 0;
as->ioeventfds = NULL;
QTAILQ_INIT(&as->listeners);
QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
as->name = g_strdup(name ? name : "anonymous");
- address_space_init_dispatch(as);
- memory_region_update_pending |= root->enabled;
- memory_region_transaction_commit();
+ address_space_update_topology(as);
+ address_space_update_ioeventfds(as);
}
static void do_address_space_destroy(AddressSpace *as)
{
- bool do_free = as->malloced;
-
- address_space_destroy_dispatch(as);
assert(QTAILQ_EMPTY(&as->listeners));
flatview_unref(as->current_map);
g_free(as->name);
g_free(as->ioeventfds);
memory_region_unref(as->root);
- if (do_free) {
- g_free(as);
- }
-}
-
-AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
-{
- AddressSpace *as;
-
- QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
- if (root == as->root && as->malloced) {
- as->ref_count++;
- return as;
- }
- }
-
- as = g_malloc0(sizeof *as);
- address_space_init(as, root, name);
- as->malloced = true;
- return as;
}
void address_space_destroy(AddressSpace *as)
{
MemoryRegion *root = as->root;
- as->ref_count--;
- if (as->ref_count) {
- return;
- }
/* Flush out anything from MemoryListeners listening in on this */
memory_region_transaction_begin();
as->root = NULL;
memory_region_transaction_commit();
QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
- address_space_unregister(as);
/* At this point, as->dispatch and as->current_map are dummy
* entries that the guest should never use. Wait for the old
struct MemoryRegionList {
const MemoryRegion *mr;
- QTAILQ_ENTRY(MemoryRegionList) queue;
+ QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};
-typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;
+typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;
#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
int128_sub((size), int128_one())) : 0)
bool found = false;
/* check if the alias is already in the queue */
- QTAILQ_FOREACH(ml, alias_print_queue, queue) {
+ QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
if (ml->mr == mr->alias) {
found = true;
}
if (!found) {
ml = g_new(MemoryRegionList, 1);
ml->mr = mr->alias;
- QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
+ QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
}
mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
" (prio %d, %s): alias %s @%s " TARGET_FMT_plx
QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
new_ml = g_new(MemoryRegionList, 1);
new_ml->mr = submr;
- QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
+ QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
if (new_ml->mr->addr < ml->mr->addr ||
(new_ml->mr->addr == ml->mr->addr &&
new_ml->mr->priority > ml->mr->priority)) {
- QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
+ QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
new_ml = NULL;
break;
}
}
if (new_ml) {
- QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
+ QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
}
}
- QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
+ QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
alias_print_queue);
}
- QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
+ QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
g_free(ml);
}
}
-static void mtree_print_flatview(fprintf_function p, void *f,
- AddressSpace *as)
+struct FlatViewInfo {
+ fprintf_function mon_printf;
+ void *f;
+ int counter;
+ bool dispatch_tree;
+};
+
+static void mtree_print_flatview(gpointer key, gpointer value,
+ gpointer user_data)
{
- FlatView *view = address_space_get_flatview(as);
+ FlatView *view = key;
+ GArray *fv_address_spaces = value;
+ struct FlatViewInfo *fvi = user_data;
+ fprintf_function p = fvi->mon_printf;
+ void *f = fvi->f;
FlatRange *range = &view->ranges[0];
MemoryRegion *mr;
int n = view->nr;
+ int i;
+ AddressSpace *as;
+
+ p(f, "FlatView #%d\n", fvi->counter);
+ ++fvi->counter;
+
+ for (i = 0; i < fv_address_spaces->len; ++i) {
+ as = g_array_index(fv_address_spaces, AddressSpace*, i);
+ p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
+ if (as->root->alias) {
+ p(f, ", alias %s", memory_region_name(as->root->alias));
+ }
+ p(f, "\n");
+ }
+
+ p(f, " Root memory region: %s\n",
+ view->root ? memory_region_name(view->root) : "(none)");
if (n <= 0) {
- p(f, MTREE_INDENT "No rendered FlatView for "
- "address space '%s'\n", as->name);
- flatview_unref(view);
+ p(f, MTREE_INDENT "No rendered FlatView\n\n");
return;
}
range++;
}
+#if !defined(CONFIG_USER_ONLY)
+ if (fvi->dispatch_tree && view->root) {
+ mtree_print_dispatch(p, f, view->dispatch, view->root);
+ }
+#endif
+
+ p(f, "\n");
+}
+
+static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
+ gpointer user_data)
+{
+ FlatView *view = key;
+ GArray *fv_address_spaces = value;
+
+ g_array_unref(fv_address_spaces);
flatview_unref(view);
+
+ return true;
}
-void mtree_info(fprintf_function mon_printf, void *f, bool flatview)
+void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
+ bool dispatch_tree)
{
MemoryRegionListHead ml_head;
MemoryRegionList *ml, *ml2;
AddressSpace *as;
if (flatview) {
+ FlatView *view;
+ struct FlatViewInfo fvi = {
+ .mon_printf = mon_printf,
+ .f = f,
+ .counter = 0,
+ .dispatch_tree = dispatch_tree
+ };
+ GArray *fv_address_spaces;
+ GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
+
+ /* Gather all FVs in one table */
QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
- mon_printf(f, "address-space (flat view): %s\n", as->name);
- mtree_print_flatview(mon_printf, f, as);
- mon_printf(f, "\n");
+ view = address_space_get_flatview(as);
+
+ fv_address_spaces = g_hash_table_lookup(views, view);
+ if (!fv_address_spaces) {
+ fv_address_spaces = g_array_new(false, false, sizeof(as));
+ g_hash_table_insert(views, view, fv_address_spaces);
+ }
+
+ g_array_append_val(fv_address_spaces, as);
}
+
+ /* Print */
+ g_hash_table_foreach(views, mtree_print_flatview, &fvi);
+
+ /* Free */
+ g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
+ g_hash_table_unref(views);
+
return;
}
}
/* print aliased regions */
- QTAILQ_FOREACH(ml, &ml_head, queue) {
+ QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
mon_printf(f, "\n");
}
- QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
+ QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
g_free(ml);
}
}
+void memory_region_init_ram(MemoryRegion *mr,
+ struct Object *owner,
+ const char *name,
+ uint64_t size,
+ Error **errp)
+{
+ DeviceState *owner_dev;
+ Error *err = NULL;
+
+ memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ /* This will assert if owner is neither NULL nor a DeviceState.
+ * We only want the owner here for the purposes of defining a
+ * unique name for migration. TODO: Ideally we should implement
+ * a naming scheme for Objects which are not DeviceStates, in
+ * which case we can relax this restriction.
+ */
+ owner_dev = DEVICE(owner);
+ vmstate_register_ram(mr, owner_dev);
+}
+
+void memory_region_init_rom(MemoryRegion *mr,
+ struct Object *owner,
+ const char *name,
+ uint64_t size,
+ Error **errp)
+{
+ DeviceState *owner_dev;
+ Error *err = NULL;
+
+ memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ /* This will assert if owner is neither NULL nor a DeviceState.
+ * We only want the owner here for the purposes of defining a
+ * unique name for migration. TODO: Ideally we should implement
+ * a naming scheme for Objects which are not DeviceStates, in
+ * which case we can relax this restriction.
+ */
+ owner_dev = DEVICE(owner);
+ vmstate_register_ram(mr, owner_dev);
+}
+
+void memory_region_init_rom_device(MemoryRegion *mr,
+ struct Object *owner,
+ const MemoryRegionOps *ops,
+ void *opaque,
+ const char *name,
+ uint64_t size,
+ Error **errp)
+{
+ DeviceState *owner_dev;
+ Error *err = NULL;
+
+ memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
+ name, size, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ /* This will assert if owner is neither NULL nor a DeviceState.
+ * We only want the owner here for the purposes of defining a
+ * unique name for migration. TODO: Ideally we should implement
+ * a naming scheme for Objects which are not DeviceStates, in
+ * which case we can relax this restriction.
+ */
+ owner_dev = DEVICE(owner);
+ vmstate_register_ram(mr, owner_dev);
+}
+
static const TypeInfo memory_region_info = {
.parent = TYPE_OBJECT,
.name = TYPE_MEMORY_REGION,
.instance_finalize = memory_region_finalize,
};
+static const TypeInfo iommu_memory_region_info = {
+ .parent = TYPE_MEMORY_REGION,
+ .name = TYPE_IOMMU_MEMORY_REGION,
+ .class_size = sizeof(IOMMUMemoryRegionClass),
+ .instance_size = sizeof(IOMMUMemoryRegion),
+ .instance_init = iommu_memory_region_initfn,
+ .abstract = true,
+};
+
static void memory_register_types(void)
{
type_register_static(&memory_region_info);
+ type_register_static(&iommu_memory_region_info);
}
type_init(memory_register_types)