#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
-#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
-#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
EventNotifier *e;
};
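+/* Order ioeventfds by range, then match_data, then data, then notifier,
+ * so "equal" below means neither sorts before the other. */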
-static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
- MemoryRegionIoeventfd b)
+static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
+ MemoryRegionIoeventfd *b)
{
- if (int128_lt(a.addr.start, b.addr.start)) {
+ if (int128_lt(a->addr.start, b->addr.start)) {
return true;
- } else if (int128_gt(a.addr.start, b.addr.start)) {
+ } else if (int128_gt(a->addr.start, b->addr.start)) {
return false;
- } else if (int128_lt(a.addr.size, b.addr.size)) {
+ } else if (int128_lt(a->addr.size, b->addr.size)) {
return true;
- } else if (int128_gt(a.addr.size, b.addr.size)) {
+ } else if (int128_gt(a->addr.size, b->addr.size)) {
return false;
- } else if (a.match_data < b.match_data) {
+ } else if (a->match_data < b->match_data) {
return true;
- } else if (a.match_data > b.match_data) {
+ } else if (a->match_data > b->match_data) {
return false;
- } else if (a.match_data) {
- if (a.data < b.data) {
+ } else if (a->match_data) {
+ if (a->data < b->data) {
return true;
- } else if (a.data > b.data) {
+ } else if (a->data > b->data) {
return false;
}
}
- if (a.e < b.e) {
+ if (a->e < b->e) {
return true;
- } else if (a.e > b.e) {
+ } else if (a->e > b->e) {
return false;
}
return false;
}
-static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
- MemoryRegionIoeventfd b)
+static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
+ MemoryRegionIoeventfd *b)
{
return !memory_region_ioeventfd_before(a, b)
&& !memory_region_ioeventfd_before(b, a);
}
-typedef struct FlatRange FlatRange;
-
/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
MemoryRegion *mr;
bool readonly;
};
-/* Flattened global view of current active memory hierarchy. Kept in sorted
- * order.
- */
-struct FlatView {
- struct rcu_head rcu;
- unsigned ref;
- FlatRange *ranges;
- unsigned nr;
- unsigned nr_allocated;
- struct AddressSpaceDispatch *dispatch;
- MemoryRegion *root;
-};
-
-typedef struct AddressSpaceOps AddressSpaceOps;
-
#define FOR_EACH_FLAT_RANGE(var, view) \
for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
return atomic_fetch_inc_nonzero(&view->ref) > 0;
}
-static void flatview_unref(FlatView *view)
+void flatview_unref(FlatView *view)
{
if (atomic_fetch_dec(&view->ref) == 1) {
trace_flatview_destroy_rcu(view, view->root);
}
}
-FlatView *address_space_to_flatview(AddressSpace *as)
-{
- return atomic_rcu_read(&as->current_map);
-}
-
-AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
-{
- return fv->dispatch;
-}
-
-AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
-{
- return flatview_to_dispatch(address_space_to_flatview(as));
-}
-
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
return int128_eq(addrrange_end(r1->addr), r2->addr.start)
}
}
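+/* Fold the device's result into *value: a non-negative shift moves the
+ * (masked) bits left, a negative shift moves them right. */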
+static inline void memory_region_shift_read_access(uint64_t *value,
+ signed shift,
+ uint64_t mask,
+ uint64_t tmp)
+{
+ if (shift >= 0) {
+ *value |= (tmp & mask) << shift;
+ } else {
+ *value |= (tmp & mask) >> -shift;
+ }
+}
+
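+/* Inverse of the read helper: pull the (masked) bits for this sub-access
+ * out of *value, shifting right for non-negative shifts. */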
+static inline uint64_t memory_region_shift_write_access(uint64_t *value,
+ signed shift,
+ uint64_t mask)
+{
+ uint64_t tmp;
+
+ if (shift >= 0) {
+ tmp = (*value >> shift) & mask;
+ } else {
+ tmp = (*value << -shift) & mask;
+ }
+
+ return tmp;
+}
+
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
MemoryRegion *root;
return -1;
}
-static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
- hwaddr addr,
- uint64_t *value,
- unsigned size,
- unsigned shift,
- uint64_t mask,
- MemTxAttrs attrs)
-{
- uint64_t tmp;
-
- tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
- if (mr->subpage) {
- trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
- } else if (mr == &io_mem_notdirty) {
- /* Accesses to code which has previously been translated into a TB show
- * up in the MMIO path, as accesses to the io_mem_notdirty
- * MemoryRegion. */
- trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
- } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
- hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
- trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
- }
- *value |= (tmp & mask) << shift;
- return MEMTX_OK;
-}
-
static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
hwaddr addr,
uint64_t *value,
unsigned size,
- unsigned shift,
+ signed shift,
uint64_t mask,
MemTxAttrs attrs)
{
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
}
- *value |= (tmp & mask) << shift;
+ memory_region_shift_read_access(value, shift, mask, tmp);
return MEMTX_OK;
}
hwaddr addr,
uint64_t *value,
unsigned size,
- unsigned shift,
+ signed shift,
uint64_t mask,
MemTxAttrs attrs)
{
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
}
- *value |= (tmp & mask) << shift;
+ memory_region_shift_read_access(value, shift, mask, tmp);
return r;
}
-static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
- hwaddr addr,
- uint64_t *value,
- unsigned size,
- unsigned shift,
- uint64_t mask,
- MemTxAttrs attrs)
-{
- uint64_t tmp;
-
- tmp = (*value >> shift) & mask;
- if (mr->subpage) {
- trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
- } else if (mr == &io_mem_notdirty) {
- /* Accesses to code which has previously been translated into a TB show
- * up in the MMIO path, as accesses to the io_mem_notdirty
- * MemoryRegion. */
- trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
- } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
- hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
- trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
- }
- mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
- return MEMTX_OK;
-}
-
static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
hwaddr addr,
uint64_t *value,
unsigned size,
- unsigned shift,
+ signed shift,
uint64_t mask,
MemTxAttrs attrs)
{
-    uint64_t tmp;
-    tmp = (*value >> shift) & mask;
+    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
if (mr->subpage) {
trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
} else if (mr == &io_mem_notdirty) {
hwaddr addr,
uint64_t *value,
unsigned size,
- unsigned shift,
+ signed shift,
uint64_t mask,
MemTxAttrs attrs)
{
-    uint64_t tmp;
-    tmp = (*value >> shift) & mask;
+    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
if (mr->subpage) {
trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
} else if (mr == &io_mem_notdirty) {
hwaddr addr,
uint64_t *value,
unsigned size,
- unsigned shift,
+ signed shift,
uint64_t mask,
MemTxAttrs attrs),
MemoryRegion *mr,
/* FIXME: support unaligned access? */
access_size = MAX(MIN(size, access_size_max), access_size_min);
- access_mask = -1ULL >> (64 - access_size * 8);
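+    /* i.e. a mask of the low access_size * 8 bits */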
+ access_mask = MAKE_64BIT_MASK(0, access_size * 8);
if (memory_region_big_endian(mr)) {
for (i = 0; i < size; i += access_size) {
r |= access_fn(mr, addr + i, value, access_size,
while (iold < fds_old_nb || inew < fds_new_nb) {
if (iold < fds_old_nb
&& (inew == fds_new_nb
- || memory_region_ioeventfd_before(fds_old[iold],
- fds_new[inew]))) {
+ || memory_region_ioeventfd_before(&fds_old[iold],
+ &fds_new[inew]))) {
fd = &fds_old[iold];
section = (MemoryRegionSection) {
.fv = address_space_to_flatview(as),
++iold;
} else if (inew < fds_new_nb
&& (iold == fds_old_nb
- || memory_region_ioeventfd_before(fds_new[inew],
- fds_old[iold]))) {
+ || memory_region_ioeventfd_before(&fds_new[inew],
+ &fds_old[iold]))) {
fd = &fds_new[inew];
section = (MemoryRegionSection) {
.fv = address_space_to_flatview(as),
}
}
-static FlatView *address_space_get_flatview(AddressSpace *as)
+FlatView *address_space_get_flatview(AddressSpace *as)
{
FlatView *view;
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
if (current_cpu != NULL) {
- cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
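+        /* Report instruction fetches as execute accesses, not loads. */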
+ bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
+ cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
}
return 0;
}
}
static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
- unsigned size, bool is_write)
+ unsigned size, bool is_write,
+ MemTxAttrs attrs)
{
return false;
}
bool memory_region_access_valid(MemoryRegion *mr,
hwaddr addr,
unsigned size,
- bool is_write)
+ bool is_write,
+ MemTxAttrs attrs)
{
int access_size_min, access_size_max;
int access_size, i;
access_size = MAX(MIN(size, access_size_max), access_size_min);
for (i = 0; i < size; i += access_size) {
if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
- is_write)) {
+ is_write, attrs)) {
return false;
}
}
mr->ops->impl.max_access_size,
memory_region_read_accessor,
mr, attrs);
- } else if (mr->ops->read_with_attrs) {
+ } else {
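+        /* old_mmio is gone: ops must provide read or read_with_attrs */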
return access_with_adjusted_size(addr, pval, size,
mr->ops->impl.min_access_size,
mr->ops->impl.max_access_size,
memory_region_read_with_attrs_accessor,
mr, attrs);
- } else {
- return access_with_adjusted_size(addr, pval, size, 1, 4,
- memory_region_oldmmio_read_accessor,
- mr, attrs);
}
}
{
MemTxResult r;
- if (!memory_region_access_valid(mr, addr, size, false)) {
+ if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
*pval = unassigned_mem_read(mr, addr, size);
return MEMTX_DECODE_ERROR;
}
ioeventfd.match_data = mr->ioeventfds[i].match_data;
ioeventfd.e = mr->ioeventfds[i].e;
- if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
+ if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
event_notifier_set(ioeventfd.e);
return true;
}
unsigned size,
MemTxAttrs attrs)
{
- if (!memory_region_access_valid(mr, addr, size, true)) {
+ if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
unassigned_mem_write(mr, addr, data, size);
return MEMTX_DECODE_ERROR;
}
mr->ops->impl.max_access_size,
memory_region_write_accessor, mr,
attrs);
- } else if (mr->ops->write_with_attrs) {
+ } else {
return
access_with_adjusted_size(addr, &data, size,
mr->ops->impl.min_access_size,
mr->ops->impl.max_access_size,
memory_region_write_with_attrs_accessor,
mr, attrs);
- } else {
- return access_with_adjusted_size(addr, &data, size, 1, 4,
- memory_region_oldmmio_write_accessor,
- mr, attrs);
}
}
uint64_t size,
Error **errp)
{
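+    /* Plain RAM is the share=false case of the _shared_ variant below. */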
+ memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
+}
+
+void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ bool share,
+ Error **errp)
+{
+ Error *err = NULL;
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
- mr->ram_block = qemu_ram_alloc(size, mr, errp);
+ mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ if (err) {
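+        /* Roll back memory_region_init() so no half-initialized
+         * region is left behind on allocation failure. */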
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
void memory_region_init_resizeable_ram(MemoryRegion *mr,
void *host),
Error **errp)
{
+ Error *err = NULL;
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
- mr, errp);
+ mr, &err);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ if (err) {
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
-#ifdef __linux__
+#ifdef CONFIG_POSIX
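+/* mmap-based file RAM works on any POSIX host, not just Linux */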
void memory_region_init_ram_from_file(MemoryRegion *mr,
struct Object *owner,
const char *name,
uint64_t size,
uint64_t align,
- bool share,
+ uint32_t ram_flags,
const char *path,
Error **errp)
{
+ Error *err = NULL;
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
mr->align = align;
- mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
+ mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ if (err) {
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
void memory_region_init_ram_from_fd(MemoryRegion *mr,
int fd,
Error **errp)
{
+ Error *err = NULL;
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
- mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
+ mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
+ share ? RAM_SHARED : 0,
+ fd, &err);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ if (err) {
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
#endif
uint64_t size,
Error **errp)
{
+ Error *err = NULL;
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->readonly = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
- mr->ram_block = qemu_ram_alloc(size, mr, errp);
+ mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ if (err) {
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
uint64_t size,
Error **errp)
{
+ Error *err = NULL;
assert(ops);
memory_region_init(mr, owner, name, size);
mr->ops = ops;
mr->terminates = true;
mr->rom_device = true;
mr->destructor = memory_region_destructor_ram;
- mr->ram_block = qemu_ram_alloc(size, mr, errp);
+ mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
+ if (err) {
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
void memory_region_init_iommu(void *_iommu_mr,
iommu_mr = IOMMU_MEMORY_REGION(mr);
assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
assert(n->start <= n->end);
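+    /* The notifier must target one of this region's translation contexts. */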
+ assert(n->iommu_idx >= 0 &&
+ n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
+
QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
memory_region_update_iommu_notify_flags(iommu_mr);
}
granularity = memory_region_iommu_get_min_page_size(iommu_mr);
for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
- iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
+ iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
if (iotlb.perm != IOMMU_NONE) {
n->notify(n, &iotlb);
}
}
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
+ int iommu_idx,
IOMMUTLBEntry entry)
{
IOMMUNotifier *iommu_notifier;
assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
- memory_region_notify_one(iommu_notifier, &entry);
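+        /* Deliver the entry only to notifiers for this translation context. */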
+ if (iommu_notifier->iommu_idx == iommu_idx) {
+ memory_region_notify_one(iommu_notifier, &entry);
+ }
}
}
return imrc->get_attr(iommu_mr, attr, data);
}
+int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
+ MemTxAttrs attrs)
+{
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
+
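+    /* Without a hook, all attribute combinations share index 0. */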
+ if (!imrc->attrs_to_index) {
+ return 0;
+ }
+
+ return imrc->attrs_to_index(iommu_mr, attrs);
+}
+
+int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
+{
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
+
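+    /* Without a hook, a region has a single translation context. */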
+ if (!imrc->num_indexes) {
+ return 1;
+ }
+
+ return imrc->num_indexes(iommu_mr);
+}
+
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
uint8_t mask = 1 << client;
}
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
- if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
+ if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
break;
}
}
}
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
- if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
+ if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
break;
}
}
listener->address_space = NULL;
}
-bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
-{
- void *host;
- unsigned size = 0;
- unsigned offset = 0;
- Object *new_interface;
-
- if (!mr || !mr->ops->request_ptr) {
- return false;
- }
-
- /*
- * Avoid an update if the request_ptr call
- * memory_region_invalidate_mmio_ptr which seems to be likely when we use
- * a cache.
- */
- memory_region_transaction_begin();
-
- host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);
-
- if (!host || !size) {
- memory_region_transaction_commit();
- return false;
- }
-
- new_interface = object_new("mmio_interface");
- qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
- qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
- qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
- qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
- qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
- object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);
-
- memory_region_transaction_commit();
- return true;
-}
-
-typedef struct MMIOPtrInvalidate {
- MemoryRegion *mr;
- hwaddr offset;
- unsigned size;
- int busy;
- int allocated;
-} MMIOPtrInvalidate;
-
-#define MAX_MMIO_INVALIDATE 10
-static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];
-
-static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
- run_on_cpu_data data)
-{
- MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
- MemoryRegion *mr = invalidate_data->mr;
- hwaddr offset = invalidate_data->offset;
- unsigned size = invalidate_data->size;
- MemoryRegionSection section = memory_region_find(mr, offset, size);
-
- qemu_mutex_lock_iothread();
-
- /* Reset dirty so this doesn't happen later. */
- cpu_physical_memory_test_and_clear_dirty(offset, size, 1);
-
- if (section.mr != mr) {
- /* memory_region_find add a ref on section.mr */
- memory_region_unref(section.mr);
- if (MMIO_INTERFACE(section.mr->owner)) {
- /* We found the interface just drop it. */
- object_property_set_bool(section.mr->owner, false, "realized",
- NULL);
- object_unref(section.mr->owner);
- object_unparent(section.mr->owner);
- }
- }
-
- qemu_mutex_unlock_iothread();
-
- if (invalidate_data->allocated) {
- g_free(invalidate_data);
- } else {
- invalidate_data->busy = 0;
- }
-}
-
-void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
- unsigned size)
-{
- size_t i;
- MMIOPtrInvalidate *invalidate_data = NULL;
-
- for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
- if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
- invalidate_data = &mmio_ptr_invalidate_list[i];
- break;
- }
- }
-
- if (!invalidate_data) {
- invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
- invalidate_data->allocated = 1;
- }
-
- invalidate_data->mr = mr;
- invalidate_data->offset = offset;
- invalidate_data->size = size;
-
- async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
- RUN_ON_CPU_HOST_PTR(invalidate_data));
-}
-
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
memory_region_ref(root);
int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT " "
+static void mtree_expand_owner(fprintf_function mon_printf, void *f,
+ const char *label, Object *obj)
+{
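+    /* Prefer a user-visible device id, then a QOM path, then the type name. */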
+ DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);
+
+ mon_printf(f, " %s:{%s", label, dev ? "dev" : "obj");
+ if (dev && dev->id) {
+ mon_printf(f, " id=%s", dev->id);
+ } else {
+ gchar *canonical_path = object_get_canonical_path(obj);
+ if (canonical_path) {
+ mon_printf(f, " path=%s", canonical_path);
+ g_free(canonical_path);
+ } else {
+ mon_printf(f, " type=%s", object_get_typename(obj));
+ }
+ }
+ mon_printf(f, "}");
+}
+
+static void mtree_print_mr_owner(fprintf_function mon_printf, void *f,
+ const MemoryRegion *mr)
+{
+ Object *owner = mr->owner;
+    Object *parent = OBJECT(mr)->parent;
+
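+    /* A region with neither an owner nor a QOM parent is an orphan. */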
+ if (!owner && !parent) {
+ mon_printf(f, " orphan");
+ return;
+ }
+ if (owner) {
+ mtree_expand_owner(mon_printf, f, "owner", owner);
+ }
+ if (parent && parent != owner) {
+ mtree_expand_owner(mon_printf, f, "parent", parent);
+ }
+}
+
static void mtree_print_mr(fprintf_function mon_printf, void *f,
const MemoryRegion *mr, unsigned int level,
hwaddr base,
- MemoryRegionListHead *alias_print_queue)
+ MemoryRegionListHead *alias_print_queue,
+ bool owner)
{
MemoryRegionList *new_ml, *ml, *next_ml;
MemoryRegionListHead submr_print_queue;
}
mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
" (prio %d, %s): alias %s @%s " TARGET_FMT_plx
- "-" TARGET_FMT_plx "%s\n",
+ "-" TARGET_FMT_plx "%s",
cur_start, cur_end,
mr->priority,
memory_region_type((MemoryRegion *)mr),
mr->alias_offset,
mr->alias_offset + MR_SIZE(mr->size),
mr->enabled ? "" : " [disabled]");
+ if (owner) {
+ mtree_print_mr_owner(mon_printf, f, mr);
+ }
} else {
mon_printf(f,
- TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
+ TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s",
cur_start, cur_end,
mr->priority,
memory_region_type((MemoryRegion *)mr),
memory_region_name(mr),
mr->enabled ? "" : " [disabled]");
+ if (owner) {
+ mtree_print_mr_owner(mon_printf, f, mr);
+ }
}
+ mon_printf(f, "\n");
QTAILQ_INIT(&submr_print_queue);
QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
- alias_print_queue);
+ alias_print_queue, owner);
}
QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
void *f;
int counter;
bool dispatch_tree;
+ bool owner;
};
static void mtree_print_flatview(gpointer key, gpointer value,
mr = range->mr;
if (range->offset_in_region) {
p(f, MTREE_INDENT TARGET_FMT_plx "-"
- TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
+ TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx,
int128_get64(range->addr.start),
int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
mr->priority,
range->offset_in_region);
} else {
p(f, MTREE_INDENT TARGET_FMT_plx "-"
- TARGET_FMT_plx " (prio %d, %s): %s\n",
+ TARGET_FMT_plx " (prio %d, %s): %s",
int128_get64(range->addr.start),
int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
mr->priority,
range->readonly ? "rom" : memory_region_type(mr),
memory_region_name(mr));
}
+ if (fvi->owner) {
+ mtree_print_mr_owner(p, f, mr);
+ }
+ p(f, "\n");
range++;
}
}
void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
- bool dispatch_tree)
+ bool dispatch_tree, bool owner)
{
MemoryRegionListHead ml_head;
MemoryRegionList *ml, *ml2;
.mon_printf = mon_printf,
.f = f,
.counter = 0,
- .dispatch_tree = dispatch_tree
+ .dispatch_tree = dispatch_tree,
+ .owner = owner,
};
GArray *fv_address_spaces;
GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
mon_printf(f, "address-space: %s\n", as->name);
- mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
+ mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head, owner);
mon_printf(f, "\n");
}
/* print aliased regions */
QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
- mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
+ mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head, owner);
mon_printf(f, "\n");
}