#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
-#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace-root.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
-#include "sysemu/sysemu.h"
-#include "hw/misc/mmio_interface.h"
-#include "hw/qdev-properties.h"
+#include "sysemu/runstate.h"
+#include "sysemu/tcg.h"
+#include "sysemu/accel.h"
+#include "hw/boards.h"
#include "migration/vmstate.h"
//#define DEBUG_UNASSIGNED
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
-static bool global_dirty_log = false;
+bool global_dirty_log;
-static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
+static QTAILQ_HEAD(, MemoryListener) memory_listeners
= QTAILQ_HEAD_INITIALIZER(memory_listeners);
static QTAILQ_HEAD(, AddressSpace) address_spaces
} \
break; \
case Reverse: \
- QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, \
- memory_listeners, link) { \
+ QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
if (_listener->_callback) { \
_listener->_callback(_listener, ##_args); \
} \
#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
do { \
MemoryListener *_listener; \
- struct memory_listeners_as *list = &(_as)->listeners; \
\
switch (_direction) { \
case Forward: \
- QTAILQ_FOREACH(_listener, list, link_as) { \
+ QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) { \
if (_listener->_callback) { \
_listener->_callback(_listener, _section, ##_args); \
} \
} \
break; \
case Reverse: \
- QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
- link_as) { \
+ QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
if (_listener->_callback) { \
_listener->_callback(_listener, _section, ##_args); \
} \
EventNotifier *e;
};
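+/*
+ * Strict ordering on (addr.start, addr.size, match_data, data, e);
+ * taking pointers means comparisons no longer copy the struct by value.
+ */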
-static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
- MemoryRegionIoeventfd b)
+static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
+ MemoryRegionIoeventfd *b)
{
- if (int128_lt(a.addr.start, b.addr.start)) {
+ if (int128_lt(a->addr.start, b->addr.start)) {
return true;
- } else if (int128_gt(a.addr.start, b.addr.start)) {
+ } else if (int128_gt(a->addr.start, b->addr.start)) {
return false;
- } else if (int128_lt(a.addr.size, b.addr.size)) {
+ } else if (int128_lt(a->addr.size, b->addr.size)) {
return true;
- } else if (int128_gt(a.addr.size, b.addr.size)) {
+ } else if (int128_gt(a->addr.size, b->addr.size)) {
return false;
- } else if (a.match_data < b.match_data) {
+ } else if (a->match_data < b->match_data) {
return true;
- } else if (a.match_data > b.match_data) {
+ } else if (a->match_data > b->match_data) {
return false;
- } else if (a.match_data) {
- if (a.data < b.data) {
+ } else if (a->match_data) {
+ if (a->data < b->data) {
return true;
- } else if (a.data > b.data) {
+ } else if (a->data > b->data) {
return false;
}
}
- if (a.e < b.e) {
+ if (a->e < b->e) {
return true;
- } else if (a.e > b.e) {
+ } else if (a->e > b->e) {
return false;
}
return false;
}
-static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
- MemoryRegionIoeventfd b)
+static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
+ MemoryRegionIoeventfd *b)
{
return !memory_region_ioeventfd_before(a, b)
&& !memory_region_ioeventfd_before(b, a);
}
-typedef struct FlatRange FlatRange;
-
/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
MemoryRegion *mr;
uint8_t dirty_log_mask;
bool romd_mode;
bool readonly;
+ bool nonvolatile;
+ int has_coalesced_range;
};
-/* Flattened global view of current active memory hierarchy. Kept in sorted
- * order.
- */
-struct FlatView {
- struct rcu_head rcu;
- unsigned ref;
- FlatRange *ranges;
- unsigned nr;
- unsigned nr_allocated;
- struct AddressSpaceDispatch *dispatch;
- MemoryRegion *root;
-};
-
-typedef struct AddressSpaceOps AddressSpaceOps;
-
#define FOR_EACH_FLAT_RANGE(var, view) \
for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
.size = fr->addr.size,
.offset_within_address_space = int128_get64(fr->addr.start),
.readonly = fr->readonly,
+ .nonvolatile = fr->nonvolatile,
};
}
&& addrrange_equal(a->addr, b->addr)
&& a->offset_in_region == b->offset_in_region
&& a->romd_mode == b->romd_mode
- && a->readonly == b->readonly;
+ && a->readonly == b->readonly
+ && a->nonvolatile == b->nonvolatile;
}
static FlatView *flatview_new(MemoryRegion *mr_root)
return atomic_fetch_inc_nonzero(&view->ref) > 0;
}
-static void flatview_unref(FlatView *view)
+void flatview_unref(FlatView *view)
{
if (atomic_fetch_dec(&view->ref) == 1) {
trace_flatview_destroy_rcu(view, view->root);
}
}
-FlatView *address_space_to_flatview(AddressSpace *as)
-{
- return atomic_rcu_read(&as->current_map);
-}
-
-AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
-{
- return fv->dispatch;
-}
-
-AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
-{
- return flatview_to_dispatch(address_space_to_flatview(as));
-}
-
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
return int128_eq(addrrange_end(r1->addr), r2->addr.start)
int128_make64(r2->offset_in_region))
&& r1->dirty_log_mask == r2->dirty_log_mask
&& r1->romd_mode == r2->romd_mode
- && r1->readonly == r2->readonly;
+ && r1->readonly == r2->readonly
+ && r1->nonvolatile == r2->nonvolatile;
}
/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
- unsigned i, j;
+ unsigned i, j, k;
i = 0;
while (i < view->nr) {
++j;
}
++i;
+ for (k = i; k < j; k++) {
+ memory_region_unref(view->ranges[k].mr);
+ }
memmove(&view->ranges[i], &view->ranges[j],
(view->nr - j) * sizeof(view->ranges[j]));
view->nr -= j - i;
}
}
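+/*
+ * The signed @shift steers the masked sub-access into place: a
+ * non-negative shift moves bits up, a negative shift moves them down.
+ * E.g. with tmp == 0xab, mask == 0xff and shift == 8, the read
+ * accessor ORs 0xab00 into *value.
+ */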
+static inline void memory_region_shift_read_access(uint64_t *value,
+ signed shift,
+ uint64_t mask,
+ uint64_t tmp)
+{
+ if (shift >= 0) {
+ *value |= (tmp & mask) << shift;
+ } else {
+ *value |= (tmp & mask) >> -shift;
+ }
+}
+
+static inline uint64_t memory_region_shift_write_access(uint64_t *value,
+ signed shift,
+ uint64_t mask)
+{
+ uint64_t tmp;
+
+ if (shift >= 0) {
+ tmp = (*value >> shift) & mask;
+ } else {
+ tmp = (*value << -shift) & mask;
+ }
+
+ return tmp;
+}
+
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
MemoryRegion *root;
return -1;
}
-static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
- hwaddr addr,
- uint64_t *value,
- unsigned size,
- unsigned shift,
- uint64_t mask,
- MemTxAttrs attrs)
-{
- uint64_t tmp;
-
- tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
- if (mr->subpage) {
- trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
- } else if (mr == &io_mem_notdirty) {
- /* Accesses to code which has previously been translated into a TB show
- * up in the MMIO path, as accesses to the io_mem_notdirty
- * MemoryRegion. */
- trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
- } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
- hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
- trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
- }
- *value |= (tmp & mask) << shift;
- return MEMTX_OK;
-}
-
static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
hwaddr addr,
uint64_t *value,
unsigned size,
- unsigned shift,
+ signed shift,
uint64_t mask,
MemTxAttrs attrs)
{
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
}
- *value |= (tmp & mask) << shift;
+ memory_region_shift_read_access(value, shift, mask, tmp);
return MEMTX_OK;
}
hwaddr addr,
uint64_t *value,
unsigned size,
- unsigned shift,
+ signed shift,
uint64_t mask,
MemTxAttrs attrs)
{
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
}
- *value |= (tmp & mask) << shift;
+ memory_region_shift_read_access(value, shift, mask, tmp);
return r;
}
-static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
- hwaddr addr,
- uint64_t *value,
- unsigned size,
- unsigned shift,
- uint64_t mask,
- MemTxAttrs attrs)
-{
- uint64_t tmp;
-
- tmp = (*value >> shift) & mask;
- if (mr->subpage) {
- trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
- } else if (mr == &io_mem_notdirty) {
- /* Accesses to code which has previously been translated into a TB show
- * up in the MMIO path, as accesses to the io_mem_notdirty
- * MemoryRegion. */
- trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
- } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
- hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
- trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
- }
- mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
- return MEMTX_OK;
-}
-
static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
hwaddr addr,
uint64_t *value,
unsigned size,
- unsigned shift,
+ signed shift,
uint64_t mask,
MemTxAttrs attrs)
{
- uint64_t tmp;
+ uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
- tmp = (*value >> shift) & mask;
if (mr->subpage) {
trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
} else if (mr == &io_mem_notdirty) {
hwaddr addr,
uint64_t *value,
unsigned size,
- unsigned shift,
+ signed shift,
uint64_t mask,
MemTxAttrs attrs)
{
- uint64_t tmp;
+ uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
- tmp = (*value >> shift) & mask;
if (mr->subpage) {
trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
} else if (mr == &io_mem_notdirty) {
hwaddr addr,
uint64_t *value,
unsigned size,
- unsigned shift,
+ signed shift,
uint64_t mask,
MemTxAttrs attrs),
MemoryRegion *mr,
/* FIXME: support unaligned access? */
access_size = MAX(MIN(size, access_size_max), access_size_min);
- access_mask = -1ULL >> (64 - access_size * 8);
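+ /* MAKE_64BIT_MASK(0, n) sets the n low bits, equivalent to the
+ * old -1ULL >> (64 - n) expression. */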
+ access_mask = MAKE_64BIT_MASK(0, access_size * 8);
if (memory_region_big_endian(mr)) {
for (i = 0; i < size; i += access_size) {
r |= access_fn(mr, addr + i, value, access_size,
MemoryRegion *mr,
Int128 base,
AddrRange clip,
- bool readonly)
+ bool readonly,
+ bool nonvolatile)
{
MemoryRegion *subregion;
unsigned i;
int128_addto(&base, int128_make64(mr->addr));
readonly |= mr->readonly;
+ nonvolatile |= mr->nonvolatile;
tmp = addrrange_make(base, mr->size);
if (mr->alias) {
int128_subfrom(&base, int128_make64(mr->alias->addr));
int128_subfrom(&base, int128_make64(mr->alias_offset));
- render_memory_region(view, mr->alias, base, clip, readonly);
+ render_memory_region(view, mr->alias, base, clip,
+ readonly, nonvolatile);
return;
}
/* Render subregions in priority order. */
QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
- render_memory_region(view, subregion, base, clip, readonly);
+ render_memory_region(view, subregion, base, clip,
+ readonly, nonvolatile);
}
if (!mr->terminates) {
fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
fr.romd_mode = mr->romd_mode;
fr.readonly = readonly;
+ fr.nonvolatile = nonvolatile;
+ fr.has_coalesced_range = 0;
/* Render the region itself into any gaps left by the current view. */
for (i = 0; i < view->nr && int128_nz(remain); ++i) {
if (mr) {
render_memory_region(view, mr, int128_zero(),
- addrrange_make(int128_zero(), int128_2_64()), false);
+ addrrange_make(int128_zero(), int128_2_64()),
+ false, false);
}
flatview_simplify(view);
while (iold < fds_old_nb || inew < fds_new_nb) {
if (iold < fds_old_nb
&& (inew == fds_new_nb
- || memory_region_ioeventfd_before(fds_old[iold],
- fds_new[inew]))) {
+ || memory_region_ioeventfd_before(&fds_old[iold],
+ &fds_new[inew]))) {
fd = &fds_old[iold];
section = (MemoryRegionSection) {
.fv = address_space_to_flatview(as),
++iold;
} else if (inew < fds_new_nb
&& (iold == fds_old_nb
- || memory_region_ioeventfd_before(fds_new[inew],
- fds_old[iold]))) {
+ || memory_region_ioeventfd_before(&fds_new[inew],
+ &fds_old[iold]))) {
fd = &fds_new[inew];
section = (MemoryRegionSection) {
.fv = address_space_to_flatview(as),
}
}
-static FlatView *address_space_get_flatview(AddressSpace *as)
+FlatView *address_space_get_flatview(AddressSpace *as)
{
FlatView *view;
flatview_unref(view);
}
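+/*
+ * fr->has_coalesced_range acts as a reference count: _add installs the
+ * coalesced ranges with the listeners only on the 0 -> 1 transition,
+ * and _del withdraws them only on the 1 -> 0 transition, keeping
+ * paired calls balanced.
+ */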
+static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
+{
+ if (!fr->has_coalesced_range) {
+ return;
+ }
+
+ if (--fr->has_coalesced_range > 0) {
+ return;
+ }
+
+ MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
+ int128_get64(fr->addr.start),
+ int128_get64(fr->addr.size));
+}
+
+static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
+{
+ MemoryRegion *mr = fr->mr;
+ CoalescedMemoryRange *cmr;
+ AddrRange tmp;
+
+ if (QTAILQ_EMPTY(&mr->coalesced)) {
+ return;
+ }
+
+ if (fr->has_coalesced_range++) {
+ return;
+ }
+
+ QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
+ tmp = addrrange_shift(cmr->addr,
+ int128_sub(fr->addr.start,
+ int128_make64(fr->offset_in_region)));
+ if (!addrrange_intersects(tmp, fr->addr)) {
+ continue;
+ }
+ tmp = addrrange_intersection(tmp, fr->addr);
+ MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
+ int128_get64(tmp.start),
+ int128_get64(tmp.size));
+ }
+}
+
static void address_space_update_topology_pass(AddressSpace *as,
const FlatView *old_view,
const FlatView *new_view,
/* In old but not in new, or in both but attributes changed. */
if (!adding) {
+ flat_range_coalesced_io_del(frold, as);
MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
}
if (adding) {
MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
+ flat_range_coalesced_io_add(frnew, as);
}
++inew;
address_space_update_ioeventfds(as);
}
memory_region_update_pending = false;
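+ /* The address_space_update_ioeventfds() calls above already
+ * serviced any pending ioeventfd change, so clear that flag too. */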
+ ioeventfd_update_pending = false;
MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
} else if (ioeventfd_update_pending) {
QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
if (current_cpu != NULL) {
- cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
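+ /* Flag instruction fetches so the CPU can report an execute
+ * fault rather than a data fault. */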
+ bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
+ cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
}
return 0;
}
}
static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
- unsigned size, bool is_write)
+ unsigned size, bool is_write,
+ MemTxAttrs attrs)
{
return false;
}
bool memory_region_access_valid(MemoryRegion *mr,
hwaddr addr,
unsigned size,
- bool is_write)
+ bool is_write,
+ MemTxAttrs attrs)
{
int access_size_min, access_size_max;
int access_size, i;
access_size = MAX(MIN(size, access_size_max), access_size_min);
for (i = 0; i < size; i += access_size) {
if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
- is_write)) {
+ is_write, attrs)) {
return false;
}
}
mr->ops->impl.max_access_size,
memory_region_read_accessor,
mr, attrs);
- } else if (mr->ops->read_with_attrs) {
+ } else {
return access_with_adjusted_size(addr, pval, size,
mr->ops->impl.min_access_size,
mr->ops->impl.max_access_size,
memory_region_read_with_attrs_accessor,
mr, attrs);
- } else {
- return access_with_adjusted_size(addr, pval, size, 1, 4,
- memory_region_oldmmio_read_accessor,
- mr, attrs);
}
}
{
MemTxResult r;
- if (!memory_region_access_valid(mr, addr, size, false)) {
+ if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
*pval = unassigned_mem_read(mr, addr, size);
return MEMTX_DECODE_ERROR;
}
ioeventfd.match_data = mr->ioeventfds[i].match_data;
ioeventfd.e = mr->ioeventfds[i].e;
- if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
+ if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
event_notifier_set(ioeventfd.e);
return true;
}
unsigned size,
MemTxAttrs attrs)
{
- if (!memory_region_access_valid(mr, addr, size, true)) {
+ if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
unassigned_mem_write(mr, addr, data, size);
return MEMTX_DECODE_ERROR;
}
mr->ops->impl.max_access_size,
memory_region_write_accessor, mr,
attrs);
- } else if (mr->ops->write_with_attrs) {
+ } else {
return
access_with_adjusted_size(addr, &data, size,
mr->ops->impl.min_access_size,
mr->ops->impl.max_access_size,
memory_region_write_with_attrs_accessor,
mr, attrs);
- } else {
- return access_with_adjusted_size(addr, &data, size, 1, 4,
- memory_region_oldmmio_write_accessor,
- mr, attrs);
}
}
uint64_t size,
Error **errp)
{
+ memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
+}
+
+void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ bool share,
+ Error **errp)
+{
+ Error *err = NULL;
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
- mr->ram_block = qemu_ram_alloc(size, mr, errp);
+ mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
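+ /* On failure, leave no half-initialized region behind: shrink it to
+ * zero and unparent it before propagating the error. */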
+ if (err) {
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
void memory_region_init_resizeable_ram(MemoryRegion *mr,
void *host),
Error **errp)
{
+ Error *err = NULL;
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
- mr, errp);
+ mr, &err);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ if (err) {
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
-#ifdef __linux__
+#ifdef CONFIG_POSIX
void memory_region_init_ram_from_file(MemoryRegion *mr,
struct Object *owner,
const char *name,
uint64_t size,
- bool share,
+ uint64_t align,
+ uint32_t ram_flags,
const char *path,
Error **errp)
{
+ Error *err = NULL;
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
- mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
+ mr->align = align;
+ mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ if (err) {
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
void memory_region_init_ram_from_fd(MemoryRegion *mr,
int fd,
Error **errp)
{
+ Error *err = NULL;
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
- mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
+ mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
+ share ? RAM_SHARED : 0,
+ fd, &err);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ if (err) {
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
#endif
uint64_t size,
void *ptr)
{
- memory_region_init_ram_ptr(mr, owner, name, size, ptr);
+ memory_region_init(mr, owner, name, size);
+ mr->ram = true;
+ mr->terminates = true;
mr->ram_device = true;
mr->ops = &ram_device_mem_ops;
mr->opaque = mr;
+ mr->destructor = memory_region_destructor_ram;
+ mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
+ assert(ptr != NULL);
+ mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}
void memory_region_init_alias(MemoryRegion *mr,
uint64_t size,
Error **errp)
{
+ Error *err = NULL;
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->readonly = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
- mr->ram_block = qemu_ram_alloc(size, mr, errp);
+ mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ if (err) {
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
uint64_t size,
Error **errp)
{
+ Error *err = NULL;
assert(ops);
memory_region_init(mr, owner, name, size);
mr->ops = ops;
mr->terminates = true;
mr->rom_device = true;
mr->destructor = memory_region_destructor_ram;
- mr->ram_block = qemu_ram_alloc(size, mr, errp);
+ mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
+ if (err) {
+ mr->size = int128_zero();
+ object_unparent(OBJECT(mr));
+ error_propagate(errp, err);
+ }
}
void memory_region_init_iommu(void *_iommu_mr,
iommu_mr = IOMMU_MEMORY_REGION(mr);
assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
assert(n->start <= n->end);
+ assert(n->iommu_idx >= 0 &&
+ n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
+
QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
memory_region_update_iommu_notify_flags(iommu_mr);
}
granularity = memory_region_iommu_get_min_page_size(iommu_mr);
for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
- iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
+ iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
if (iotlb.perm != IOMMU_NONE) {
n->notify(n, &iotlb);
}
* Skip the notification if it does not overlap
* with the registered range.
*/
- if (notifier->start > entry->iova + entry->addr_mask + 1 ||
+ if (notifier->start > entry->iova + entry->addr_mask ||
notifier->end < entry->iova) {
return;
}
}
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
+ int iommu_idx,
IOMMUTLBEntry entry)
{
IOMMUNotifier *iommu_notifier;
assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
- memory_region_notify_one(iommu_notifier, &entry);
+ if (iommu_notifier->iommu_idx == iommu_idx) {
+ memory_region_notify_one(iommu_notifier, &entry);
+ }
+ }
+}
+
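+/*
+ * Query an implementation-defined attribute; -EINVAL means the class
+ * does not provide a get_attr hook at all.
+ */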
+int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
+ enum IOMMUMemoryRegionAttr attr,
+ void *data)
+{
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
+
+ if (!imrc->get_attr) {
+ return -EINVAL;
+ }
+
+ return imrc->get_attr(iommu_mr, attr, data);
+}
+
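+/*
+ * Classes that do not distinguish translation contexts map every
+ * MemTxAttrs to index 0.
+ */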
+int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
+ MemTxAttrs attrs)
+{
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
+
+ if (!imrc->attrs_to_index) {
+ return 0;
+ }
+
+ return imrc->attrs_to_index(iommu_mr, attrs);
+}
+
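+/*
+ * Without a num_indexes hook a region exposes exactly one translation
+ * context.
+ */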
+int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
+{
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
+
+ if (!imrc->num_indexes) {
+ return 1;
}
+
+ return imrc->num_indexes(iommu_mr);
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
memory_region_transaction_commit();
}
-bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
- hwaddr size, unsigned client)
-{
- assert(mr->ram_block);
- return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
- size, client);
-}
-
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr size)
{
memory_region_get_dirty_log_mask(mr));
}
-bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
- hwaddr size, unsigned client)
-{
- assert(mr->ram_block);
- return cpu_physical_memory_test_and_clear_dirty(
- memory_region_get_ram_addr(mr) + addr, size, client);
-}
-
-DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
- hwaddr addr,
- hwaddr size,
- unsigned client)
-{
- assert(mr->ram_block);
- return cpu_physical_memory_snapshot_and_clear_dirty(
- memory_region_get_ram_addr(mr) + addr, size, client);
-}
-
-bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
- hwaddr addr, hwaddr size)
-{
- assert(mr->ram_block);
- return cpu_physical_memory_snapshot_get_dirty(snap,
- memory_region_get_ram_addr(mr) + addr, size);
-}
-
-void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
+static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
MemoryListener *listener;
AddressSpace *as;
as = listener->address_space;
view = address_space_get_flatview(as);
FOR_EACH_FLAT_RANGE(fr, view) {
- if (fr->mr == mr) {
+ if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
MemoryRegionSection mrs = section_from_flat_range(fr, view);
listener->log_sync(listener, &mrs);
}
}
}
+void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
+ hwaddr len)
+{
+ MemoryRegionSection mrs;
+ MemoryListener *listener;
+ AddressSpace *as;
+ FlatView *view;
+ FlatRange *fr;
+ hwaddr sec_start, sec_end, sec_size;
+
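+ /*
+ * Hand the clear request to every listener that implements
+ * log_clear, clipped to the part of [start, start + len) that
+ * each FlatRange of @mr covers.
+ */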
+ QTAILQ_FOREACH(listener, &memory_listeners, link) {
+ if (!listener->log_clear) {
+ continue;
+ }
+ as = listener->address_space;
+ view = address_space_get_flatview(as);
+ FOR_EACH_FLAT_RANGE(fr, view) {
+ if (!fr->dirty_log_mask || fr->mr != mr) {
+ /*
+ * The clear-dirty-bitmap operation only applies to
+ * regions whose dirty logging is enabled.
+ */
+ continue;
+ }
+
+ mrs = section_from_flat_range(fr, view);
+
+ sec_start = MAX(mrs.offset_within_region, start);
+ sec_end = mrs.offset_within_region + int128_get64(mrs.size);
+ sec_end = MIN(sec_end, start + len);
+
+ if (sec_start >= sec_end) {
+ /*
+ * If this memory region section has no intersection
+ * with the requested range, skip.
+ */
+ continue;
+ }
+
+ /* Valid case; shrink the section if needed */
+ mrs.offset_within_address_space +=
+ sec_start - mrs.offset_within_region;
+ mrs.offset_within_region = sec_start;
+ sec_size = sec_end - sec_start;
+ mrs.size = int128_make64(sec_size);
+ listener->log_clear(listener, &mrs);
+ }
+ flatview_unref(view);
+ }
+}
+
+DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
+ hwaddr addr,
+ hwaddr size,
+ unsigned client)
+{
+ assert(mr->ram_block);
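+ /* Pull dirty bits still held by the log_sync listeners into the
+ * global bitmap before taking the snapshot. */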
+ memory_region_sync_dirty_bitmap(mr);
+ return cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
+}
+
+bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
+ hwaddr addr, hwaddr size)
+{
+ assert(mr->ram_block);
+ return cpu_physical_memory_snapshot_get_dirty(snap,
+ memory_region_get_ram_addr(mr) + addr, size);
+}
+
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
if (mr->readonly != readonly) {
}
}
+void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
+{
+ if (mr->nonvolatile != nonvolatile) {
+ memory_region_transaction_begin();
+ mr->nonvolatile = nonvolatile;
+ memory_region_update_pending |= mr->enabled;
+ memory_region_transaction_commit();
+ }
+}
+
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
if (mr->romd_mode != romd_mode) {
{
FlatView *view;
FlatRange *fr;
- CoalescedMemoryRange *cmr;
- AddrRange tmp;
- MemoryRegionSection section;
view = address_space_get_flatview(as);
FOR_EACH_FLAT_RANGE(fr, view) {
if (fr->mr == mr) {
- section = (MemoryRegionSection) {
- .fv = view,
- .offset_within_address_space = int128_get64(fr->addr.start),
- .size = fr->addr.size,
- };
-
- MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
- int128_get64(fr->addr.start),
- int128_get64(fr->addr.size));
- QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
- tmp = addrrange_shift(cmr->addr,
- int128_sub(fr->addr.start,
- int128_make64(fr->offset_in_region)));
- if (!addrrange_intersects(tmp, fr->addr)) {
- continue;
- }
- tmp = addrrange_intersection(tmp, fr->addr);
- MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
- int128_get64(tmp.start),
- int128_get64(tmp.size));
- }
+ flat_range_coalesced_io_del(fr, as);
+ flat_range_coalesced_io_add(fr, as);
}
}
flatview_unref(view);
}
}
-void memory_region_set_global_locking(MemoryRegion *mr)
-{
- mr->global_locking = true;
-}
-
void memory_region_clear_global_locking(MemoryRegion *mr)
{
mr->global_locking = false;
}
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
- if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
+ if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
break;
}
}
}
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
- if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
+ if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
break;
}
}
ret.size = range.size;
ret.offset_within_address_space = int128_get64(range.start);
ret.readonly = fr->readonly;
+ ret.nonvolatile = fr->nonvolatile;
return ret;
}
void memory_global_dirty_log_sync(void)
{
- MemoryListener *listener;
- AddressSpace *as;
- FlatView *view;
- FlatRange *fr;
-
- QTAILQ_FOREACH(listener, &memory_listeners, link) {
- if (!listener->log_sync) {
- continue;
- }
- as = listener->address_space;
- view = address_space_get_flatview(as);
- FOR_EACH_FLAT_RANGE(fr, view) {
- if (fr->dirty_log_mask) {
- MemoryRegionSection mrs = section_from_flat_range(fr, view);
-
- listener->log_sync(listener, &mrs);
- }
- }
- flatview_unref(view);
- }
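+ /* A NULL region means: sync every FlatRange that has dirty logging
+ * enabled. */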
+ memory_region_sync_dirty_bitmap(NULL);
}
static VMChangeStateEntry *vmstate_change;
MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
- /* Refresh DIRTY_LOG_MIGRATION bit. */
+ /* Refresh DIRTY_MEMORY_MIGRATION bit. */
memory_region_transaction_begin();
memory_region_update_pending = true;
memory_region_transaction_commit();
{
global_dirty_log = false;
- /* Refresh DIRTY_LOG_MIGRATION bit. */
+ /* Refresh DIRTY_MEMORY_MIGRATION bit. */
memory_region_transaction_begin();
memory_region_update_pending = true;
memory_region_transaction_commit();
view = address_space_get_flatview(as);
FOR_EACH_FLAT_RANGE(fr, view) {
- MemoryRegionSection section = {
- .mr = fr->mr,
- .fv = view,
- .offset_within_region = fr->offset_in_region,
- .size = fr->addr.size,
- .offset_within_address_space = int128_get64(fr->addr.start),
- .readonly = fr->readonly,
- };
+ MemoryRegionSection section = section_from_flat_range(fr, view);
+
+ if (listener->region_add) {
+ listener->region_add(listener, &section);
+ }
if (fr->dirty_log_mask && listener->log_start) {
listener->log_start(listener, &section, 0, fr->dirty_log_mask);
}
- if (listener->region_add) {
- listener->region_add(listener, &section);
+ }
+ if (listener->commit) {
+ listener->commit(listener);
+ }
+ flatview_unref(view);
+}
+
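+/*
+ * Mirror image of listener_add_address_space: replay log_stop and
+ * region_del for every FlatRange so the listener detaches with
+ * balanced callbacks.
+ */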
+static void listener_del_address_space(MemoryListener *listener,
+ AddressSpace *as)
+{
+ FlatView *view;
+ FlatRange *fr;
+
+ if (listener->begin) {
+ listener->begin(listener);
+ }
+ view = address_space_get_flatview(as);
+ FOR_EACH_FLAT_RANGE(fr, view) {
+ MemoryRegionSection section = section_from_flat_range(fr, view);
+
+ if (fr->dirty_log_mask && listener->log_stop) {
+ listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
+ }
+ if (listener->region_del) {
+ listener->region_del(listener, &section);
}
}
if (listener->commit) {
listener->address_space = as;
if (QTAILQ_EMPTY(&memory_listeners)
- || listener->priority >= QTAILQ_LAST(&memory_listeners,
- memory_listeners)->priority) {
+ || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
} else {
QTAILQ_FOREACH(other, &memory_listeners, link) {
}
if (QTAILQ_EMPTY(&as->listeners)
- || listener->priority >= QTAILQ_LAST(&as->listeners,
- memory_listeners)->priority) {
+ || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
} else {
QTAILQ_FOREACH(other, &as->listeners, link_as) {
return;
}
+ listener_del_address_space(listener, listener->address_space);
QTAILQ_REMOVE(&memory_listeners, listener, link);
QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
listener->address_space = NULL;
}
-bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
-{
- void *host;
- unsigned size = 0;
- unsigned offset = 0;
- Object *new_interface;
-
- if (!mr || !mr->ops->request_ptr) {
- return false;
- }
-
- /*
- * Avoid an update if the request_ptr call
- * memory_region_invalidate_mmio_ptr which seems to be likely when we use
- * a cache.
- */
- memory_region_transaction_begin();
-
- host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);
-
- if (!host || !size) {
- memory_region_transaction_commit();
- return false;
- }
-
- new_interface = object_new("mmio_interface");
- qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
- qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
- qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
- qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
- qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
- object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);
-
- memory_region_transaction_commit();
- return true;
-}
-
-typedef struct MMIOPtrInvalidate {
- MemoryRegion *mr;
- hwaddr offset;
- unsigned size;
- int busy;
- int allocated;
-} MMIOPtrInvalidate;
-
-#define MAX_MMIO_INVALIDATE 10
-static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];
-
-static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
- run_on_cpu_data data)
-{
- MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
- MemoryRegion *mr = invalidate_data->mr;
- hwaddr offset = invalidate_data->offset;
- unsigned size = invalidate_data->size;
- MemoryRegionSection section = memory_region_find(mr, offset, size);
-
- qemu_mutex_lock_iothread();
-
- /* Reset dirty so this doesn't happen later. */
- cpu_physical_memory_test_and_clear_dirty(offset, size, 1);
-
- if (section.mr != mr) {
- /* memory_region_find add a ref on section.mr */
- memory_region_unref(section.mr);
- if (MMIO_INTERFACE(section.mr->owner)) {
- /* We found the interface just drop it. */
- object_property_set_bool(section.mr->owner, false, "realized",
- NULL);
- object_unref(section.mr->owner);
- object_unparent(section.mr->owner);
- }
- }
-
- qemu_mutex_unlock_iothread();
-
- if (invalidate_data->allocated) {
- g_free(invalidate_data);
- } else {
- invalidate_data->busy = 0;
- }
-}
-
-void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
- unsigned size)
+void address_space_remove_listeners(AddressSpace *as)
{
- size_t i;
- MMIOPtrInvalidate *invalidate_data = NULL;
-
- for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
- if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
- invalidate_data = &mmio_ptr_invalidate_list[i];
- break;
- }
- }
-
- if (!invalidate_data) {
- invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
- invalidate_data->allocated = 1;
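+ /* Each unregister drops the listener from as->listeners, so the
+ * loop terminates once the queue drains. */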
+ while (!QTAILQ_EMPTY(&as->listeners)) {
+ memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
}
-
- invalidate_data->mr = mr;
- invalidate_data->offset = offset;
- invalidate_data->size = size;
-
- async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
- RUN_ON_CPU_HOST_PTR(invalidate_data));
}
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};
-typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;
+typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;
#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT " "
-static void mtree_print_mr(fprintf_function mon_printf, void *f,
- const MemoryRegion *mr, unsigned int level,
+static void mtree_expand_owner(const char *label, Object *obj)
+{
+ DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);
+
+ qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
+ if (dev && dev->id) {
+ qemu_printf(" id=%s", dev->id);
+ } else {
+ gchar *canonical_path = object_get_canonical_path(obj);
+ if (canonical_path) {
+ qemu_printf(" path=%s", canonical_path);
+ g_free(canonical_path);
+ } else {
+ qemu_printf(" type=%s", object_get_typename(obj));
+ }
+ }
+ qemu_printf("}");
+}
+
+static void mtree_print_mr_owner(const MemoryRegion *mr)
+{
+ Object *owner = mr->owner;
+ Object *parent = memory_region_owner((MemoryRegion *)mr);
+
+ if (!owner && !parent) {
+ qemu_printf(" orphan");
+ return;
+ }
+ if (owner) {
+ mtree_expand_owner("owner", owner);
+ }
+ if (parent && parent != owner) {
+ mtree_expand_owner("parent", parent);
+ }
+}
+
+static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
hwaddr base,
- MemoryRegionListHead *alias_print_queue)
+ MemoryRegionListHead *alias_print_queue,
+ bool owner)
{
MemoryRegionList *new_ml, *ml, *next_ml;
MemoryRegionListHead submr_print_queue;
}
for (i = 0; i < level; i++) {
- mon_printf(f, MTREE_INDENT);
+ qemu_printf(MTREE_INDENT);
}
cur_start = base + mr->addr;
* user who is observing this.
*/
if (cur_start < base || cur_end < cur_start) {
- mon_printf(f, "[DETECTED OVERFLOW!] ");
+ qemu_printf("[DETECTED OVERFLOW!] ");
}
if (mr->alias) {
ml->mr = mr->alias;
QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
}
- mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
- " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
- "-" TARGET_FMT_plx "%s\n",
- cur_start, cur_end,
- mr->priority,
- memory_region_type((MemoryRegion *)mr),
- memory_region_name(mr),
- memory_region_name(mr->alias),
- mr->alias_offset,
- mr->alias_offset + MR_SIZE(mr->size),
- mr->enabled ? "" : " [disabled]");
+ qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
+ " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
+ "-" TARGET_FMT_plx "%s",
+ cur_start, cur_end,
+ mr->priority,
+ mr->nonvolatile ? "nv-" : "",
+ memory_region_type((MemoryRegion *)mr),
+ memory_region_name(mr),
+ memory_region_name(mr->alias),
+ mr->alias_offset,
+ mr->alias_offset + MR_SIZE(mr->size),
+ mr->enabled ? "" : " [disabled]");
+ if (owner) {
+ mtree_print_mr_owner(mr);
+ }
} else {
- mon_printf(f,
- TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
- cur_start, cur_end,
- mr->priority,
- memory_region_type((MemoryRegion *)mr),
- memory_region_name(mr),
- mr->enabled ? "" : " [disabled]");
+ qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
+ " (prio %d, %s%s): %s%s",
+ cur_start, cur_end,
+ mr->priority,
+ mr->nonvolatile ? "nv-" : "",
+ memory_region_type((MemoryRegion *)mr),
+ memory_region_name(mr),
+ mr->enabled ? "" : " [disabled]");
+ if (owner) {
+ mtree_print_mr_owner(mr);
+ }
}
+ qemu_printf("\n");
QTAILQ_INIT(&submr_print_queue);
}
QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
- mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
- alias_print_queue);
+ mtree_print_mr(ml->mr, level + 1, cur_start,
+ alias_print_queue, owner);
}
QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
}
struct FlatViewInfo {
- fprintf_function mon_printf;
- void *f;
int counter;
bool dispatch_tree;
+ bool owner;
+ AccelClass *ac;
+ const char *ac_name;
};
static void mtree_print_flatview(gpointer key, gpointer value,
FlatView *view = key;
GArray *fv_address_spaces = value;
struct FlatViewInfo *fvi = user_data;
- fprintf_function p = fvi->mon_printf;
- void *f = fvi->f;
FlatRange *range = &view->ranges[0];
MemoryRegion *mr;
int n = view->nr;
int i;
AddressSpace *as;
- p(f, "FlatView #%d\n", fvi->counter);
+ qemu_printf("FlatView #%d\n", fvi->counter);
++fvi->counter;
for (i = 0; i < fv_address_spaces->len; ++i) {
as = g_array_index(fv_address_spaces, AddressSpace*, i);
- p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
+ qemu_printf(" AS \"%s\", root: %s",
+ as->name, memory_region_name(as->root));
if (as->root->alias) {
- p(f, ", alias %s", memory_region_name(as->root->alias));
+ qemu_printf(", alias %s", memory_region_name(as->root->alias));
}
- p(f, "\n");
+ qemu_printf("\n");
}
- p(f, " Root memory region: %s\n",
+ qemu_printf(" Root memory region: %s\n",
view->root ? memory_region_name(view->root) : "(none)");
if (n <= 0) {
- p(f, MTREE_INDENT "No rendered FlatView\n\n");
+ qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
return;
}
while (n--) {
mr = range->mr;
if (range->offset_in_region) {
- p(f, MTREE_INDENT TARGET_FMT_plx "-"
- TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
- int128_get64(range->addr.start),
- int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
- mr->priority,
- range->readonly ? "rom" : memory_region_type(mr),
- memory_region_name(mr),
- range->offset_in_region);
+ qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
+ " (prio %d, %s%s): %s @" TARGET_FMT_plx,
+ int128_get64(range->addr.start),
+ int128_get64(range->addr.start)
+ + MR_SIZE(range->addr.size),
+ mr->priority,
+ range->nonvolatile ? "nv-" : "",
+ range->readonly ? "rom" : memory_region_type(mr),
+ memory_region_name(mr),
+ range->offset_in_region);
} else {
- p(f, MTREE_INDENT TARGET_FMT_plx "-"
- TARGET_FMT_plx " (prio %d, %s): %s\n",
- int128_get64(range->addr.start),
- int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
- mr->priority,
- range->readonly ? "rom" : memory_region_type(mr),
- memory_region_name(mr));
+ qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
+ " (prio %d, %s%s): %s",
+ int128_get64(range->addr.start),
+ int128_get64(range->addr.start)
+ + MR_SIZE(range->addr.size),
+ mr->priority,
+ range->nonvolatile ? "nv-" : "",
+ range->readonly ? "rom" : memory_region_type(mr),
+ memory_region_name(mr));
+ }
+ if (fvi->owner) {
+ mtree_print_mr_owner(mr);
}
+
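+ /* Tag the line with the accelerator name when it reports (via
+ * has_memory) that it maps this range itself. */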
+ if (fvi->ac) {
+ for (i = 0; i < fv_address_spaces->len; ++i) {
+ as = g_array_index(fv_address_spaces, AddressSpace*, i);
+ if (fvi->ac->has_memory(current_machine, as,
+ int128_get64(range->addr.start),
+ MR_SIZE(range->addr.size) + 1)) {
+ qemu_printf(" %s", fvi->ac_name);
+ }
+ }
+ }
+ qemu_printf("\n");
range++;
}
#if !defined(CONFIG_USER_ONLY)
if (fvi->dispatch_tree && view->root) {
- mtree_print_dispatch(p, f, view->dispatch, view->root);
+ mtree_print_dispatch(view->dispatch, view->root);
}
#endif
- p(f, "\n");
+ qemu_printf("\n");
}
static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
return true;
}
-void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
- bool dispatch_tree)
+void mtree_info(bool flatview, bool dispatch_tree, bool owner)
{
MemoryRegionListHead ml_head;
MemoryRegionList *ml, *ml2;
if (flatview) {
FlatView *view;
struct FlatViewInfo fvi = {
- .mon_printf = mon_printf,
- .f = f,
.counter = 0,
- .dispatch_tree = dispatch_tree
+ .dispatch_tree = dispatch_tree,
+ .owner = owner,
};
GArray *fv_address_spaces;
GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
+ AccelClass *ac = ACCEL_GET_CLASS(current_machine->accelerator);
+
+ if (ac->has_memory) {
+ fvi.ac = ac;
+ fvi.ac_name = current_machine->accel ? current_machine->accel :
+ object_class_get_name(OBJECT_CLASS(ac));
+ }
/* Gather all FVs in one table */
QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
QTAILQ_INIT(&ml_head);
QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
- mon_printf(f, "address-space: %s\n", as->name);
- mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
- mon_printf(f, "\n");
+ qemu_printf("address-space: %s\n", as->name);
+ mtree_print_mr(as->root, 1, 0, &ml_head, owner);
+ qemu_printf("\n");
}
/* print aliased regions */
QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
- mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
- mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
- mon_printf(f, "\n");
+ qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
+ mtree_print_mr(ml->mr, 1, 0, &ml_head, owner);
+ qemu_printf("\n");
}
QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
static const TypeInfo memory_region_info = {
.parent = TYPE_OBJECT,
.name = TYPE_MEMORY_REGION,
+ .class_size = sizeof(MemoryRegionClass),
.instance_size = sizeof(MemoryRegion),
.instance_init = memory_region_initfn,
.instance_finalize = memory_region_finalize,