#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
-#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};
-static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
-                                           MemoryRegionIoeventfd b)
+static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
+                                           MemoryRegionIoeventfd *b)
{
-    if (int128_lt(a.addr.start, b.addr.start)) {
+    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
-    } else if (int128_gt(a.addr.start, b.addr.start)) {
+    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
-    } else if (int128_lt(a.addr.size, b.addr.size)) {
+    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
-    } else if (int128_gt(a.addr.size, b.addr.size)) {
+    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
-    } else if (a.match_data < b.match_data) {
+    } else if (a->match_data < b->match_data) {
        return true;
-    } else if (a.match_data > b.match_data) {
+    } else if (a->match_data > b->match_data) {
        return false;
-    } else if (a.match_data) {
-        if (a.data < b.data) {
+    } else if (a->match_data) {
+        if (a->data < b->data) {
            return true;
-        } else if (a.data > b.data) {
+        } else if (a->data > b->data) {
            return false;
        }
    }
-    if (a.e < b.e) {
+    if (a->e < b->e) {
        return true;
-    } else if (a.e > b.e) {
+    } else if (a->e > b->e) {
        return false;
    }
    return false;
}
-static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
-                                          MemoryRegionIoeventfd b)
+static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
+                                          MemoryRegionIoeventfd *b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}
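/* Illustration, not part of the patch: memory_region_ioeventfd_before()
 * defines a strict weak ordering, so equality falls out as "neither sorts
 * before the other". A hypothetical caller with two fully initialized
 * descriptors:
 *
 *     MemoryRegionIoeventfd fd_a, fd_b;
 *     ...
 *     if (memory_region_ioeventfd_equal(&fd_a, &fd_b)) {
 *         // same range, same match policy, same notifier
 *     }
 *
 * Taking pointers also avoids passing the descriptor structs by value on
 * every comparison.
 */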
-typedef struct FlatRange FlatRange;
-
/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
MemoryRegion *mr;
bool readonly;
};
-/* Flattened global view of current active memory hierarchy. Kept in sorted
- * order.
- */
-struct FlatView {
-    struct rcu_head rcu;
-    unsigned ref;
-    FlatRange *ranges;
-    unsigned nr;
-    unsigned nr_allocated;
-    struct AddressSpaceDispatch *dispatch;
-    MemoryRegion *root;
-};
-
-typedef struct AddressSpaceOps AddressSpaceOps;
-
#define FOR_EACH_FLAT_RANGE(var, view) \
for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
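/* Illustration, not part of the patch: FOR_EACH_FLAT_RANGE() walks the
 * sorted range array of a FlatView. A hypothetical helper counting
 * read-only ranges:
 *
 *     static unsigned count_readonly(FlatView *view)
 *     {
 *         FlatRange *fr;
 *         unsigned n = 0;
 *
 *         FOR_EACH_FLAT_RANGE(fr, view) {
 *             n += fr->readonly;
 *         }
 *         return n;
 *     }
 */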
static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}
-static void flatview_unref(FlatView *view)
+void flatview_unref(FlatView *view)
{
if (atomic_fetch_dec(&view->ref) == 1) {
trace_flatview_destroy_rcu(view, view->root);
}
}
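/* Illustration, not part of the patch: flatview_ref() (above) is built on
 * atomic_fetch_inc_nonzero(), so it fails once a concurrent unref has
 * dropped the count to zero. Readers therefore retry under RCU, along the
 * lines of:
 *
 *     rcu_read_lock();
 *     do {
 *         view = address_space_to_flatview(as);
 *     } while (!flatview_ref(view));
 *     rcu_read_unlock();
 */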
-FlatView *address_space_to_flatview(AddressSpace *as)
-{
-    return atomic_rcu_read(&as->current_map);
-}
-
-AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
-{
-    return fv->dispatch;
-}
-
-AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
-{
-    return flatview_to_dispatch(address_space_to_flatview(as));
-}
-
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
return int128_eq(addrrange_end(r1->addr), r2->addr.start)
while (iold < fds_old_nb || inew < fds_new_nb) {
if (iold < fds_old_nb
&& (inew == fds_new_nb
-                || memory_region_ioeventfd_before(fds_old[iold],
-                                                  fds_new[inew]))) {
+                || memory_region_ioeventfd_before(&fds_old[iold],
+                                                  &fds_new[inew]))) {
fd = &fds_old[iold];
section = (MemoryRegionSection) {
.fv = address_space_to_flatview(as),
++iold;
} else if (inew < fds_new_nb
&& (iold == fds_old_nb
-                       || memory_region_ioeventfd_before(fds_new[inew],
-                                                         fds_old[iold]))) {
+                       || memory_region_ioeventfd_before(&fds_new[inew],
+                                                         &fds_old[iold]))) {
fd = &fds_new[inew];
section = (MemoryRegionSection) {
.fv = address_space_to_flatview(as),
}
}
-static FlatView *address_space_get_flatview(AddressSpace *as)
+FlatView *address_space_get_flatview(AddressSpace *as)
{
FlatView *view;
}
static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
-                                   unsigned size, bool is_write)
+                                   unsigned size, bool is_write,
+                                   MemTxAttrs attrs)
{
return false;
}
bool memory_region_access_valid(MemoryRegion *mr,
hwaddr addr,
unsigned size,
-                                bool is_write)
+                                bool is_write,
+                                MemTxAttrs attrs)
{
int access_size_min, access_size_max;
int access_size, i;
access_size = MAX(MIN(size, access_size_max), access_size_min);
for (i = 0; i < size; i += access_size) {
if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
-                                    is_write)) {
+                                    is_write, attrs)) {
return false;
}
}
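/* Worked example, not part of the patch: with size == 8 and
 * mr->ops->valid.max_access_size == 4 (and min_access_size <= 4),
 * access_size becomes MAX(MIN(8, 4), min) == 4, so the loop issues two
 * accepts() callbacks, at addr + 0 and addr + 4; the new attrs argument
 * is forwarded unchanged to each check.
 */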
{
MemTxResult r;
-    if (!memory_region_access_valid(mr, addr, size, false)) {
+    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
*pval = unassigned_mem_read(mr, addr, size);
return MEMTX_DECODE_ERROR;
}
ioeventfd.match_data = mr->ioeventfds[i].match_data;
ioeventfd.e = mr->ioeventfds[i].e;
-        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
+        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
event_notifier_set(ioeventfd.e);
return true;
}
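/* Note, not part of the patch: this is the write-side ioeventfd fast
 * path. When the access matches a registered MemoryRegionIoeventfd, the
 * EventNotifier is kicked and the function returns true, so the write
 * never reaches the region's write callback.
 */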
unsigned size,
MemTxAttrs attrs)
{
-    if (!memory_region_access_valid(mr, addr, size, true)) {
+    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
unassigned_mem_write(mr, addr, data, size);
return MEMTX_DECODE_ERROR;
}
const char *name,
uint64_t size,
Error **errp)
+{
+    memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
+}
+
+void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
+                                             Object *owner,
+                                             const char *name,
+                                             uint64_t size,
+                                             bool share,
+                                             Error **errp)
{
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
-    mr->ram_block = qemu_ram_alloc(size, mr, errp);
+    mr->ram_block = qemu_ram_alloc(size, share, mr, errp);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
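/* Illustration, not part of the patch: memory_region_init_ram_nomigrate()
 * keeps its old signature and simply forwards share = false, so existing
 * callers are unchanged; a hypothetical device that wants a shareable
 * block opts in explicitly:
 *
 *     memory_region_init_ram_shared_nomigrate(&s->ram, OBJECT(s),
 *                                             "mydev.ram", 0x10000,
 *                                             true, &error_fatal);
 */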
mr->readonly = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
-    mr->ram_block = qemu_ram_alloc(size, mr, errp);
+    mr->ram_block = qemu_ram_alloc(size, false, mr, errp);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
mr->terminates = true;
mr->rom_device = true;
mr->destructor = memory_region_destructor_ram;
-    mr->ram_block = qemu_ram_alloc(size, mr, errp);
+    mr->ram_block = qemu_ram_alloc(size, false, mr, errp);
}
void memory_region_init_iommu(void *_iommu_mr,
as = listener->address_space;
view = address_space_get_flatview(as);
FOR_EACH_FLAT_RANGE(fr, view) {
-        if (fr->mr == mr) {
+        if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
MemoryRegionSection mrs = section_from_flat_range(fr, view);
listener->log_sync(listener, &mrs);
}
}
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
-        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
+        if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
break;
}
}
}
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
-        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
+        if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
break;
}
}
void memory_global_dirty_log_sync(void)
{
-    MemoryListener *listener;
-    AddressSpace *as;
-    FlatView *view;
-    FlatRange *fr;
-
-    QTAILQ_FOREACH(listener, &memory_listeners, link) {
-        if (!listener->log_sync) {
-            continue;
-        }
-        as = listener->address_space;
-        view = address_space_get_flatview(as);
-        FOR_EACH_FLAT_RANGE(fr, view) {
-            if (fr->dirty_log_mask) {
-                MemoryRegionSection mrs = section_from_flat_range(fr, view);
-
-                listener->log_sync(listener, &mrs);
-            }
-        }
-        flatview_unref(view);
-    }
+    memory_region_sync_dirty_bitmap(NULL);
}
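/* Note, not part of the patch: the open-coded listener walk removed above
 * now lives in memory_region_sync_dirty_bitmap(), where a NULL mr means
 * "sync every FlatRange with a non-zero dirty_log_mask" (see the
 * fr->dirty_log_mask && (!mr || fr->mr == mr) test earlier in this diff).
 */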
static VMChangeStateEntry *vmstate_change;