X-Git-Url: https://repo.jachan.dev/qemu.git/blobdiff_plain/4ba364b47275fe428723442987b57b260b215dba..31c17aa5c38d8aa177730b0c4429f8023f8ad79b:/memory.c

diff --git a/memory.c b/memory.c
index 9f5c4584d1..edbc7012b6 100644
--- a/memory.c
+++ b/memory.c
@@ -14,6 +14,9 @@
  */
 
 #include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu-common.h"
+#include "cpu.h"
 #include "exec/memory.h"
 #include "exec/address-spaces.h"
 #include "exec/ioport.h"
@@ -30,8 +33,6 @@
 
 //#define DEBUG_UNASSIGNED
 
-#define RAM_ADDR_INVALID (~(ram_addr_t)0)
-
 static unsigned memory_region_transaction_depth;
 static bool memory_region_update_pending;
 static bool ioeventfd_update_pending;
@@ -96,13 +97,6 @@ static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
 
 enum ListenerDirection { Forward, Reverse };
 
-static bool memory_listener_match(MemoryListener *listener,
-                                  MemoryRegionSection *section)
-{
-    return !listener->address_space_filter
-        || listener->address_space_filter == section->address_space;
-}
-
 #define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
     do {                                                                \
         MemoryListener *_listener;                                      \
@@ -128,24 +122,23 @@ static bool memory_listener_match(MemoryListener *listener,
         }                                                               \
     } while (0)
 
-#define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
+#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
     do {                                                                \
         MemoryListener *_listener;                                      \
+        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                         \
         switch (_direction) {                                           \
         case Forward:                                                   \
-            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
-                if (_listener->_callback                                \
-                    && memory_listener_match(_listener, _section)) {    \
+            QTAILQ_FOREACH(_listener, list, link_as) {                  \
+                if (_listener->_callback) {                             \
                     _listener->_callback(_listener, _section, ##_args); \
                 }                                                       \
             }                                                           \
             break;                                                      \
         case Reverse:                                                   \
-            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
-                                   memory_listeners, link) {            \
-                if (_listener->_callback                                \
-                    && memory_listener_match(_listener, _section)) {    \
+            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
+                                   link_as) {                           \
+                if (_listener->_callback) {                             \
                     _listener->_callback(_listener, _section, ##_args); \
                 }                                                       \
             }                                                           \
@@ -157,14 +150,10 @@ static bool memory_listener_match(MemoryListener *listener,
 
 /* No need to ref/unref .mr, the FlatRange keeps it alive. */
 #define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
-    MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) {       \
-        .mr = (fr)->mr,                                                 \
-        .address_space = (as),                                          \
-        .offset_within_region = (fr)->offset_in_region,                 \
-        .size = (fr)->addr.size,                                        \
-        .offset_within_address_space = int128_get64((fr)->addr.start),  \
-        .readonly = (fr)->readonly,                                     \
-    }), ##_args)
+    do {                                                                \
+        MemoryRegionSection mrs = section_from_flat_range(fr, as);      \
+        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
+    } while(0)
 
 struct CoalescedMemoryRange {
     AddrRange addr;
@@ -244,6 +233,19 @@ typedef struct AddressSpaceOps AddressSpaceOps;
 #define FOR_EACH_FLAT_RANGE(var, view)          \
     for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
 
+static inline MemoryRegionSection
+section_from_flat_range(FlatRange *fr, AddressSpace *as)
+{
+    return (MemoryRegionSection) {
+        .mr = fr->mr,
+        .address_space = as,
+        .offset_within_region = fr->offset_in_region,
+        .size = fr->addr.size,
+        .offset_within_address_space = int128_get64(fr->addr.start),
+        .readonly = fr->readonly,
+    };
+}
+
 static bool flatrange_equal(FlatRange *a, FlatRange *b)
 {
     return a->mr == b->mr
@@ -386,6 +388,14 @@ static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
     return abs_addr;
 }
 
+static int get_cpu_index(void)
+{
+    if (current_cpu) {
+        return current_cpu->cpu_index;
+    }
+    return -1;
+}
+
 static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
@@ -398,10 +408,15 @@ static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
 
     tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
     if (mr->subpage) {
-        trace_memory_region_subpage_read(mr, addr, tmp, size);
+        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
+    } else if (mr == &io_mem_notdirty) {
+        /* Accesses to code which has previously been translated into a TB show
+         * up in the MMIO path, as accesses to the io_mem_notdirty
+         * MemoryRegion. */
+        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
     } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
-        trace_memory_region_ops_read(mr, abs_addr, tmp, size);
+        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
     }
     *value |= (tmp & mask) << shift;
     return MEMTX_OK;
@@ -419,10 +434,15 @@ static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
 
     tmp = mr->ops->read(mr->opaque, addr, size);
     if (mr->subpage) {
-        trace_memory_region_subpage_read(mr, addr, tmp, size);
+        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
+    } else if (mr == &io_mem_notdirty) {
+        /* Accesses to code which has previously been translated into a TB show
+         * up in the MMIO path, as accesses to the io_mem_notdirty
+         * MemoryRegion. */
+        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
     } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
-        trace_memory_region_ops_read(mr, abs_addr, tmp, size);
+        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
     }
     *value |= (tmp & mask) << shift;
     return MEMTX_OK;
@@ -441,10 +461,15 @@ static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
 
     r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
     if (mr->subpage) {
-        trace_memory_region_subpage_read(mr, addr, tmp, size);
+        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
+    } else if (mr == &io_mem_notdirty) {
+        /* Accesses to code which has previously been translated into a TB show
+         * up in the MMIO path, as accesses to the io_mem_notdirty
+         * MemoryRegion. */
+        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
     } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
-        trace_memory_region_ops_read(mr, abs_addr, tmp, size);
+        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
     }
     *value |= (tmp & mask) << shift;
     return r;
@@ -462,10 +487,15 @@ static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
 
     tmp = (*value >> shift) & mask;
     if (mr->subpage) {
-        trace_memory_region_subpage_write(mr, addr, tmp, size);
+        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
+    } else if (mr == &io_mem_notdirty) {
+        /* Accesses to code which has previously been translated into a TB show
+         * up in the MMIO path, as accesses to the io_mem_notdirty
+         * MemoryRegion. */
+        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
     } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
-        trace_memory_region_ops_write(mr, abs_addr, tmp, size);
+        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
     }
     mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
     return MEMTX_OK;
@@ -483,10 +513,15 @@ static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
 
     tmp = (*value >> shift) & mask;
    if (mr->subpage) {
-        trace_memory_region_subpage_write(mr, addr, tmp, size);
+        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
+    } else if (mr == &io_mem_notdirty) {
+        /* Accesses to code which has previously been translated into a TB show
+         * up in the MMIO path, as accesses to the io_mem_notdirty
+         * MemoryRegion. */
+        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
     } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
-        trace_memory_region_ops_write(mr, abs_addr, tmp, size);
+        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
     }
     mr->ops->write(mr->opaque, addr, tmp, size);
     return MEMTX_OK;
@@ -504,10 +539,15 @@ static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
 
     tmp = (*value >> shift) & mask;
     if (mr->subpage) {
-        trace_memory_region_subpage_write(mr, addr, tmp, size);
+        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
+    } else if (mr == &io_mem_notdirty) {
+        /* Accesses to code which has previously been translated into a TB show
+         * up in the MMIO path, as accesses to the io_mem_notdirty
+         * MemoryRegion. */
+        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
     } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
-        trace_memory_region_ops_write(mr, abs_addr, tmp, size);
+        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
     }
     return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
 }
@@ -701,7 +741,7 @@ static void address_space_add_del_ioeventfds(AddressSpace *as,
                 .offset_within_address_space = int128_get64(fd->addr.start),
                 .size = fd->addr.size,
             };
-            MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
+            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                  fd->match_data, fd->data, fd->e);
             ++iold;
         } else if (inew < fds_new_nb
@@ -714,7 +754,7 @@ static void address_space_add_del_ioeventfds(AddressSpace *as,
                 .offset_within_address_space = int128_get64(fd->addr.start),
                 .size = fd->addr.size,
             };
-            MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
+            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                  fd->match_data, fd->data, fd->e);
             ++inew;
         } else {
@@ -905,11 +945,6 @@ static void memory_region_destructor_ram(MemoryRegion *mr)
     qemu_ram_free(mr->ram_block);
 }
 
-static void memory_region_destructor_rom_device(MemoryRegion *mr)
-{
-    qemu_ram_free(mr->ram_block);
-}
-
 static bool memory_region_need_escape(char c)
 {
     return c == '/' || c == '[' || c == '\\' || c == ']';
@@ -1016,13 +1051,6 @@ static void memory_region_get_priority(Object *obj, Visitor *v,
     visit_type_int32(v, name, &value, errp);
 }
 
-static bool memory_region_get_may_overlap(Object *obj, Error **errp)
-{
-    MemoryRegion *mr = MEMORY_REGION(obj);
-
-    return mr->may_overlap;
-}
-
 static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                    void *opaque, Error **errp)
 {
@@ -1060,10 +1088,6 @@ static void memory_region_initfn(Object *obj)
                              memory_region_get_priority,
                              NULL, /* memory_region_set_priority */
                              NULL, NULL, &error_abort);
-    object_property_add_bool(OBJECT(mr), "may-overlap",
-                             memory_region_get_may_overlap,
-                             NULL, /* memory_region_set_may_overlap */
-                             &error_abort);
     object_property_add(OBJECT(mr), "size", "uint64",
                         memory_region_get_size,
                         NULL, /* memory_region_set_size, */
@@ -1348,6 +1372,21 @@ void memory_region_init_alias(MemoryRegion *mr,
     mr->alias_offset = offset;
 }
 
+void memory_region_init_rom(MemoryRegion *mr,
+                            struct Object *owner,
+                            const char *name,
+                            uint64_t size,
+                            Error **errp)
+{
+    memory_region_init(mr, owner, name, size);
+    mr->ram = true;
+    mr->readonly = true;
+    mr->terminates = true;
+    mr->destructor = memory_region_destructor_ram;
+    mr->ram_block = qemu_ram_alloc(size, mr, errp);
+    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+}
+
 void memory_region_init_rom_device(MemoryRegion *mr,
                                    Object *owner,
                                    const MemoryRegionOps *ops,
@@ -1356,12 +1395,13 @@ void memory_region_init_rom_device(MemoryRegion *mr,
                                    uint64_t size,
                                    Error **errp)
 {
+    assert(ops);
     memory_region_init(mr, owner, name, size);
     mr->ops = ops;
     mr->opaque = opaque;
     mr->terminates = true;
     mr->rom_device = true;
-    mr->destructor = memory_region_destructor_rom_device;
+    mr->destructor = memory_region_destructor_ram;
     mr->ram_block = qemu_ram_alloc(size, mr, errp);
 }
 
@@ -1374,7 +1414,8 @@ void memory_region_init_iommu(MemoryRegion *mr,
     memory_region_init(mr, owner, name, size);
     mr->iommu_ops = ops,
     mr->terminates = true;  /* then re-forwards */
-    notifier_list_init(&mr->iommu_notify);
+    QLIST_INIT(&mr->iommu_notify);
+    mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
 }
 
 static void memory_region_finalize(Object *obj)
@@ -1458,7 +1499,7 @@ bool memory_region_is_skip_dump(MemoryRegion *mr)
 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
 {
     uint8_t mask = mr->dirty_log_mask;
-    if (global_dirty_log) {
+    if (global_dirty_log && mr->ram_block) {
         mask |= (1 << DIRTY_MEMORY_MIGRATION);
     }
     return mask;
@@ -1469,17 +1510,50 @@ bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
     return memory_region_get_dirty_log_mask(mr) & (1 << client);
 }
 
-void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
+static void memory_region_update_iommu_notify_flags(MemoryRegion *mr)
 {
-    notifier_list_add(&mr->iommu_notify, n);
+    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
+    IOMMUNotifier *iommu_notifier;
+
+    QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) {
+        flags |= iommu_notifier->notifier_flags;
+    }
+
+    if (flags != mr->iommu_notify_flags &&
+        mr->iommu_ops->notify_flag_changed) {
+        mr->iommu_ops->notify_flag_changed(mr, mr->iommu_notify_flags,
+                                           flags);
+    }
+
+    mr->iommu_notify_flags = flags;
 }
 
-void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n,
-                                hwaddr granularity, bool is_write)
+void memory_region_register_iommu_notifier(MemoryRegion *mr,
+                                           IOMMUNotifier *n)
 {
-    hwaddr addr;
+    /* We need to register for at least one bitfield */
+    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
+    QLIST_INSERT_HEAD(&mr->iommu_notify, n, node);
+    memory_region_update_iommu_notify_flags(mr);
+}
+
+uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
+{
+    assert(memory_region_is_iommu(mr));
+    if (mr->iommu_ops && mr->iommu_ops->get_min_page_size) {
+        return mr->iommu_ops->get_min_page_size(mr);
+    }
+    return TARGET_PAGE_SIZE;
+}
+
+void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
+                                bool is_write)
+{
+    hwaddr addr, granularity;
     IOMMUTLBEntry iotlb;
 
+    granularity = memory_region_iommu_get_min_page_size(mr);
+
     for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
         iotlb = mr->iommu_ops->translate(mr, addr, is_write);
         if (iotlb.perm != IOMMU_NONE) {
@@ -1494,16 +1568,32 @@ void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n,
         }
     }
 }
 
-void memory_region_unregister_iommu_notifier(Notifier *n)
+void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
+                                             IOMMUNotifier *n)
 {
-    notifier_remove(n);
+    QLIST_REMOVE(n, node);
+    memory_region_update_iommu_notify_flags(mr);
 }
 
 void memory_region_notify_iommu(MemoryRegion *mr,
                                 IOMMUTLBEntry entry)
 {
+    IOMMUNotifier *iommu_notifier;
+    IOMMUNotifierFlag request_flags;
+
     assert(memory_region_is_iommu(mr));
-    notifier_list_notify(&mr->iommu_notify, &entry);
+
+    if (entry.perm & IOMMU_RW) {
+        request_flags = IOMMU_NOTIFIER_MAP;
+    } else {
+        request_flags = IOMMU_NOTIFIER_UNMAP;
+    }
+
+    QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) {
+        if (iommu_notifier->notifier_flags & request_flags) {
+            iommu_notifier->notify(iommu_notifier, &entry);
+        }
+    }
 }
 
 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
@@ -1552,14 +1642,26 @@ bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
 
 void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
 {
+    MemoryListener *listener;
     AddressSpace *as;
+    FlatView *view;
     FlatRange *fr;
 
-    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
-        FlatView *view = address_space_get_flatview(as);
+    /* If the same address space has multiple log_sync listeners, we
+     * visit that address space's FlatView multiple times.  But because
+     * log_sync listeners are rare, it's still cheaper than walking each
+     * address space once.
+     */
+    QTAILQ_FOREACH(listener, &memory_listeners, link) {
+        if (!listener->log_sync) {
+            continue;
+        }
+        as = listener->address_space;
+        view = address_space_get_flatview(as);
         FOR_EACH_FLAT_RANGE(fr, view) {
             if (fr->mr == mr) {
-                MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
+                MemoryRegionSection mrs = section_from_flat_range(fr, as);
+                listener->log_sync(listener, &mrs);
             }
         }
         flatview_unref(view);
@@ -1596,13 +1698,26 @@ void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
 
 int memory_region_get_fd(MemoryRegion *mr)
 {
-    if (mr->alias) {
-        return memory_region_get_fd(mr->alias);
+    int fd;
+
+    rcu_read_lock();
+    while (mr->alias) {
+        mr = mr->alias;
     }
+    fd = mr->ram_block->fd;
+    rcu_read_unlock();
 
-    assert(mr->ram_block);
+    return fd;
+}
 
-    return qemu_get_ram_fd(memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK);
+void memory_region_set_fd(MemoryRegion *mr, int fd)
+{
+    rcu_read_lock();
+    while (mr->alias) {
+        mr = mr->alias;
+    }
+    mr->ram_block->fd = fd;
+    rcu_read_unlock();
 }
 
 void *memory_region_get_ram_ptr(MemoryRegion *mr)
@@ -1616,11 +1731,22 @@ void *memory_region_get_ram_ptr(MemoryRegion *mr)
         mr = mr->alias;
     }
     assert(mr->ram_block);
-    ptr = qemu_get_ram_ptr(mr->ram_block,
-                           memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK);
+    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
     rcu_read_unlock();
 
-    return ptr + offset;
+    return ptr;
+}
+
+MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
+{
+    RAMBlock *block;
+
+    block = qemu_ram_block_from_host(ptr, false, offset);
+    if (!block) {
+        return NULL;
+    }
+
+    return block->mr;
 }
 
 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
@@ -1632,7 +1758,7 @@ void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp
 {
     assert(mr->ram_block);
 
-    qemu_ram_resize(memory_region_get_ram_addr(mr), newsize, errp);
+    qemu_ram_resize(mr->ram_block, newsize, errp);
 }
 
 static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
@@ -1652,7 +1778,7 @@ static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpa
                 .size = fr->addr.size,
             };
 
-            MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
+            MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
                                  int128_get64(fr->addr.start),
                                  int128_get64(fr->addr.size));
             QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
@@ -1663,7 +1789,7 @@ static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpa
                     continue;
                 }
                 tmp = addrrange_intersection(tmp, fr->addr);
-                MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
+                MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
             }
@@ -1823,34 +1949,12 @@ void memory_region_del_eventfd(MemoryRegion *mr,
 
 static void memory_region_update_container_subregions(MemoryRegion *subregion)
 {
-    hwaddr offset = subregion->addr;
     MemoryRegion *mr = subregion->container;
     MemoryRegion *other;
 
     memory_region_transaction_begin();
 
     memory_region_ref(subregion);
-    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
-        if (subregion->may_overlap || other->may_overlap) {
-            continue;
-        }
-        if (int128_ge(int128_make64(offset),
-                      int128_add(int128_make64(other->addr), other->size))
-            || int128_le(int128_add(int128_make64(offset), subregion->size),
-                         int128_make64(other->addr))) {
-            continue;
-        }
-#if 0
-        printf("warning: subregion collision %llx/%llx (%s) "
-               "vs %llx/%llx (%s)\n",
-               (unsigned long long)offset,
-               (unsigned long long)int128_get64(subregion->size),
-               subregion->name,
-               (unsigned long long)other->addr,
-               (unsigned long long)int128_get64(other->size),
-               other->name);
-#endif
-    }
     QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
         if (subregion->priority >= other->priority) {
             QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
@@ -1877,7 +1981,6 @@ void memory_region_add_subregion(MemoryRegion *mr,
                                  hwaddr offset,
                                  MemoryRegion *subregion)
 {
-    subregion->may_overlap = false;
     subregion->priority = 0;
     memory_region_add_subregion_common(mr, offset, subregion);
 }
@@ -1887,7 +1990,6 @@ void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                          MemoryRegion *subregion,
                                          int priority)
 {
-    subregion->may_overlap = true;
     subregion->priority = priority;
     memory_region_add_subregion_common(mr, offset, subregion);
 }
@@ -2067,16 +2169,27 @@ bool memory_region_present(MemoryRegion *container, hwaddr addr)
     return mr && mr != container;
 }
 
-void address_space_sync_dirty_bitmap(AddressSpace *as)
+void memory_global_dirty_log_sync(void)
 {
+    MemoryListener *listener;
+    AddressSpace *as;
     FlatView *view;
     FlatRange *fr;
 
-    view = address_space_get_flatview(as);
-    FOR_EACH_FLAT_RANGE(fr, view) {
-        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
+    QTAILQ_FOREACH(listener, &memory_listeners, link) {
+        if (!listener->log_sync) {
+            continue;
+        }
+        as = listener->address_space;
+        view = address_space_get_flatview(as);
+        FOR_EACH_FLAT_RANGE(fr, view) {
+            if (fr->dirty_log_mask) {
+                MemoryRegionSection mrs = section_from_flat_range(fr, as);
+                listener->log_sync(listener, &mrs);
+            }
+        }
+        flatview_unref(view);
     }
-    flatview_unref(view);
 }
 
 void memory_global_dirty_log_start(void)
@@ -2109,11 +2222,6 @@ static void listener_add_address_space(MemoryListener *listener,
     FlatView *view;
     FlatRange *fr;
 
-    if (listener->address_space_filter
-        && listener->address_space_filter != as) {
-        return;
-    }
-
     if (listener->begin) {
         listener->begin(listener);
     }
@@ -2146,12 +2254,11 @@ static void listener_add_address_space(MemoryListener *listener,
     flatview_unref(view);
 }
 
-void memory_listener_register(MemoryListener *listener, AddressSpace *filter)
+void memory_listener_register(MemoryListener *listener, AddressSpace *as)
 {
     MemoryListener *other = NULL;
-    AddressSpace *as;
 
-    listener->address_space_filter = filter;
+    listener->address_space = as;
     if (QTAILQ_EMPTY(&memory_listeners)
         || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                              memory_listeners)->priority) {
@@ -2165,14 +2272,26 @@ void memory_listener_register(MemoryListener *listener, AddressSpace *filter)
         QTAILQ_INSERT_BEFORE(other, listener, link);
     }
 
-    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
-        listener_add_address_space(listener, as);
+    if (QTAILQ_EMPTY(&as->listeners)
+        || listener->priority >= QTAILQ_LAST(&as->listeners,
+                                             memory_listeners)->priority) {
+        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
+    } else {
+        QTAILQ_FOREACH(other, &as->listeners, link_as) {
+            if (listener->priority < other->priority) {
+                break;
+            }
+        }
+        QTAILQ_INSERT_BEFORE(other, listener, link_as);
     }
+
+    listener_add_address_space(listener, as);
 }
 
 void memory_listener_unregister(MemoryListener *listener)
 {
     QTAILQ_REMOVE(&memory_listeners, listener, link);
+    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
 }
 
 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
@@ -2186,6 +2305,7 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
     flatview_init(as->current_map);
     as->ioeventfd_nb = 0;
     as->ioeventfds = NULL;
+    QTAILQ_INIT(&as->listeners);
     QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
     as->name = g_strdup(name ? name : "anonymous");
     address_space_init_dispatch(as);
@@ -2195,14 +2315,10 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
 
 static void do_address_space_destroy(AddressSpace *as)
 {
-    MemoryListener *listener;
     bool do_free = as->malloced;
 
     address_space_destroy_dispatch(as);
-
-    QTAILQ_FOREACH(listener, &memory_listeners, link) {
-        assert(listener->address_space_filter != as);
-    }
+    assert(QTAILQ_EMPTY(&as->listeners));
 
     flatview_unref(as->current_map);
     g_free(as->name);
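
Illustrative usage sketch (not part of the patch itself): the IOMMU hunks above replace the plain Notifier list with IOMMUNotifier objects that carry a notifier_flags mask, and memory_region_notify_iommu() only calls notifiers whose flags match the event. A minimal caller might look like the following; the callback and variable names (my_iommu_unmap_notify, iommu_mr) are hypothetical, and only the functions, types and fields visible in the hunks above are assumed.

    static void my_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
    {
        /* react to the mapping change described by *iotlb */
    }

    static IOMMUNotifier n;

    void attach_notifier(MemoryRegion *iommu_mr)
    {
        n.notify = my_iommu_unmap_notify;
        n.notifier_flags = IOMMU_NOTIFIER_UNMAP;  /* must not be IOMMU_NOTIFIER_NONE */
        memory_region_register_iommu_notifier(iommu_mr, &n);
        /* replay existing translations at the IOMMU's minimum page size */
        memory_region_iommu_replay(iommu_mr, &n, false);
    }

    void detach_notifier(MemoryRegion *iommu_mr)
    {
        memory_region_unregister_iommu_notifier(iommu_mr, &n);
    }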