X-Git-Url: https://repo.jachan.dev/qemu.git/blobdiff_plain/e7e9b49f8e9ea4c5c9d07f6d8c9071c64dae816a..0e173b24b523b432854689717e09de5c95c158f8:/memory.c diff --git a/memory.c b/memory.c index ee3f2a8a95..93bd8ed7bc 100644 --- a/memory.c +++ b/memory.c @@ -18,16 +18,20 @@ #include "exec/ioport.h" #include "qapi/visitor.h" #include "qemu/bitops.h" +#include "qemu/error-report.h" #include "qom/object.h" #include "trace.h" #include #include "exec/memory-internal.h" #include "exec/ram_addr.h" +#include "sysemu/kvm.h" #include "sysemu/sysemu.h" //#define DEBUG_UNASSIGNED +#define RAM_ADDR_INVALID (~(ram_addr_t)0) + static unsigned memory_region_transaction_depth; static bool memory_region_update_pending; static bool ioeventfd_update_pending; @@ -152,7 +156,7 @@ static bool memory_listener_match(MemoryListener *listener, } while (0) /* No need to ref/unref .mr, the FlatRange keeps it alive. */ -#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback) \ +#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \ MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) { \ .mr = (fr)->mr, \ .address_space = (as), \ @@ -160,7 +164,7 @@ static bool memory_listener_match(MemoryListener *listener, .size = (fr)->addr.size, \ .offset_within_address_space = int128_get64((fr)->addr.start), \ .readonly = (fr)->readonly, \ - })) + }), ##_args) struct CoalescedMemoryRange { AddrRange addr; @@ -368,84 +372,121 @@ static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size) } } -static void memory_region_oldmmio_read_accessor(MemoryRegion *mr, +static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + unsigned shift, + uint64_t mask, + MemTxAttrs attrs) +{ + uint64_t tmp; + + tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr); + trace_memory_region_ops_read(mr, addr, tmp, size); + *value |= (tmp & mask) << shift; + return MEMTX_OK; +} + +static MemTxResult memory_region_read_accessor(MemoryRegion *mr, hwaddr addr, uint64_t *value, unsigned size, unsigned shift, - uint64_t mask) + uint64_t mask, + MemTxAttrs attrs) { uint64_t tmp; - tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr); + tmp = mr->ops->read(mr->opaque, addr, size); trace_memory_region_ops_read(mr, addr, tmp, size); *value |= (tmp & mask) << shift; + return MEMTX_OK; } -static void memory_region_read_accessor(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - unsigned shift, - uint64_t mask) +static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + unsigned shift, + uint64_t mask, + MemTxAttrs attrs) { - uint64_t tmp; + uint64_t tmp = 0; + MemTxResult r; - if (mr->flush_coalesced_mmio) { - qemu_flush_coalesced_mmio_buffer(); - } - tmp = mr->ops->read(mr->opaque, addr, size); + r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs); trace_memory_region_ops_read(mr, addr, tmp, size); *value |= (tmp & mask) << shift; + return r; } -static void memory_region_oldmmio_write_accessor(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - unsigned shift, - uint64_t mask) +static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + unsigned shift, + uint64_t mask, + MemTxAttrs attrs) { uint64_t tmp; tmp = (*value >> shift) & mask; trace_memory_region_ops_write(mr, addr, tmp, size); mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp); + return 
MEMTX_OK; } -static void memory_region_write_accessor(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - unsigned shift, - uint64_t mask) +static MemTxResult memory_region_write_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + unsigned shift, + uint64_t mask, + MemTxAttrs attrs) { uint64_t tmp; - if (mr->flush_coalesced_mmio) { - qemu_flush_coalesced_mmio_buffer(); - } tmp = (*value >> shift) & mask; trace_memory_region_ops_write(mr, addr, tmp, size); mr->ops->write(mr->opaque, addr, tmp, size); + return MEMTX_OK; } -static void access_with_adjusted_size(hwaddr addr, +static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + unsigned shift, + uint64_t mask, + MemTxAttrs attrs) +{ + uint64_t tmp; + + tmp = (*value >> shift) & mask; + trace_memory_region_ops_write(mr, addr, tmp, size); + return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs); +} + +static MemTxResult access_with_adjusted_size(hwaddr addr, uint64_t *value, unsigned size, unsigned access_size_min, unsigned access_size_max, - void (*access)(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - unsigned shift, - uint64_t mask), - MemoryRegion *mr) + MemTxResult (*access)(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + unsigned shift, + uint64_t mask, + MemTxAttrs attrs), + MemoryRegion *mr, + MemTxAttrs attrs) { uint64_t access_mask; unsigned access_size; unsigned i; + MemTxResult r = MEMTX_OK; if (!access_size_min) { access_size_min = 1; @@ -459,14 +500,16 @@ static void access_with_adjusted_size(hwaddr addr, access_mask = -1ULL >> (64 - access_size * 8); if (memory_region_big_endian(mr)) { for (i = 0; i < size; i += access_size) { - access(mr, addr + i, value, access_size, - (size - access_size - i) * 8, access_mask); + r |= access(mr, addr + i, value, access_size, + (size - access_size - i) * 8, access_mask, attrs); } } else { for (i = 0; i < size; i += access_size) { - access(mr, addr + i, value, access_size, i * 8, access_mask); + r |= access(mr, addr + i, value, access_size, i * 8, + access_mask, attrs); } } + return r; } static AddressSpace *memory_region_to_address_space(MemoryRegion *mr) @@ -537,7 +580,7 @@ static void render_memory_region(FlatView *view, remain = clip.size; fr.mr = mr; - fr.dirty_log_mask = mr->dirty_log_mask; + fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr); fr.romd_mode = mr->romd_mode; fr.readonly = readonly; @@ -723,10 +766,15 @@ static void address_space_update_topology_pass(AddressSpace *as, if (adding) { MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop); - if (frold->dirty_log_mask && !frnew->dirty_log_mask) { - MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop); - } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) { - MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start); + if (frnew->dirty_log_mask & ~frold->dirty_log_mask) { + MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start, + frold->dirty_log_mask, + frnew->dirty_log_mask); + } + if (frold->dirty_log_mask & ~frnew->dirty_log_mask) { + MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop, + frold->dirty_log_mask, + frnew->dirty_log_mask); } } @@ -813,16 +861,6 @@ static void memory_region_destructor_ram(MemoryRegion *mr) qemu_ram_free(mr->ram_addr); } -static void memory_region_destructor_alias(MemoryRegion *mr) -{ - memory_region_unref(mr->alias); -} - -static void 
memory_region_destructor_ram_from_ptr(MemoryRegion *mr) -{ - qemu_ram_free_from_ptr(mr->ram_addr); -} - static void memory_region_destructor_rom_device(MemoryRegion *mr) { qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK); @@ -867,20 +905,22 @@ void memory_region_init(MemoryRegion *mr, const char *name, uint64_t size) { - if (!owner) { - owner = container_get(qdev_get_machine(), "/unattached"); - } - object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION); mr->size = int128_make64(size); if (size == UINT64_MAX) { mr->size = int128_2_64(); } mr->name = g_strdup(name); + mr->owner = owner; if (name) { char *escaped_name = memory_region_escape_name(name); char *name_array = g_strdup_printf("%s[*]", escaped_name); + + if (!owner) { + owner = container_get(qdev_get_machine(), "/unattached"); + } + object_property_add_child(owner, name_array, OBJECT(mr), &error_abort); object_unref(OBJECT(mr)); g_free(name_array); @@ -951,8 +991,10 @@ static void memory_region_initfn(Object *obj) ObjectProperty *op; mr->ops = &unassigned_mem_ops; + mr->ram_addr = RAM_ADDR_INVALID; mr->enabled = true; mr->romd_mode = true; + mr->global_locking = true; mr->destructor = memory_region_destructor_none; QTAILQ_INIT(&mr->subregions); QTAILQ_INIT(&mr->coalesced); @@ -1053,62 +1095,113 @@ bool memory_region_access_valid(MemoryRegion *mr, return true; } -static uint64_t memory_region_dispatch_read1(MemoryRegion *mr, - hwaddr addr, - unsigned size) +static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr, + hwaddr addr, + uint64_t *pval, + unsigned size, + MemTxAttrs attrs) { - uint64_t data = 0; + *pval = 0; if (mr->ops->read) { - access_with_adjusted_size(addr, &data, size, - mr->ops->impl.min_access_size, - mr->ops->impl.max_access_size, - memory_region_read_accessor, mr); + return access_with_adjusted_size(addr, pval, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_read_accessor, + mr, attrs); + } else if (mr->ops->read_with_attrs) { + return access_with_adjusted_size(addr, pval, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_read_with_attrs_accessor, + mr, attrs); } else { - access_with_adjusted_size(addr, &data, size, 1, 4, - memory_region_oldmmio_read_accessor, mr); + return access_with_adjusted_size(addr, pval, size, 1, 4, + memory_region_oldmmio_read_accessor, + mr, attrs); } - - return data; } -static bool memory_region_dispatch_read(MemoryRegion *mr, +MemTxResult memory_region_dispatch_read(MemoryRegion *mr, hwaddr addr, uint64_t *pval, - unsigned size) + unsigned size, + MemTxAttrs attrs) { + MemTxResult r; + if (!memory_region_access_valid(mr, addr, size, false)) { *pval = unassigned_mem_read(mr, addr, size); - return true; + return MEMTX_DECODE_ERROR; } - *pval = memory_region_dispatch_read1(mr, addr, size); + r = memory_region_dispatch_read1(mr, addr, pval, size, attrs); adjust_endianness(mr, pval, size); + return r; +} + +/* Return true if an eventfd was signalled */ +static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr, + hwaddr addr, + uint64_t data, + unsigned size, + MemTxAttrs attrs) +{ + MemoryRegionIoeventfd ioeventfd = { + .addr = addrrange_make(int128_make64(addr), int128_make64(size)), + .data = data, + }; + unsigned i; + + for (i = 0; i < mr->ioeventfd_nb; i++) { + ioeventfd.match_data = mr->ioeventfds[i].match_data; + ioeventfd.e = mr->ioeventfds[i].e; + + if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) { + event_notifier_set(ioeventfd.e); + return true; + } + } + return false; } 
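
For reference, the read_with_attrs/write_with_attrs hooks that the dispatch code above now understands would be implemented by a device roughly as follows. This is an illustrative sketch only, not part of the patch: the "demo" device, its fields, and the secure-only policy are invented, and it assumes the MemTxAttrs/MemTxResult definitions introduced by this series.

/* Sketch: a hypothetical MMIO device using the *_with_attrs callbacks. */
#include "exec/memory.h"

typedef struct DemoState {
    MemoryRegion iomem;
    uint64_t reg;
} DemoState;

static MemTxResult demo_read(void *opaque, hwaddr addr, uint64_t *data,
                             unsigned size, MemTxAttrs attrs)
{
    DemoState *s = opaque;

    if (!attrs.secure) {
        /* Reject non-secure accesses; the error travels back to the
         * caller through memory_region_dispatch_read(). */
        return MEMTX_ERROR;
    }
    *data = s->reg;
    return MEMTX_OK;
}

static MemTxResult demo_write(void *opaque, hwaddr addr, uint64_t data,
                              unsigned size, MemTxAttrs attrs)
{
    DemoState *s = opaque;

    if (!attrs.secure) {
        return MEMTX_ERROR;
    }
    s->reg = data;
    return MEMTX_OK;
}

static const MemoryRegionOps demo_ops = {
    .read_with_attrs = demo_read,
    .write_with_attrs = demo_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl.min_access_size = 4,
    .impl.max_access_size = 4,
};

static void demo_init_mmio(DemoState *s)
{
    /* With only the *_with_attrs hooks set, accesses are routed through
     * memory_region_{read,write}_with_attrs_accessor above and return a
     * MemTxResult instead of completing silently. */
    memory_region_init_io(&s->iomem, NULL, &demo_ops, s, "demo", 0x1000);
}

Returning a MemTxResult rather than void is the point of threading attrs through the accessors in this diff: a bus error raised by the device can now propagate back to the CPU that issued the access.
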
-static bool memory_region_dispatch_write(MemoryRegion *mr, +MemTxResult memory_region_dispatch_write(MemoryRegion *mr, hwaddr addr, uint64_t data, - unsigned size) + unsigned size, + MemTxAttrs attrs) { if (!memory_region_access_valid(mr, addr, size, true)) { unassigned_mem_write(mr, addr, data, size); - return true; + return MEMTX_DECODE_ERROR; } adjust_endianness(mr, &data, size); + if ((!kvm_eventfds_enabled()) && + memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) { + return MEMTX_OK; + } + if (mr->ops->write) { - access_with_adjusted_size(addr, &data, size, - mr->ops->impl.min_access_size, - mr->ops->impl.max_access_size, - memory_region_write_accessor, mr); + return access_with_adjusted_size(addr, &data, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_write_accessor, mr, + attrs); + } else if (mr->ops->write_with_attrs) { + return + access_with_adjusted_size(addr, &data, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_write_with_attrs_accessor, + mr, attrs); } else { - access_with_adjusted_size(addr, &data, size, 1, 4, - memory_region_oldmmio_write_accessor, mr); + return access_with_adjusted_size(addr, &data, size, 1, 4, + memory_region_oldmmio_write_accessor, + mr, attrs); } - return false; } void memory_region_init_io(MemoryRegion *mr, @@ -1119,10 +1212,9 @@ void memory_region_init_io(MemoryRegion *mr, uint64_t size) { memory_region_init(mr, owner, name, size); - mr->ops = ops; + mr->ops = ops ? ops : &unassigned_mem_ops; mr->opaque = opaque; mr->terminates = true; - mr->ram_addr = ~(ram_addr_t)0; } void memory_region_init_ram(MemoryRegion *mr, @@ -1136,6 +1228,7 @@ void memory_region_init_ram(MemoryRegion *mr, mr->terminates = true; mr->destructor = memory_region_destructor_ram; mr->ram_addr = qemu_ram_alloc(size, mr, errp); + mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; } void memory_region_init_resizeable_ram(MemoryRegion *mr, @@ -1153,6 +1246,7 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr, mr->terminates = true; mr->destructor = memory_region_destructor_ram; mr->ram_addr = qemu_ram_alloc_resizeable(size, max_size, resized, mr, errp); + mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; } #ifdef __linux__ @@ -1169,6 +1263,7 @@ void memory_region_init_ram_from_file(MemoryRegion *mr, mr->terminates = true; mr->destructor = memory_region_destructor_ram; mr->ram_addr = qemu_ram_alloc_from_file(size, mr, share, path, errp); + mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; } #endif @@ -1181,11 +1276,12 @@ void memory_region_init_ram_ptr(MemoryRegion *mr, memory_region_init(mr, owner, name, size); mr->ram = true; mr->terminates = true; - mr->destructor = memory_region_destructor_ram_from_ptr; + mr->destructor = memory_region_destructor_ram; + mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. 
*/ assert(ptr != NULL); - mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_abort); + mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal); } void memory_region_set_skip_dump(MemoryRegion *mr) @@ -1201,8 +1297,6 @@ void memory_region_init_alias(MemoryRegion *mr, uint64_t size) { memory_region_init(mr, owner, name, size); - memory_region_ref(orig); - mr->destructor = memory_region_destructor_alias; mr->alias = orig; mr->alias_offset = offset; } @@ -1236,19 +1330,26 @@ void memory_region_init_iommu(MemoryRegion *mr, notifier_list_init(&mr->iommu_notify); } -void memory_region_init_reservation(MemoryRegion *mr, - Object *owner, - const char *name, - uint64_t size) -{ - memory_region_init_io(mr, owner, &unassigned_mem_ops, mr, name, size); -} - static void memory_region_finalize(Object *obj) { MemoryRegion *mr = MEMORY_REGION(obj); - assert(QTAILQ_EMPTY(&mr->subregions)); + assert(!mr->container); + + /* We know the region is not visible in any address space (it + * does not have a container and cannot be a root either because + * it has no references, so we can blindly clear mr->enabled. + * memory_region_set_enabled instead could trigger a transaction + * and cause an infinite loop. + */ + mr->enabled = false; + memory_region_transaction_begin(); + while (!QTAILQ_EMPTY(&mr->subregions)) { + MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions); + memory_region_del_subregion(mr, subregion); + } + memory_region_transaction_commit(); + mr->destructor(mr); memory_region_clear_coalescing(mr); g_free((char *)mr->name); @@ -1270,24 +1371,18 @@ void memory_region_ref(MemoryRegion *mr) * The memory region is a child of its owner. As long as the * owner doesn't call unparent itself on the memory region, * ref-ing the owner will also keep the memory region alive. - * Memory regions without an owner are supposed to never go away, - * but we still ref/unref them for debugging purposes. + * Memory regions without an owner are supposed to never go away; + * we do not ref/unref them because it slows down DMA sensibly. 
*/ - Object *obj = OBJECT(mr); - if (obj && obj->parent) { - object_ref(obj->parent); - } else { - object_ref(obj); + if (mr && mr->owner) { + object_ref(mr->owner); } } void memory_region_unref(MemoryRegion *mr) { - Object *obj = OBJECT(mr); - if (obj && obj->parent) { - object_unref(obj->parent); - } else { - object_unref(obj); + if (mr && mr->owner) { + object_unref(mr->owner); } } @@ -1308,34 +1403,48 @@ const char *memory_region_name(const MemoryRegion *mr) return mr->name; } -bool memory_region_is_ram(MemoryRegion *mr) -{ - return mr->ram; -} - bool memory_region_is_skip_dump(MemoryRegion *mr) { return mr->skip_dump; } -bool memory_region_is_logging(MemoryRegion *mr) +uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr) { - return mr->dirty_log_mask; + uint8_t mask = mr->dirty_log_mask; + if (global_dirty_log) { + mask |= (1 << DIRTY_MEMORY_MIGRATION); + } + return mask; } -bool memory_region_is_rom(MemoryRegion *mr) +bool memory_region_is_logging(MemoryRegion *mr, uint8_t client) { - return mr->ram && mr->readonly; + return memory_region_get_dirty_log_mask(mr) & (1 << client); } -bool memory_region_is_iommu(MemoryRegion *mr) +void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n) { - return mr->iommu_ops; + notifier_list_add(&mr->iommu_notify, n); } -void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n) +void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, + hwaddr granularity, bool is_write) { - notifier_list_add(&mr->iommu_notify, n); + hwaddr addr; + IOMMUTLBEntry iotlb; + + for (addr = 0; addr < memory_region_size(mr); addr += granularity) { + iotlb = mr->iommu_ops->translate(mr, addr, is_write); + if (iotlb.perm != IOMMU_NONE) { + n->notify(n, &iotlb); + } + + /* if (2^64 - MR size) < granularity, it's possible to get an + * infinite loop here. This should catch such a wraparound */ + if ((addr + granularity) < addr) { + break; + } + } } void memory_region_unregister_iommu_notifier(Notifier *n) @@ -1353,6 +1462,14 @@ void memory_region_notify_iommu(MemoryRegion *mr, void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client) { uint8_t mask = 1 << client; + uint8_t old_logging; + + assert(client == DIRTY_MEMORY_VGA); + old_logging = mr->vga_logging_count; + mr->vga_logging_count += log ? 
1 : -1; + if (!!old_logging == !!mr->vga_logging_count) { + return; + } memory_region_transaction_begin(); mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask); @@ -1363,27 +1480,24 @@ void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client) bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr, hwaddr size, unsigned client) { - assert(mr->terminates); + assert(mr->ram_addr != RAM_ADDR_INVALID); return cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, client); } void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr, hwaddr size) { - assert(mr->terminates); - cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size); + assert(mr->ram_addr != RAM_ADDR_INVALID); + cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size, + memory_region_get_dirty_log_mask(mr)); } bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr, hwaddr size, unsigned client) { - bool ret; - assert(mr->terminates); - ret = cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, client); - if (ret) { - cpu_physical_memory_reset_dirty(mr->ram_addr + addr, size, client); - } - return ret; + assert(mr->ram_addr != RAM_ADDR_INVALID); + return cpu_physical_memory_test_and_clear_dirty(mr->ram_addr + addr, + size, client); } @@ -1426,8 +1540,9 @@ void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode) void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr, hwaddr size, unsigned client) { - assert(mr->terminates); - cpu_physical_memory_reset_dirty(mr->ram_addr + addr, size, client); + assert(mr->ram_addr != RAM_ADDR_INVALID); + cpu_physical_memory_test_and_clear_dirty(mr->ram_addr + addr, size, + client); } int memory_region_get_fd(MemoryRegion *mr) @@ -1436,20 +1551,33 @@ int memory_region_get_fd(MemoryRegion *mr) return memory_region_get_fd(mr->alias); } - assert(mr->terminates); + assert(mr->ram_addr != RAM_ADDR_INVALID); return qemu_get_ram_fd(mr->ram_addr & TARGET_PAGE_MASK); } void *memory_region_get_ram_ptr(MemoryRegion *mr) { - if (mr->alias) { - return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset; + void *ptr; + uint64_t offset = 0; + + rcu_read_lock(); + while (mr->alias) { + offset += mr->alias_offset; + mr = mr->alias; } + assert(mr->ram_addr != RAM_ADDR_INVALID); + ptr = qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK); + rcu_read_unlock(); - assert(mr->terminates); + return ptr + offset; +} + +void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp) +{ + assert(mr->ram_addr != RAM_ADDR_INVALID); - return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK); + qemu_ram_resize(mr->ram_addr, newsize, errp); } static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as) @@ -1549,6 +1677,18 @@ void memory_region_clear_flush_coalesced(MemoryRegion *mr) } } +void memory_region_set_global_locking(MemoryRegion *mr) +{ + mr->global_locking = true; +} + +void memory_region_clear_global_locking(MemoryRegion *mr) +{ + mr->global_locking = false; +} + +static bool userspace_eventfd_warning; + void memory_region_add_eventfd(MemoryRegion *mr, hwaddr addr, unsigned size, @@ -1565,7 +1705,16 @@ void memory_region_add_eventfd(MemoryRegion *mr, }; unsigned i; - adjust_endianness(mr, &mrfd.data, size); + if (kvm_enabled() && (!(kvm_eventfds_enabled() || + userspace_eventfd_warning))) { + userspace_eventfd_warning = true; + error_report("Using eventfd without MMIO binding in KVM. 
" + "Suboptimal performance expected"); + } + + if (size) { + adjust_endianness(mr, &mrfd.data, size); + } memory_region_transaction_begin(); for (i = 0; i < mr->ioeventfd_nb; ++i) { if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) { @@ -1598,7 +1747,9 @@ void memory_region_del_eventfd(MemoryRegion *mr, }; unsigned i; - adjust_endianness(mr, &mrfd.data, size); + if (size) { + adjust_endianness(mr, &mrfd.data, size); + } memory_region_transaction_begin(); for (i = 0; i < mr->ioeventfd_nb; ++i) { if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) { @@ -1762,11 +1913,6 @@ void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset) memory_region_transaction_commit(); } -ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr) -{ - return mr->ram_addr; -} - uint64_t memory_region_get_alignment(const MemoryRegion *mr) { return mr->align; @@ -1791,23 +1937,16 @@ static FlatRange *flatview_lookup(FlatView *view, AddrRange addr) sizeof(FlatRange), cmp_flatrange_addr); } -bool memory_region_present(MemoryRegion *container, hwaddr addr) -{ - MemoryRegion *mr = memory_region_find(container, addr, 1).mr; - if (!mr || (mr == container)) { - return false; - } - memory_region_unref(mr); - return true; -} - bool memory_region_is_mapped(MemoryRegion *mr) { return mr->container ? true : false; } -MemoryRegionSection memory_region_find(MemoryRegion *mr, - hwaddr addr, uint64_t size) +/* Same as memory_region_find, but it does not add a reference to the + * returned region. It must be called from an RCU critical section. + */ +static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr, + hwaddr addr, uint64_t size) { MemoryRegionSection ret = { .mr = NULL }; MemoryRegion *root; @@ -1828,11 +1967,10 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr, } range = addrrange_make(int128_make64(addr), int128_make64(size)); - rcu_read_lock(); view = atomic_rcu_read(&as->current_map); fr = flatview_lookup(view, range); if (!fr) { - goto out; + return ret; } while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) { @@ -1848,12 +1986,32 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr, ret.size = range.size; ret.offset_within_address_space = int128_get64(range.start); ret.readonly = fr->readonly; - memory_region_ref(ret.mr); -out: + return ret; +} + +MemoryRegionSection memory_region_find(MemoryRegion *mr, + hwaddr addr, uint64_t size) +{ + MemoryRegionSection ret; + rcu_read_lock(); + ret = memory_region_find_rcu(mr, addr, size); + if (ret.mr) { + memory_region_ref(ret.mr); + } rcu_read_unlock(); return ret; } +bool memory_region_present(MemoryRegion *container, hwaddr addr) +{ + MemoryRegion *mr; + + rcu_read_lock(); + mr = memory_region_find_rcu(container, addr, 1).mr; + rcu_read_unlock(); + return mr && mr != container; +} + void address_space_sync_dirty_bitmap(AddressSpace *as) { FlatView *view; @@ -1869,12 +2027,24 @@ void address_space_sync_dirty_bitmap(AddressSpace *as) void memory_global_dirty_log_start(void) { global_dirty_log = true; + MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward); + + /* Refresh DIRTY_LOG_MIGRATION bit. */ + memory_region_transaction_begin(); + memory_region_update_pending = true; + memory_region_transaction_commit(); } void memory_global_dirty_log_stop(void) { global_dirty_log = false; + + /* Refresh DIRTY_LOG_MIGRATION bit. 
 */
+    memory_region_transaction_begin();
+    memory_region_update_pending = true;
+    memory_region_transaction_commit();
+
     MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
 }
 
@@ -1889,6 +2059,9 @@ static void listener_add_address_space(MemoryListener *listener,
         return;
     }
 
+    if (listener->begin) {
+        listener->begin(listener);
+    }
     if (global_dirty_log) {
         if (listener->log_global_start) {
             listener->log_global_start(listener);
@@ -1905,10 +2078,16 @@
             .offset_within_address_space = int128_get64(fr->addr.start),
             .readonly = fr->readonly,
         };
+        if (fr->dirty_log_mask && listener->log_start) {
+            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
+        }
         if (listener->region_add) {
            listener->region_add(listener, &section);
         }
     }
+    if (listener->commit) {
+        listener->commit(listener);
+    }
 
     flatview_unref(view);
 }
@@ -1992,17 +2171,6 @@ void address_space_destroy(AddressSpace *as)
     call_rcu(as, do_address_space_destroy, rcu);
 }
 
-bool io_mem_read(MemoryRegion *mr, hwaddr addr, uint64_t *pval, unsigned size)
-{
-    return memory_region_dispatch_read(mr, addr, pval, size);
-}
-
-bool io_mem_write(MemoryRegion *mr, hwaddr addr,
-                  uint64_t val, unsigned size)
-{
-    return memory_region_dispatch_write(mr, addr, val, size);
-}
-
 typedef struct MemoryRegionList MemoryRegionList;
 
 struct MemoryRegionList {
@@ -2022,7 +2190,7 @@ static void mtree_print_mr(fprintf_function mon_printf, void *f,
     const MemoryRegion *submr;
     unsigned int i;
 
-    if (!mr || !mr->enabled) {
+    if (!mr) {
         return;
     }
 
@@ -2048,7 +2216,7 @@ static void mtree_print_mr(fprintf_function mon_printf, void *f,
         }
         mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                    " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
-                   "-" TARGET_FMT_plx "\n",
+                   "-" TARGET_FMT_plx "%s\n",
                    base + mr->addr,
                    base + mr->addr
                    + (int128_nz(mr->size) ?
@@ -2064,10 +2232,11 @@ static void mtree_print_mr(fprintf_function mon_printf, void *f,
                    mr->alias_offset
                    + (int128_nz(mr->size) ?
                       (hwaddr)int128_get64(int128_sub(mr->size,
-                                                      int128_one())) : 0));
+                                                      int128_one())) : 0),
+                   mr->enabled ? "" : " [disabled]");
     } else {
         mon_printf(f,
-                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s\n",
+                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s%s\n",
                    base + mr->addr,
                    base + mr->addr
                    + (int128_nz(mr->size) ?
@@ -2077,7 +2246,8 @@ static void mtree_print_mr(fprintf_function mon_printf, void *f,
                    mr->romd_mode ? 'R' : '-',
                    !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                        : '-',
-                   memory_region_name(mr));
+                   memory_region_name(mr),
+                   mr->enabled ? "" : " [disabled]");
     }
 
     QTAILQ_INIT(&submr_print_queue);
@@ -2118,15 +2288,16 @@ void mtree_info(fprintf_function mon_printf, void *f)
     QTAILQ_INIT(&ml_head);
 
     QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
-        mon_printf(f, "%s\n", as->name);
-        mtree_print_mr(mon_printf, f, as->root, 0, 0, &ml_head);
+        mon_printf(f, "address-space: %s\n", as->name);
+        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
+        mon_printf(f, "\n");
     }
 
-    mon_printf(f, "aliases\n");
     /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
-        mon_printf(f, "%s\n", memory_region_name(ml->mr));
-        mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head);
+        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
+        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
+        mon_printf(f, "\n");
    }
 
    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
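
The diff above also passes the old and new per-client dirty-log masks to the log_start/log_stop listener callbacks (see the MEMORY_LISTENER_UPDATE_REGION change). A minimal sketch of a consumer follows, assuming the MemoryListener prototype changes that accompany this patch; the demo_* names are invented, while memory_listener_register() and address_space_memory are existing QEMU APIs.

/* Sketch: a listener observing which DIRTY_MEMORY_* clients changed. */
#include <stdio.h>
#include "exec/memory.h"
#include "exec/address-spaces.h"

static void demo_log_start(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* 'new & ~old' is the set of clients that just started logging,
     * e.g. (1 << DIRTY_MEMORY_MIGRATION) when migration begins. */
    fprintf(stderr, "log_start: mask 0x%x -> 0x%x\n", old, new);
}

static void demo_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    /* 'old & ~new' is the set of clients that stopped logging. */
    fprintf(stderr, "log_stop: mask 0x%x -> 0x%x\n", old, new);
}

static MemoryListener demo_listener = {
    .log_start = demo_log_start,
    .log_stop = demo_log_stop,
    .priority = 10,
};

static void demo_listener_init(void)
{
    memory_listener_register(&demo_listener, &address_space_memory);
}

Passing both masks lets a listener such as KVM enable or disable dirty tracking only when the set of interested clients actually changes, instead of reacting to every transaction commit.
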