#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
-#include "cpu.h"
#include "exec/memory.h"
-#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/tcg.h"
-#include "sysemu/accel.h"
+#include "qemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"
static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
MemoryRegionIoeventfd *b)
{
- return !memory_region_ioeventfd_before(a, b)
- && !memory_region_ioeventfd_before(b, a);
+ if (int128_eq(a->addr.start, b->addr.start) &&
+ (!int128_nz(a->addr.size) || !int128_nz(b->addr.size) ||
+ (int128_eq(a->addr.size, b->addr.size) &&
+ (a->match_data == b->match_data) &&
+ ((a->match_data && (a->data == b->data)) || !a->match_data) &&
+ (a->e == b->e)))) {
+ return true;
+ }
+
+ return false;
}
/* Range of memory in the global map. Addresses are absolute. */
assert(cb);
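+ /*
+ * The callback now also receives fr->offset_in_region, the range's
+ * offset within its MemoryRegion, so callers need not look it up
+ * separately.
+ */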
FOR_EACH_FLAT_RANGE(fr, fv) {
- if (cb(fr->addr.start, fr->addr.size, fr->mr, opaque))
+ if (cb(fr->addr.start, fr->addr.size, fr->mr,
+ fr->offset_in_region, opaque)) {
break;
+ }
}
}
unsigned size = memop_size(op);
MemTxResult r;
- fuzz_dma_read_cb(addr, size, mr, false);
if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
*pval = unassigned_mem_read(mr, addr, size);
return MEMTX_DECODE_ERROR;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
- mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
if (err) {
mr->size = int128_zero();
object_unparent(OBJECT(mr));
mr->destructor = memory_region_destructor_ram;
mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
mr, &err);
- mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
if (err) {
mr->size = int128_zero();
object_unparent(OBJECT(mr));
#ifdef CONFIG_POSIX
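+/*
+ * The new `readonly' flag requests a read-only host mapping and
+ * marks the resulting region itself read-only, e.g. for RAM backed
+ * by an immutable file.
+ */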
void memory_region_init_ram_from_file(MemoryRegion *mr,
- struct Object *owner,
+ Object *owner,
const char *name,
uint64_t size,
uint64_t align,
uint32_t ram_flags,
const char *path,
+ bool readonly,
Error **errp)
{
Error *err = NULL;
memory_region_init(mr, owner, name, size);
mr->ram = true;
+ mr->readonly = readonly;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
mr->align = align;
- mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
- mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path,
+ readonly, &err);
if (err) {
mr->size = int128_zero();
object_unparent(OBJECT(mr));
}
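+/*
+ * The new `offset' argument starts the RAM block at that offset
+ * within the file descriptor, so several regions can be carved out
+ * of a single fd; passing 0 keeps the old behaviour.
+ */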
void memory_region_init_ram_from_fd(MemoryRegion *mr,
- struct Object *owner,
+ Object *owner,
const char *name,
uint64_t size,
bool share,
int fd,
+ ram_addr_t offset,
Error **errp)
{
Error *err = NULL;
mr->destructor = memory_region_destructor_ram;
mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
share ? RAM_SHARED : 0,
- fd, &err);
- mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+ fd, offset, false, &err);
if (err) {
mr->size = int128_zero();
object_unparent(OBJECT(mr));
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
- mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
/* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
assert(ptr != NULL);
mr->ops = &ram_device_mem_ops;
mr->opaque = mr;
mr->destructor = memory_region_destructor_ram;
- mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+
/* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
assert(ptr != NULL);
mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
- struct Object *owner,
+ Object *owner,
const char *name,
uint64_t size,
Error **errp)
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
uint8_t mask = mr->dirty_log_mask;
- if (global_dirty_log && mr->ram_block) {
+ RAMBlock *rb = mr->ram_block;
+
+ if (global_dirty_log && ((rb && qemu_ram_is_migratable(rb)) ||
+ memory_region_is_iommu(mr))) {
mask |= (1 << DIRTY_MEMORY_MIGRATION);
}
+
+ if (tcg_enabled() && rb) {
+ /* TCG only cares about dirty memory logging for RAM, not IOMMU. */
+ mask |= (1 << DIRTY_MEMORY_CODE);
+ }
return mask;
}
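+/*
+ * Note that DIRTY_MEMORY_CODE is now derived here on the fly rather
+ * than stored in mr->dirty_log_mask at region creation (hence the
+ * removals in the init functions above). For example, with TCG
+ * enabled and global dirty logging active, migratable RAM reports
+ * (1 << DIRTY_MEMORY_CODE) | (1 << DIRTY_MEMORY_MIGRATION).
+ */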
return ret;
}
+int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
+ uint64_t page_size_mask,
+ Error **errp)
+{
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
+ int ret = 0;
+
+ if (imrc->iommu_set_page_size_mask) {
+ ret = imrc->iommu_set_page_size_mask(iommu_mr, page_size_mask, errp);
+ }
+ return ret;
+}
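+/*
+ * Usage sketch (hypothetical caller): a VFIO-style container that
+ * only supports 4k pages could constrain a vIOMMU region with
+ *
+ *     memory_region_iommu_set_page_size_mask(iommu_mr, 1ULL << 12,
+ *                                            &err);
+ *
+ * When the IOMMU class does not implement the hook, the call
+ * succeeds without applying any constraint.
+ */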
+
int memory_region_register_iommu_notifier(MemoryRegion *mr,
IOMMUNotifier *n, Error **errp)
{
memory_region_update_iommu_notify_flags(iommu_mr, NULL);
}
-void memory_region_notify_one(IOMMUNotifier *notifier,
- IOMMUTLBEntry *entry)
+void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
+ IOMMUTLBEvent *event)
{
- IOMMUNotifierFlag request_flags;
+ IOMMUTLBEntry *entry = &event->entry;
hwaddr entry_end = entry->iova + entry->addr_mask;
+ IOMMUTLBEntry tmp = *entry;
+
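+ /* An unmap event's entry must carry no permission bits. */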
+ if (event->type == IOMMU_NOTIFIER_UNMAP) {
+ assert(entry->perm == IOMMU_NONE);
+ }
/*
* Skip the notification if it does not overlap with the
* registered range.
*/
return;
}
- assert(entry->iova >= notifier->start && entry_end <= notifier->end);
-
- if (entry->perm & IOMMU_RW) {
- request_flags = IOMMU_NOTIFIER_MAP;
+ if (notifier->notifier_flags & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
+ /* Crop (iova, addr_mask) to the notifier's registered range. */
+ tmp.iova = MAX(tmp.iova, notifier->start);
+ tmp.addr_mask = MIN(entry_end, notifier->end) - tmp.iova;
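+ /*
+ * Example: a notifier covering [0x1000, 0x1fff] receiving an
+ * event with iova=0x0, addr_mask=0x3fff is handed a cropped
+ * copy with iova=0x1000, addr_mask=0xfff.
+ */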
} else {
- request_flags = IOMMU_NOTIFIER_UNMAP;
+ assert(entry->iova >= notifier->start && entry_end <= notifier->end);
}
- if (notifier->notifier_flags & request_flags) {
- notifier->notify(notifier, entry);
+ if (event->type & notifier->notifier_flags) {
+ notifier->notify(notifier, &tmp);
}
}
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
int iommu_idx,
- IOMMUTLBEntry entry)
+ IOMMUTLBEvent event)
{
IOMMUNotifier *iommu_notifier;
IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
if (iommu_notifier->iommu_idx == iommu_idx) {
- memory_region_notify_one(iommu_notifier, &entry);
+ memory_region_notify_iommu_one(iommu_notifier, &event);
}
}
}
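+/*
+ * Callers now wrap the translation in an IOMMUTLBEvent rather than a
+ * bare IOMMUTLBEntry. A minimal unmap notification might look like
+ * this (sketch; iova/mask stand for the caller's values):
+ *
+ *     IOMMUTLBEvent event = {
+ *         .type = IOMMU_NOTIFIER_UNMAP,
+ *         .entry = {
+ *             .target_as = &address_space_memory,
+ *             .iova = iova,
+ *             .addr_mask = mask,
+ *             .perm = IOMMU_NONE,
+ *         },
+ *     };
+ *     memory_region_notify_iommu(iommu_mr, 0, event);
+ */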
memory_region_get_dirty_log_mask(mr));
}
+/*
+ * If memory region `mr' is NULL, do a global sync. Otherwise, sync
+ * the dirty bitmap for the specified memory region.
+ */
static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
MemoryListener *listener;
* address space once.
*/
QTAILQ_FOREACH(listener, &memory_listeners, link) {
- if (!listener->log_sync) {
- continue;
- }
- as = listener->address_space;
- view = address_space_get_flatview(as);
- FOR_EACH_FLAT_RANGE(fr, view) {
- if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
- MemoryRegionSection mrs = section_from_flat_range(fr, view);
- listener->log_sync(listener, &mrs);
+ if (listener->log_sync) {
+ as = listener->address_space;
+ view = address_space_get_flatview(as);
+ FOR_EACH_FLAT_RANGE(fr, view) {
+ if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
+ MemoryRegionSection mrs = section_from_flat_range(fr, view);
+ listener->log_sync(listener, &mrs);
+ }
}
+ flatview_unref(view);
+ } else if (listener->log_sync_global) {
+ /*
+ * Whether or not MR is specified, all we can do here is a
+ * global sync, because this listener cannot sync at any finer
+ * granularity.
+ */
+ listener->log_sync_global(listener);
}
- flatview_unref(view);
}
}
MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}
-static void memory_vm_change_state_handler(void *opaque, int running,
+static void memory_vm_change_state_handler(void *opaque, bool running,
RunState state)
{
if (running) {
{
MemoryListener *other = NULL;
+ /* A listener may define log_sync or log_sync_global, but not both. */
+ assert(!(listener->log_sync && listener->log_sync_global));
+
listener->address_space = as;
if (QTAILQ_EMPTY(&memory_listeners)
|| listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
}
void memory_region_init_ram(MemoryRegion *mr,
- struct Object *owner,
+ Object *owner,
const char *name,
uint64_t size,
Error **errp)
}
void memory_region_init_rom(MemoryRegion *mr,
- struct Object *owner,
+ Object *owner,
const char *name,
uint64_t size,
Error **errp)
}
void memory_region_init_rom_device(MemoryRegion *mr,
- struct Object *owner,
+ Object *owner,
const MemoryRegionOps *ops,
void *opaque,
const char *name,
#ifdef CONFIG_FUZZ
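+/*
+ * Weak no-op default: a fuzzer build overrides this symbol with its
+ * own DMA-read hook. The is_write argument was dropped, presumably
+ * because the callback only instruments reads.
+ */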
void __attribute__((weak)) fuzz_dma_read_cb(size_t addr,
size_t len,
- MemoryRegion *mr,
- bool is_write)
+ MemoryRegion *mr)
{
}
#endif