* (Set during postcopy)
*/
#define RAM_UF_ZEROPAGE (1 << 3)
+
+/* RAM can be migrated */
+#define RAM_MIGRATABLE (1 << 4)
#endif
#ifdef TARGET_PAGE_BITS_VARY
* @is_write: whether the translation operation is for write
* @is_mmio: whether this can be MMIO, set true if it can
* @target_as: the address space targeted by the IOMMU
+ * @attrs: memory transaction attributes
*
* This function is called from RCU critical section. It is the common
* part of flatview_do_translate and address_space_translate_cached.
hwaddr *page_mask_out,
bool is_write,
bool is_mmio,
- AddressSpace **target_as)
+ AddressSpace **target_as,
+ MemTxAttrs attrs)
{
MemoryRegionSection *section;
hwaddr page_mask = (hwaddr)-1;
do {
hwaddr addr = *xlat;
IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
- IOMMUTLBEntry iotlb = imrc->translate(iommu_mr, addr, is_write ?
- IOMMU_WO : IOMMU_RO);
+ int iommu_idx = 0;
+ IOMMUTLBEntry iotlb;
+
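+ /* IOMMUs which don't distinguish transaction attributes need not
+ * implement attrs_to_index; for those, use index 0.
+ */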
+ if (imrc->attrs_to_index) {
+ iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
+ }
+
+ iotlb = imrc->translate(iommu_mr, addr, is_write ?
+ IOMMU_WO : IOMMU_RO, iommu_idx);
if (!(iotlb.perm & (1 << is_write))) {
goto unassigned;
* @is_write: whether the translation operation is for write
* @is_mmio: whether this can be MMIO, set true if it can
* @target_as: the address space targeted by the IOMMU
+ * @attrs: memory transaction attributes
*
* This function is called from RCU critical section
*/
hwaddr *page_mask_out,
bool is_write,
bool is_mmio,
- AddressSpace **target_as)
+ AddressSpace **target_as,
+ MemTxAttrs attrs)
{
MemoryRegionSection *section;
IOMMUMemoryRegion *iommu_mr;
return address_space_translate_iommu(iommu_mr, xlat,
plen_out, page_mask_out,
is_write, is_mmio,
- target_as);
+ target_as, attrs);
}
if (page_mask_out) {
/* Not behind an IOMMU, use default page size. */
/* Called from RCU critical section */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
- bool is_write)
+ bool is_write, MemTxAttrs attrs)
{
MemoryRegionSection section;
hwaddr xlat, page_mask;
* but page mask.
*/
section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
- NULL, &page_mask, is_write, false, &as);
+ NULL, &page_mask, is_write, false, &as,
+ attrs);
/* Illegal translation */
if (section.mr == &io_mem_unassigned) {
/* Called from RCU critical section */
MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
- hwaddr *plen, bool is_write)
+ hwaddr *plen, bool is_write,
+ MemTxAttrs attrs)
{
MemoryRegion *mr;
MemoryRegionSection section;
/* This can be MMIO, so setup MMIO bit. */
section = flatview_do_translate(fv, addr, xlat, plen, NULL,
- is_write, true, &as);
+ is_write, true, &as, attrs);
mr = section.mr;
if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
return mr;
}
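+/* Record of an IOMMU/IOMMU-index pair whose UNMAP events this CPU has
+ * subscribed to. 'active' is true while the CPU may be caching
+ * translations through this IOMMU; it is cleared once we have flushed
+ * the TLB in response to an unmap.
+ */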
+typedef struct TCGIOMMUNotifier {
+ IOMMUNotifier n;
+ MemoryRegion *mr;
+ CPUState *cpu;
+ int iommu_idx;
+ bool active;
+} TCGIOMMUNotifier;
+
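+/* Unmap notification: translations this CPU may have cached through
+ * the IOMMU are now stale, so conservatively flush its whole TLB.
+ */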
+static void tcg_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
+{
+ TCGIOMMUNotifier *notifier = container_of(n, TCGIOMMUNotifier, n);
+
+ if (!notifier->active) {
+ return;
+ }
+ tlb_flush(notifier->cpu);
+ notifier->active = false;
+ /* We leave the notifier struct on the list to avoid reallocating it later.
+ * Generally the number of IOMMUs a CPU deals with will be small.
+ * In any case we can't unregister the iommu notifier from a notify
+ * callback.
+ */
+}
+
+static void tcg_register_iommu_notifier(CPUState *cpu,
+ IOMMUMemoryRegion *iommu_mr,
+ int iommu_idx)
+{
+ /* Make sure this CPU has an IOMMU notifier registered for this
+ * IOMMU/IOMMU index combination, so that we can flush its TLB
+ * when the IOMMU tells us the mappings we've cached have changed.
+ */
+ MemoryRegion *mr = MEMORY_REGION(iommu_mr);
+ TCGIOMMUNotifier *notifier;
+ int i;
+
+ for (i = 0; i < cpu->iommu_notifiers->len; i++) {
+ notifier = &g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier, i);
+ if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) {
+ break;
+ }
+ }
+ if (i == cpu->iommu_notifiers->len) {
+ /* Not found, add a new entry at the end of the array */
+ cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1);
+ notifier = &g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier, i);
+
+ notifier->mr = mr;
+ notifier->iommu_idx = iommu_idx;
+ notifier->cpu = cpu;
+ /* Rather than trying to register interest in the specific part
+ * of the iommu's address space that we've accessed and then
+ * expand it later as subsequent accesses touch more of it, we
+ * just register interest in the whole thing, on the assumption
+ * that iommu reconfiguration will be rare.
+ */
+ iommu_notifier_init(&notifier->n,
+ tcg_iommu_unmap_notify,
+ IOMMU_NOTIFIER_UNMAP,
+ 0,
+ HWADDR_MAX,
+ iommu_idx);
+ memory_region_register_iommu_notifier(notifier->mr, &notifier->n);
+ }
+
+ if (!notifier->active) {
+ notifier->active = true;
+ }
+}
+
+static void tcg_iommu_free_notifier_list(CPUState *cpu)
+{
+ /* Destroy the CPU's notifier list */
+ int i;
+ TCGIOMMUNotifier *notifier;
+
+ for (i = 0; i < cpu->iommu_notifiers->len; i++) {
+ notifier = &g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier, i);
+ memory_region_unregister_iommu_notifier(notifier->mr, &notifier->n);
+ }
+ g_array_free(cpu->iommu_notifiers, true);
+}
+
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
- hwaddr *xlat, hwaddr *plen)
+ hwaddr *xlat, hwaddr *plen,
+ MemTxAttrs attrs, int *prot)
{
MemoryRegionSection *section;
+ IOMMUMemoryRegion *iommu_mr;
+ IOMMUMemoryRegionClass *imrc;
+ IOMMUTLBEntry iotlb;
+ int iommu_idx;
AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
- section = address_space_translate_internal(d, addr, xlat, plen, false);
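+ /* A translation can be routed through a chain of IOMMUs; loop until
+ * the section we land on is not itself an IOMMU.
+ */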
+ for (;;) {
+ section = address_space_translate_internal(d, addr, &addr, plen, false);
+
+ iommu_mr = memory_region_get_iommu(section->mr);
+ if (!iommu_mr) {
+ break;
+ }
+
+ imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
+
+ iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
+ tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
+ /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
+ * doesn't short-cut its translation table walk.
+ */
+ iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
+ addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
+ | (addr & iotlb.addr_mask));
+ /* Update the caller's prot bits to remove permissions the IOMMU
+ * is giving us a failure response for. If we get down to no
+ * permissions left at all we can give up now.
+ */
+ if (!(iotlb.perm & IOMMU_RO)) {
+ *prot &= ~(PAGE_READ | PAGE_EXEC);
+ }
+ if (!(iotlb.perm & IOMMU_WO)) {
+ *prot &= ~PAGE_WRITE;
+ }
+
+ if (!*prot) {
+ goto translate_fail;
+ }
+
+ d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
+ }
assert(!memory_region_is_iommu(section->mr));
+ *xlat = addr;
return section;
+
+translate_fail:
+ return &d->map.sections[PHYS_SECTION_UNASSIGNED];
}
#endif
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
}
+#ifndef CONFIG_USER_ONLY
+ tcg_iommu_free_notifier_list(cpu);
+#endif
}
Property cpu_common_props[] = {
if (cc->vmsd != NULL) {
vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
}
+
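+ /* g_array_new's clear_ argument zeroes new elements, so freshly
+ * allocated notifier entries start out inactive.
+ */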
+ cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier));
#endif
}
}
#if defined(CONFIG_USER_ONLY)
-static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
+void tb_invalidate_phys_addr(target_ulong addr)
{
mmap_lock();
- tb_lock();
- tb_invalidate_phys_page_range(pc, pc + 1, 0);
- tb_unlock();
+ tb_invalidate_phys_page_range(addr, addr + 1, 0);
mmap_unlock();
}
+
+static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
+{
+ tb_invalidate_phys_addr(pc);
+}
#else
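+/* Invalidate any TB built from the byte at (as, addr), provided the
+ * address translates to RAM or ROMD memory.
+ */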
+void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
+{
+ ram_addr_t ram_addr;
+ MemoryRegion *mr;
+ hwaddr l = 1;
+
+ if (!tcg_enabled()) {
+ return;
+ }
+
+ rcu_read_lock();
+ mr = address_space_translate(as, addr, &addr, &l, false, attrs);
+ if (!(memory_region_is_ram(mr)
+ || memory_region_is_romd(mr))) {
+ rcu_read_unlock();
+ return;
+ }
+ ram_addr = memory_region_get_ram_addr(mr) + addr;
+ tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
+ rcu_read_unlock();
+}
+
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
MemTxAttrs attrs;
struct sigaction act;
sigfillset(&act.sa_mask);
act.sa_handler = SIG_DFL;
+ act.sa_flags = 0;
sigaction(SIGABRT, &act, NULL);
}
#endif
RAMBlock *block;
ram_addr_t end;
+ assert(tcg_enabled());
end = TARGET_PAGE_ALIGN(start + length);
start &= TARGET_PAGE_MASK;
" must be multiples of page size 0x%zx",
block->mr->align, block->page_size);
return NULL;
+ } else if (block->mr->align && !is_power_of_2(block->mr->align)) {
+ error_setg(errp, "alignment 0x%" PRIx64
+ " must be a power of two", block->mr->align);
+ return NULL;
}
block->mr->align = MAX(block->page_size, block->mr->align);
#if defined(__s390x__)
return offset;
}
-unsigned long last_ram_page(void)
+static unsigned long last_ram_page(void)
{
RAMBlock *block;
ram_addr_t last = 0;
rb->flags |= RAM_UF_ZEROPAGE;
}
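+/* Accessors for the RAM_MIGRATABLE flag, which marks blocks that the
+ * migration code should transfer (see
+ * qemu_ram_foreach_migratable_block() below).
+ */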
+bool qemu_ram_is_migratable(RAMBlock *rb)
+{
+ return rb->flags & RAM_MIGRATABLE;
+}
+
+void qemu_ram_set_migratable(RAMBlock *rb)
+{
+ rb->flags |= RAM_MIGRATABLE;
+}
+
+void qemu_ram_unset_migratable(RAMBlock *rb)
+{
+ rb->flags &= ~RAM_MIGRATABLE;
+}
+
/* Called with iothread lock held. */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
ndi->ram_addr = ram_addr;
ndi->mem_vaddr = mem_vaddr;
ndi->size = size;
- ndi->locked = false;
+ ndi->pages = NULL;
assert(tcg_enabled());
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
- ndi->locked = true;
- tb_lock();
- tb_invalidate_phys_page_fast(ram_addr, size);
+ ndi->pages = page_collection_lock(ram_addr, ram_addr + size);
+ tb_invalidate_phys_page_fast(ndi->pages, ram_addr, size);
}
}
/* Called within RCU critical section. */
void memory_notdirty_write_complete(NotDirtyInfo *ndi)
{
- if (ndi->locked) {
- tb_unlock();
+ if (ndi->pages) {
+ assert(tcg_enabled());
+ page_collection_unlock(ndi->pages);
+ ndi->pages = NULL;
}
/* Set both VGA and migration bits for simplicity and to remove
memory_notdirty_write_prepare(&ndi, current_cpu, current_cpu->mem_io_vaddr,
ram_addr, size);
- switch (size) {
- case 1:
- stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
- break;
- case 2:
- stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
- break;
- case 4:
- stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
- break;
- case 8:
- stq_p(qemu_map_ram_ptr(NULL, ram_addr), val);
- break;
- default:
- abort();
- }
+ stn_p(qemu_map_ram_ptr(NULL, ram_addr), size, val);
memory_notdirty_write_complete(&ndi);
}
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
- unsigned size, bool is_write)
+ unsigned size, bool is_write,
+ MemTxAttrs attrs)
{
return is_write;
}
}
cpu->watchpoint_hit = wp;
- /* Both tb_lock and iothread_mutex will be reset when
- * cpu_loop_exit or cpu_loop_exit_noexc longjmp
- * back into the cpu_exec main loop.
- */
- tb_lock();
+ mmap_lock();
tb_check_watchpoint(cpu);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
cpu->exception_index = EXCP_DEBUG;
+ mmap_unlock();
cpu_loop_exit(cpu);
} else {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | curr_cflags();
+ mmap_unlock();
cpu_loop_exit_noexc(cpu);
}
}
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
const uint8_t *buf, int len);
static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
- bool is_write);
+ bool is_write, MemTxAttrs attrs);
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
unsigned len, MemTxAttrs attrs)
if (res) {
return res;
}
- switch (len) {
- case 1:
- *data = ldub_p(buf);
- return MEMTX_OK;
- case 2:
- *data = lduw_p(buf);
- return MEMTX_OK;
- case 4:
- *data = ldl_p(buf);
- return MEMTX_OK;
- case 8:
- *data = ldq_p(buf);
- return MEMTX_OK;
- default:
- abort();
- }
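+ /* ldn_p() covers the 1, 2, 4 and 8 byte cases */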
+ *data = ldn_p(buf, len);
+ return MEMTX_OK;
}
static MemTxResult subpage_write(void *opaque, hwaddr addr,
" value %"PRIx64"\n",
__func__, subpage, len, addr, value);
#endif
- switch (len) {
- case 1:
- stb_p(buf, value);
- break;
- case 2:
- stw_p(buf, value);
- break;
- case 4:
- stl_p(buf, value);
- break;
- case 8:
- stq_p(buf, value);
- break;
- default:
- abort();
- }
+ stn_p(buf, len, value);
return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len);
}
static bool subpage_accepts(void *opaque, hwaddr addr,
- unsigned len, bool is_write)
+ unsigned len, bool is_write,
+ MemTxAttrs attrs)
{
subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
#endif
return flatview_access_valid(subpage->fv, addr + subpage->base,
- len, is_write);
+ len, is_write, attrs);
}
static const MemoryRegionOps subpage_ops = {
}
static bool readonly_mem_accepts(void *opaque, hwaddr addr,
- unsigned size, bool is_write)
+ unsigned size, bool is_write,
+ MemTxAttrs attrs)
{
return is_write;
}
},
};
-MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
+MemoryRegionSection *iotlb_to_section(CPUState *cpu,
+ hwaddr index, MemTxAttrs attrs)
{
int asidx = cpu_asidx_from_attrs(cpu, attrs);
CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
MemoryRegionSection *sections = d->map.sections;
- return sections[index & ~TARGET_PAGE_MASK].mr;
+ return &sections[index & ~TARGET_PAGE_MASK];
}
static void io_mem_init(void)
CPUAddressSpace *cpuas;
AddressSpaceDispatch *d;
+ assert(tcg_enabled());
/* since each CPU stores ram addresses in its TLB cache, we must
reset the modified entries */
cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
}
if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
assert(tcg_enabled());
- tb_lock();
tb_invalidate_phys_range(addr, addr + length);
- tb_unlock();
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
}
cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
l = memory_access_size(mr, l, addr1);
/* XXX: could force current_cpu to NULL to avoid
potential bugs */
- switch (l) {
- case 8:
- /* 64 bit write access */
- val = ldq_p(buf);
- result |= memory_region_dispatch_write(mr, addr1, val, 8,
- attrs);
- break;
- case 4:
- /* 32 bit write access */
- val = (uint32_t)ldl_p(buf);
- result |= memory_region_dispatch_write(mr, addr1, val, 4,
- attrs);
- break;
- case 2:
- /* 16 bit write access */
- val = lduw_p(buf);
- result |= memory_region_dispatch_write(mr, addr1, val, 2,
- attrs);
- break;
- case 1:
- /* 8 bit write access */
- val = ldub_p(buf);
- result |= memory_region_dispatch_write(mr, addr1, val, 1,
- attrs);
- break;
- default:
- abort();
- }
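+ /* memory_access_size() has already clamped l to a width the
+ * region accepts, so a single dispatch suffices */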
+ val = ldn_p(buf, l);
+ result |= memory_region_dispatch_write(mr, addr1, val, l, attrs);
} else {
/* RAM case */
ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
}
l = len;
- mr = flatview_translate(fv, addr, &addr1, &l, true);
+ mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
}
return result;
MemTxResult result = MEMTX_OK;
l = len;
- mr = flatview_translate(fv, addr, &addr1, &l, true);
+ mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
result = flatview_write_continue(fv, addr, attrs, buf, len,
addr1, l, mr);
/* I/O case */
release_lock |= prepare_mmio_access(mr);
l = memory_access_size(mr, l, addr1);
- switch (l) {
- case 8:
- /* 64 bit read access */
- result |= memory_region_dispatch_read(mr, addr1, &val, 8,
- attrs);
- stq_p(buf, val);
- break;
- case 4:
- /* 32 bit read access */
- result |= memory_region_dispatch_read(mr, addr1, &val, 4,
- attrs);
- stl_p(buf, val);
- break;
- case 2:
- /* 16 bit read access */
- result |= memory_region_dispatch_read(mr, addr1, &val, 2,
- attrs);
- stw_p(buf, val);
- break;
- case 1:
- /* 8 bit read access */
- result |= memory_region_dispatch_read(mr, addr1, &val, 1,
- attrs);
- stb_p(buf, val);
- break;
- default:
- abort();
- }
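+ /* as on the write side, l is already clamped to a supported width */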
+ result |= memory_region_dispatch_read(mr, addr1, &val, l, attrs);
+ stn_p(buf, l, val);
} else {
/* RAM case */
ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
}
l = len;
- mr = flatview_translate(fv, addr, &addr1, &l, false);
+ mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
}
return result;
MemoryRegion *mr;
l = len;
- mr = flatview_translate(fv, addr, &addr1, &l, false);
+ mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
return flatview_read_continue(fv, addr, attrs, buf, len,
addr1, l, mr);
}
}
static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
- bool is_write)
+ bool is_write, MemTxAttrs attrs)
{
MemoryRegion *mr;
hwaddr l, xlat;
while (len > 0) {
l = len;
- mr = flatview_translate(fv, addr, &xlat, &l, is_write);
+ mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
if (!memory_access_is_direct(mr, is_write)) {
l = memory_access_size(mr, l, addr);
- if (!memory_region_access_valid(mr, xlat, l, is_write)) {
+ if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) {
return false;
}
}
rcu_read_lock();
fv = address_space_to_flatview(as);
- result = flatview_access_valid(fv, addr, len, is_write);
+ result = flatview_access_valid(fv, addr, len, is_write, attrs);
rcu_read_unlock();
return result;
}
static hwaddr
flatview_extend_translation(FlatView *fv, hwaddr addr,
- hwaddr target_len,
- MemoryRegion *mr, hwaddr base, hwaddr len,
- bool is_write)
+ hwaddr target_len,
+ MemoryRegion *mr, hwaddr base, hwaddr len,
+ bool is_write, MemTxAttrs attrs)
{
hwaddr done = 0;
hwaddr xlat;
len = target_len;
this_mr = flatview_translate(fv, addr, &xlat,
- &len, is_write);
+ &len, is_write, attrs);
if (this_mr != mr || xlat != base + done) {
return done;
}
l = len;
rcu_read_lock();
fv = address_space_to_flatview(as);
- mr = flatview_translate(fv, addr, &xlat, &l, is_write);
+ mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
if (!memory_access_is_direct(mr, is_write)) {
if (atomic_xchg(&bounce.in_use, true)) {
memory_region_ref(mr);
*plen = flatview_extend_translation(fv, addr, len, mr, xlat,
- l, is_write);
+ l, is_write, attrs);
ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
rcu_read_unlock();
#define ARG1 as
#define SUFFIX
#define TRANSLATE(...) address_space_translate(as, __VA_ARGS__)
-#define IS_DIRECT(mr, is_write) memory_access_is_direct(mr, is_write)
-#define MAP_RAM(mr, ofs) qemu_map_ram_ptr((mr)->ram_block, ofs)
-#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define RCU_READ_LOCK(...) rcu_read_lock()
#define RCU_READ_UNLOCK(...) rcu_read_unlock()
#include "memory_ldst.inc.c"
mr = cache->mrs.mr;
memory_region_ref(mr);
if (memory_access_is_direct(mr, is_write)) {
+ /* We don't care about the memory attributes here as we're only
+ * doing this if we found actual RAM, which behaves the same
+ * regardless of attributes; so UNSPECIFIED is fine.
+ */
l = flatview_extend_translation(cache->fv, addr, len, mr,
- cache->xlat, l, is_write);
+ cache->xlat, l, is_write,
+ MEMTXATTRS_UNSPECIFIED);
cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true);
} else {
cache->ptr = NULL;
section = address_space_translate_iommu(iommu_mr, xlat, plen,
NULL, is_write, true,
- &target_as);
+ &target_as, attrs);
return section.mr;
}
#define ARG1 cache
#define SUFFIX _cached_slow
#define TRANSLATE(...) address_space_translate_cached(cache, __VA_ARGS__)
-#define IS_DIRECT(mr, is_write) memory_access_is_direct(mr, is_write)
-#define MAP_RAM(mr, ofs) (cache->ptr + (ofs - cache->xlat))
-#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define RCU_READ_LOCK() ((void)0)
#define RCU_READ_UNLOCK() ((void)0)
#include "memory_ldst.inc.c"
return ret;
}
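+/* Like qemu_ram_foreach_block(), but visits only blocks flagged
+ * RAM_MIGRATABLE; stops early and returns func's result if it is
+ * non-zero.
+ */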
+int qemu_ram_foreach_migratable_block(RAMBlockIterFunc func, void *opaque)
+{
+ RAMBlock *block;
+ int ret = 0;
+
+ rcu_read_lock();
+ RAMBLOCK_FOREACH(block) {
+ if (!qemu_ram_is_migratable(block)) {
+ continue;
+ }
+ ret = func(block->idstr, block->host, block->offset,
+ block->used_length, opaque);
+ if (ret) {
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
/*
* Unmap pages of memory from start to start+length such that
* they a) read as 0, b) Trigger whatever fault mechanism