*/
#include "qemu/osdep.h"
#include "qapi/error.h"
-#ifndef _WIN32
-#endif
#include "qemu/cutils.h"
#include "cpu.h"
#include "trace-root.h"
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
-#include <fcntl.h>
#include <linux/falloc.h>
#endif
#endif
-#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;
-uintptr_t qemu_real_host_page_size;
-intptr_t qemu_real_host_page_mask;
bool set_preferred_target_page_bits(int bits)
{
} PhysPageMap;
struct AddressSpaceDispatch {
- struct rcu_head rcu;
-
MemoryRegionSection *mru_section;
/* This is a multi-level map on the physical address space.
* The bottom level has pointers to MemoryRegionSections.
*/
PhysPageEntry phys_map;
PhysPageMap map;
- AddressSpace *as;
};
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
MemoryRegion iomem;
- AddressSpace *as;
+ FlatView *fv;
hwaddr base;
uint16_t sub_section[];
} subpage_t;
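/* Illustrative note (assuming 4 KiB target pages): SUBPAGE_IDX() keeps only
 * the offset within a target page, and sub_section[] is allocated with
 * TARGET_PAGE_SIZE entries in subpage_init() below, so every byte offset of
 * the page maps to its own section index in the dispatch map.
 */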
}
}
-static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
+void address_space_dispatch_compact(AddressSpaceDispatch *d)
{
if (d->phys_map.skip) {
phys_page_compact(&d->phys_map, d->map.nodes);
{
MemoryRegionSection *section = atomic_read(&d->mru_section);
subpage_t *subpage;
- bool update;
- if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
- section_covers_addr(section, addr)) {
- update = false;
- } else {
+ if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
+ !section_covers_addr(section, addr)) {
section = phys_page_find(d, addr);
- update = true;
+ atomic_set(&d->mru_section, section);
}
if (resolve_subpage && section->mr->subpage) {
subpage = container_of(section->mr, subpage_t, iomem);
section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
}
- if (update) {
- atomic_set(&d->mru_section, section);
- }
return section;
}
return section;
}
-/* Called from RCU critical section */
-static MemoryRegionSection address_space_do_translate(AddressSpace *as,
- hwaddr addr,
- hwaddr *xlat,
- hwaddr *plen,
- bool is_write,
- bool is_mmio)
+/**
+ * flatview_do_translate - translate an address in FlatView
+ *
+ * @fv: the flat view that we want to translate on
+ * @addr: the address to be translated in the above address space
+ * @xlat: the translated address offset within the memory region. It
+ *        cannot be @NULL.
+ * @plen_out: valid read/write length of the translated address. It
+ *            can be @NULL when we don't care about it.
+ * @page_mask_out: page mask for the translated address. This is only
+ *                 meaningful for IOMMU-translated addresses, since the
+ *                 IOMMU may map pages larger than TARGET_PAGE_SIZE and
+ *                 the mask reports the actual page size. It can be
+ *                 @NULL if we don't care about it.
+ * @is_write: whether the translation operation is for a write
+ * @is_mmio: whether this access can be MMIO; set true if it can
+ *
+ * This function is called from an RCU critical section.
+ */
+static MemoryRegionSection flatview_do_translate(FlatView *fv,
+ hwaddr addr,
+ hwaddr *xlat,
+ hwaddr *plen_out,
+ hwaddr *page_mask_out,
+ bool is_write,
+ bool is_mmio,
+ AddressSpace **target_as)
{
IOMMUTLBEntry iotlb;
MemoryRegionSection *section;
IOMMUMemoryRegion *iommu_mr;
IOMMUMemoryRegionClass *imrc;
+ hwaddr page_mask = (hwaddr)(-1);
+ hwaddr plen = (hwaddr)(-1);
+
+ if (plen_out) {
+ plen = *plen_out;
+ }
for (;;) {
- AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
- section = address_space_translate_internal(d, addr, &addr, plen, is_mmio);
+ section = address_space_translate_internal(
+ flatview_to_dispatch(fv), addr, &addr,
+ &plen, is_mmio);
iommu_mr = memory_region_get_iommu(section->mr);
if (!iommu_mr) {
IOMMU_WO : IOMMU_RO);
addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
| (addr & iotlb.addr_mask));
- *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
+ page_mask &= iotlb.addr_mask;
+ plen = MIN(plen, (addr | iotlb.addr_mask) - addr + 1);
if (!(iotlb.perm & (1 << is_write))) {
goto translate_fail;
}
- as = iotlb.target_as;
+ fv = address_space_to_flatview(iotlb.target_as);
+ *target_as = iotlb.target_as;
}
*xlat = addr;
+ if (page_mask == (hwaddr)(-1)) {
+ /* Not behind an IOMMU, use default page size. */
+ page_mask = ~TARGET_PAGE_MASK;
+ }
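+    /* Example (assuming 4 KiB target pages): ~TARGET_PAGE_MASK is 0xfff, so
+     * a translation that did not go through an IOMMU reports plain
+     * target-page granularity to the caller.
+     */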
+
+ if (page_mask_out) {
+ *page_mask_out = page_mask;
+ }
+
+ if (plen_out) {
+ *plen_out = plen;
+ }
+
return *section;
translate_fail:
bool is_write)
{
MemoryRegionSection section;
- hwaddr xlat, plen;
-
- /* Try to get maximum page mask during translation. */
- plen = (hwaddr)-1;
+ hwaddr xlat, page_mask;
- /* This can never be MMIO. */
- section = address_space_do_translate(as, addr, &xlat, &plen,
- is_write, false);
+ /*
+     * This can never be MMIO, and we don't really care about plen,
+     * only about the page mask.
+ */
+ section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
+ NULL, &page_mask, is_write, false, &as);
/* Illegal translation */
if (section.mr == &io_mem_unassigned) {
xlat += section.offset_within_address_space -
section.offset_within_region;
- if (plen == (hwaddr)-1) {
- /*
- * We use default page size here. Logically it only happens
- * for identity mappings.
- */
- plen = TARGET_PAGE_SIZE;
- }
-
- /* Convert to address mask */
- plen -= 1;
-
return (IOMMUTLBEntry) {
- .target_as = section.address_space,
- .iova = addr & ~plen,
- .translated_addr = xlat & ~plen,
- .addr_mask = plen,
+ .target_as = as,
+ .iova = addr & ~page_mask,
+ .translated_addr = xlat & ~page_mask,
+ .addr_mask = page_mask,
/* IOTLBs are for DMAs, and DMA only allows on RAMs. */
.perm = IOMMU_RW,
};
}
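/* Illustrative example (assumed values): if the vIOMMU maps a 2 MiB page,
 * iotlb.addr_mask is 0x1fffff, so the returned entry has .iova and
 * .translated_addr rounded down to the 2 MiB boundary and a single IOTLB
 * entry covers the whole huge page.
 */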
/* Called from RCU critical section */
-MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
- hwaddr *xlat, hwaddr *plen,
- bool is_write)
+MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
+ hwaddr *plen, bool is_write)
{
MemoryRegion *mr;
MemoryRegionSection section;
+ AddressSpace *as = NULL;
/* This can be MMIO, so setup MMIO bit. */
- section = address_space_do_translate(as, addr, xlat, plen, is_write, true);
+ section = flatview_do_translate(fv, addr, xlat, plen, NULL,
+ is_write, true, &as);
mr = section.mr;
if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
cpu->interrupt_request &= ~0x01;
tlb_flush(cpu);
+ /* loadvm has just updated the content of RAM, bypassing the
+ * usual mechanisms that ensure we flush TBs for writes to
+ * memory we've translated code from. So we must flush all TBs,
+ * which will now be stale.
+ */
+ tb_flush(cpu);
+
return 0;
}
}
#if !defined(CONFIG_USER_ONLY)
-void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
+void cpu_address_space_init(CPUState *cpu, int asidx,
+ const char *prefix, MemoryRegion *mr)
{
CPUAddressSpace *newas;
+ AddressSpace *as = g_new0(AddressSpace, 1);
+ char *as_name;
+
+ assert(mr);
+ as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index);
+ address_space_init(as, mr, as_name);
+ g_free(as_name);
/* Target code should have set num_ases before calling us */
assert(asidx < cpu->num_ases);
void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
- CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ static bool tcg_target_initialized;
cpu_list_add(cpu);
+ if (tcg_enabled() && !tcg_target_initialized) {
+ tcg_target_initialized = true;
+ cc->tcg_initialize();
+ }
+
#ifndef CONFIG_USER_ONLY
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
} else {
AddressSpaceDispatch *d;
-        d = atomic_rcu_read(&section->address_space->dispatch);
+ d = flatview_to_dispatch(section->fv);
iotlb = section - d->map.sections;
iotlb += xlat;
}
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
uint16_t section);
-static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
+static subpage_t *subpage_init(FlatView *fv, hwaddr base);
-static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
+static void *(*phys_mem_alloc)(size_t size, uint64_t *align, bool shared) =
qemu_anon_ram_alloc;
/*
* Accelerators with unusual needs may need this. Hopefully, we can
* get rid of it eventually.
*/
-void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
+void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align, bool shared))
{
phys_mem_alloc = alloc;
}
g_free(map->nodes);
}
-static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
+static void register_subpage(FlatView *fv, MemoryRegionSection *section)
{
+ AddressSpaceDispatch *d = flatview_to_dispatch(fv);
subpage_t *subpage;
hwaddr base = section->offset_within_address_space
& TARGET_PAGE_MASK;
assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
if (!(existing->mr->subpage)) {
- subpage = subpage_init(d->as, base);
- subsection.address_space = d->as;
+ subpage = subpage_init(fv, base);
+ subsection.fv = fv;
subsection.mr = &subpage->iomem;
phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
phys_section_add(&d->map, &subsection));
}
-static void register_multipage(AddressSpaceDispatch *d,
+static void register_multipage(FlatView *fv,
MemoryRegionSection *section)
{
+ AddressSpaceDispatch *d = flatview_to_dispatch(fv);
hwaddr start_addr = section->offset_within_address_space;
uint16_t section_index = phys_section_add(&d->map, section);
uint64_t num_pages = int128_get64(int128_rshift(section->size,
phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
-static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
+void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
{
- AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
- AddressSpaceDispatch *d = as->next_dispatch;
MemoryRegionSection now = *section, remain = *section;
Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
- now.offset_within_address_space;
now.size = int128_min(int128_make64(left), now.size);
- register_subpage(d, &now);
+ register_subpage(fv, &now);
} else {
now.size = int128_zero();
}
remain.offset_within_region += int128_get64(now.size);
now = remain;
if (int128_lt(remain.size, page_size)) {
- register_subpage(d, &now);
+ register_subpage(fv, &now);
} else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
now.size = page_size;
- register_subpage(d, &now);
+ register_subpage(fv, &now);
} else {
now.size = int128_and(now.size, int128_neg(page_size));
- register_multipage(d, &now);
+ register_multipage(fv, &now);
}
}
}
void *area;
block->page_size = qemu_fd_getpagesize(fd);
- block->mr->align = block->page_size;
+ if (block->mr->align % block->page_size) {
+ error_setg(errp, "alignment 0x%" PRIx64
+ " must be multiples of page size 0x%zx",
+ block->mr->align, block->page_size);
+ return NULL;
+ }
+ block->mr->align = MAX(block->page_size, block->mr->align);
#if defined(__s390x__)
if (kvm_enabled()) {
block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
}
#endif
-/* Called with the ramlist lock held. */
+/* Allocate space within the ram_addr_t space that governs the
+ * dirty bitmaps.
+ * Called with the ramlist lock held.
+ */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
RAMBlock *block, *next_block;
}
RAMBLOCK_FOREACH(block) {
- ram_addr_t end, next = RAM_ADDR_MAX;
+ ram_addr_t candidate, next = RAM_ADDR_MAX;
- end = block->offset + block->max_length;
+        /* Align blocks to start on a 'long' in the bitmap,
+         * which makes bitmap syncing take the fast path.
+ */
+ candidate = block->offset + block->max_length;
+ candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS);
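+        /* For example, assuming 64-bit longs and 4 KiB target pages, each
+         * candidate is rounded up to a 256 KiB (64 * 4 KiB) boundary.
+         */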
+ /* Search for the closest following block
+ * and find the gap.
+ */
RAMBLOCK_FOREACH(next_block) {
- if (next_block->offset >= end) {
+ if (next_block->offset >= candidate) {
next = MIN(next, next_block->offset);
}
}
- if (next - end >= size && next - end < mingap) {
- offset = end;
- mingap = next - end;
+
+        /* If it fits, remember our place and the size of the gap,
+         * but keep going so that we might find a smaller gap to
+         * fill, avoiding fragmentation.
+ */
+ if (next - candidate >= size && next - candidate < mingap) {
+ offset = candidate;
+ mingap = next - candidate;
}
+
+ trace_find_ram_offset_loop(size, candidate, offset, next, mingap);
}
if (offset == RAM_ADDR_MAX) {
abort();
}
+ trace_find_ram_offset(size, offset);
+
return offset;
}
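/* Illustrative walk-through (assumed values, 64-bit longs, 4 KiB target
 * pages): with blocks at [0x0, 0x20000) and [0x100000, ...) and a request
 * for size 0x80000, the first block yields candidate ROUND_UP(0x20000,
 * 0x40000) = 0x40000 and a gap of 0xc0000 up to the next block; that gap
 * fits and no smaller suitable gap exists, so 0x40000 is returned.
 */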
}
}
-static void ram_block_add(RAMBlock *new_block, Error **errp)
+static void ram_block_add(RAMBlock *new_block, Error **errp, bool shared)
{
RAMBlock *block;
RAMBlock *last_block = NULL;
}
} else {
new_block->host = phys_mem_alloc(new_block->max_length,
- &new_block->mr->align);
+ &new_block->mr->align, shared);
if (!new_block->host) {
error_setg_errno(errp, errno,
"cannot set up guest memory '%s'",
return NULL;
}
- ram_block_add(new_block, &local_err);
+ ram_block_add(new_block, &local_err, share);
if (local_err) {
g_free(new_block);
error_propagate(errp, local_err);
void (*resized)(const char*,
uint64_t length,
void *host),
- void *host, bool resizeable,
+ void *host, bool resizeable, bool share,
MemoryRegion *mr, Error **errp)
{
RAMBlock *new_block;
if (resizeable) {
new_block->flags |= RAM_RESIZEABLE;
}
- ram_block_add(new_block, &local_err);
+ ram_block_add(new_block, &local_err, share);
if (local_err) {
g_free(new_block);
error_propagate(errp, local_err);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
MemoryRegion *mr, Error **errp)
{
- return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
+ return qemu_ram_alloc_internal(size, size, NULL, host, false,
+ false, mr, errp);
}
-RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
+RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share,
+ MemoryRegion *mr, Error **errp)
{
- return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
+ return qemu_ram_alloc_internal(size, size, NULL, NULL, false,
+ share, mr, errp);
}
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
void *host),
MemoryRegion *mr, Error **errp)
{
- return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
+ return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true,
+ false, mr, errp);
}
static void reclaim_ramblock(RAMBlock *block)
flags, -1, 0);
}
if (area != vaddr) {
- fprintf(stderr, "Could not remap addr: "
- RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
- length, addr);
+ error_report("Could not remap addr: "
+ RAM_ADDR_FMT "@" RAM_ADDR_FMT "",
+ length, addr);
exit(1);
}
memory_try_enable_merging(vaddr, length);
return block->offset + offset;
}
-/* Called within RCU critical section. */
-static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
- uint64_t val, unsigned size)
+/* Called within RCU critical section. */
+void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
+ CPUState *cpu,
+ vaddr mem_vaddr,
+ ram_addr_t ram_addr,
+ unsigned size)
{
- bool locked = false;
+ ndi->cpu = cpu;
+ ndi->ram_addr = ram_addr;
+ ndi->mem_vaddr = mem_vaddr;
+ ndi->size = size;
+ ndi->locked = false;
assert(tcg_enabled());
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
- locked = true;
+ ndi->locked = true;
tb_lock();
tb_invalidate_phys_page_fast(ram_addr, size);
}
+}
+
+/* Called within RCU critical section. */
+void memory_notdirty_write_complete(NotDirtyInfo *ndi)
+{
+ if (ndi->locked) {
+ tb_unlock();
+ }
+
+ /* Set both VGA and migration bits for simplicity and to remove
+ * the notdirty callback faster.
+ */
+ cpu_physical_memory_set_dirty_range(ndi->ram_addr, ndi->size,
+ DIRTY_CLIENTS_NOCODE);
+    /* We remove the notdirty callback only if the code has been
+     * flushed.
+     */
+ if (!cpu_physical_memory_is_clean(ndi->ram_addr)) {
+ tlb_set_dirty(ndi->cpu, ndi->mem_vaddr);
+ }
+}
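+
+/* A minimal usage sketch (hypothetical caller): a direct store into RAM
+ * that may contain translated code is bracketed as
+ *
+ *     NotDirtyInfo ndi;
+ *     memory_notdirty_write_prepare(&ndi, cpu, vaddr, ram_addr, size);
+ *     ... perform the store into the RAM block ...
+ *     memory_notdirty_write_complete(&ndi);
+ *
+ * notdirty_mem_write() below follows exactly this pattern.
+ */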
+
+/* Called within RCU critical section. */
+static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
+ uint64_t val, unsigned size)
+{
+ NotDirtyInfo ndi;
+
+ memory_notdirty_write_prepare(&ndi, current_cpu, current_cpu->mem_io_vaddr,
+ ram_addr, size);
+
switch (size) {
case 1:
stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
case 4:
stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
break;
+ case 8:
+ stq_p(qemu_map_ram_ptr(NULL, ram_addr), val);
+ break;
default:
abort();
}
-
- if (locked) {
- tb_unlock();
- }
-
- /* Set both VGA and migration bits for simplicity and to remove
- * the notdirty callback faster.
- */
- cpu_physical_memory_set_dirty_range(ram_addr, size,
- DIRTY_CLIENTS_NOCODE);
- /* we remove the notdirty callback only if the code has been
- flushed */
- if (!cpu_physical_memory_is_clean(ram_addr)) {
- tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
- }
+ memory_notdirty_write_complete(&ndi);
}
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
.write = notdirty_mem_write,
.valid.accepts = notdirty_mem_accepts,
.endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ .unaligned = false,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ .unaligned = false,
+ },
};
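/* Note: the .valid limits above reject guest accesses outside 1-8 bytes,
 * while the .impl limits tell the memory core that the callbacks handle
 * those sizes themselves, so no further splitting or combining is needed.
 */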
/* Generate a debug exception if a watchpoint has been hit. */
{
CPUState *cpu = current_cpu;
CPUClass *cc = CPU_GET_CLASS(cpu);
- CPUArchState *env = cpu->env_ptr;
- target_ulong pc, cs_base;
target_ulong vaddr;
CPUWatchpoint *wp;
- uint32_t cpu_flags;
assert(tcg_enabled());
if (cpu->watchpoint_hit) {
cpu->exception_index = EXCP_DEBUG;
cpu_loop_exit(cpu);
} else {
- cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
- tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
+ /* Force execution of one insn next time. */
+ cpu->cflags_next_tb = 1 | curr_cflags();
cpu_loop_exit_noexc(cpu);
}
}
case 4:
data = address_space_ldl(as, addr, attrs, &res);
break;
+ case 8:
+ data = address_space_ldq(as, addr, attrs, &res);
+ break;
default: abort();
}
*pdata = data;
case 4:
address_space_stl(as, addr, val, attrs, &res);
break;
+ case 8:
+ address_space_stq(as, addr, val, attrs, &res);
+ break;
default: abort();
}
return res;
.read_with_attrs = watch_mem_read,
.write_with_attrs = watch_mem_write,
.endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ .unaligned = false,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ .unaligned = false,
+ },
};
+static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
+ MemTxAttrs attrs, uint8_t *buf, int len);
+static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
+ const uint8_t *buf, int len);
+static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
+ bool is_write);
+
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
unsigned len, MemTxAttrs attrs)
{
printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
subpage, len, addr);
#endif
- res = address_space_read(subpage->as, addr + subpage->base,
- attrs, buf, len);
+ res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len);
if (res) {
return res;
}
default:
abort();
}
- return address_space_write(subpage->as, addr + subpage->base,
- attrs, buf, len);
+ return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len);
}
static bool subpage_accepts(void *opaque, hwaddr addr,
__func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif
- return address_space_access_valid(subpage->as, addr + subpage->base,
- len, is_write);
+ return flatview_access_valid(subpage->fv, addr + subpage->base,
+ len, is_write);
}
static const MemoryRegionOps subpage_ops = {
return 0;
}
-static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
+static subpage_t *subpage_init(FlatView *fv, hwaddr base)
{
subpage_t *mmio;
mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
- mmio->as = as;
+ mmio->fv = fv;
mmio->base = base;
memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
NULL, TARGET_PAGE_SIZE);
return mmio;
}
-static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
- MemoryRegion *mr)
+static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
{
- assert(as);
+ assert(fv);
MemoryRegionSection section = {
- .address_space = as,
+ .fv = fv,
.mr = mr,
.offset_within_address_space = 0,
.offset_within_region = 0,
    return phys_section_add(map, &section);
}
+static void readonly_mem_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ /* Ignore any write to ROM. */
+}
+
+static bool readonly_mem_accepts(void *opaque, hwaddr addr,
+ unsigned size, bool is_write)
+{
+ return is_write;
+}
+
+/* This will only be used for writes, because reads are special-cased
+ * to directly access the underlying host RAM.
+ */
+static const MemoryRegionOps readonly_mem_ops = {
+ .write = readonly_mem_write,
+ .valid.accepts = readonly_mem_accepts,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ .unaligned = false,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ .unaligned = false,
+ },
+};
+
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
int asidx = cpu_asidx_from_attrs(cpu, attrs);
static void io_mem_init(void)
{
- memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
+ memory_region_init_io(&io_mem_rom, NULL, &readonly_mem_ops,
+ NULL, NULL, UINT64_MAX);
memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
NULL, UINT64_MAX);
NULL, UINT64_MAX);
}
-static void mem_begin(MemoryListener *listener)
+AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
{
- AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
uint16_t n;
- n = dummy_section(&d->map, as, &io_mem_unassigned);
+ n = dummy_section(&d->map, fv, &io_mem_unassigned);
assert(n == PHYS_SECTION_UNASSIGNED);
- n = dummy_section(&d->map, as, &io_mem_notdirty);
+ n = dummy_section(&d->map, fv, &io_mem_notdirty);
assert(n == PHYS_SECTION_NOTDIRTY);
- n = dummy_section(&d->map, as, &io_mem_rom);
+ n = dummy_section(&d->map, fv, &io_mem_rom);
assert(n == PHYS_SECTION_ROM);
- n = dummy_section(&d->map, as, &io_mem_watch);
+ n = dummy_section(&d->map, fv, &io_mem_watch);
assert(n == PHYS_SECTION_WATCH);
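    /* The dummy sections are added in this fixed order so that the
     * PHYS_SECTION_* constants match their indices in d->map.sections[],
     * which is what the asserts above check.
     */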
d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
- d->as = as;
- as->next_dispatch = d;
+
+ return d;
}
-static void address_space_dispatch_free(AddressSpaceDispatch *d)
+void address_space_dispatch_free(AddressSpaceDispatch *d)
{
phys_sections_free(&d->map);
g_free(d);
}
-static void mem_commit(MemoryListener *listener)
-{
- AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
- AddressSpaceDispatch *cur = as->dispatch;
- AddressSpaceDispatch *next = as->next_dispatch;
-
- phys_page_compact_all(next, next->map.nodes_nb);
-
- atomic_rcu_set(&as->dispatch, next);
- if (cur) {
- call_rcu(cur, address_space_dispatch_free, rcu);
- }
-}
-
static void tcg_commit(MemoryListener *listener)
{
CPUAddressSpace *cpuas;
* We reload the dispatch pointer now because cpu_reloading_memory_map()
* may have split the RCU critical section.
*/
- d = atomic_rcu_read(&cpuas->as->dispatch);
+ d = address_space_to_dispatch(cpuas->as);
atomic_rcu_set(&cpuas->memory_dispatch, d);
tlb_flush(cpuas->cpu);
}
-void address_space_init_dispatch(AddressSpace *as)
-{
- as->dispatch = NULL;
- as->dispatch_listener = (MemoryListener) {
- .begin = mem_begin,
- .commit = mem_commit,
- .region_add = mem_add,
- .region_nop = mem_add,
- .priority = 0,
- };
- memory_listener_register(&as->dispatch_listener, as);
-}
-
-void address_space_unregister(AddressSpace *as)
-{
- memory_listener_unregister(&as->dispatch_listener);
-}
-
-void address_space_destroy_dispatch(AddressSpace *as)
-{
- AddressSpaceDispatch *d = as->dispatch;
-
- atomic_rcu_set(&as->dispatch, NULL);
- if (d) {
- call_rcu(d, address_space_dispatch_free, rcu);
- }
-}
-
static void memory_map_init(void)
{
system_memory = g_malloc(sizeof(*system_memory));
}
/* Called within RCU critical section. */
-static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs,
- const uint8_t *buf,
- int len, hwaddr addr1,
- hwaddr l, MemoryRegion *mr)
+static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
+ MemTxAttrs attrs,
+ const uint8_t *buf,
+ int len, hwaddr addr1,
+ hwaddr l, MemoryRegion *mr)
{
uint8_t *ptr;
uint64_t val;
}
l = len;
- mr = address_space_translate(as, addr, &addr1, &l, true);
+ mr = flatview_translate(fv, addr, &addr1, &l, true);
}
return result;
}
-MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
- const uint8_t *buf, int len)
+/* Called from RCU critical section. */
+static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
+ const uint8_t *buf, int len)
{
hwaddr l;
hwaddr addr1;
MemoryRegion *mr;
MemTxResult result = MEMTX_OK;
- if (len > 0) {
- rcu_read_lock();
- l = len;
- mr = address_space_translate(as, addr, &addr1, &l, true);
- result = address_space_write_continue(as, addr, attrs, buf, len,
- addr1, l, mr);
- rcu_read_unlock();
- }
+ l = len;
+ mr = flatview_translate(fv, addr, &addr1, &l, true);
+ result = flatview_write_continue(fv, addr, attrs, buf, len,
+ addr1, l, mr);
return result;
}
/* Called within RCU critical section. */
-MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf,
- int len, hwaddr addr1, hwaddr l,
- MemoryRegion *mr)
+MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
+ MemTxAttrs attrs, uint8_t *buf,
+ int len, hwaddr addr1, hwaddr l,
+ MemoryRegion *mr)
{
uint8_t *ptr;
uint64_t val;
}
l = len;
- mr = address_space_translate(as, addr, &addr1, &l, false);
+ mr = flatview_translate(fv, addr, &addr1, &l, false);
}
return result;
}
-MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf, int len)
+/* Called from RCU critical section. */
+static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
+ MemTxAttrs attrs, uint8_t *buf, int len)
{
hwaddr l;
hwaddr addr1;
MemoryRegion *mr;
+
+ l = len;
+ mr = flatview_translate(fv, addr, &addr1, &l, false);
+ return flatview_read_continue(fv, addr, attrs, buf, len,
+ addr1, l, mr);
+}
+
+MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, uint8_t *buf, int len)
+{
MemTxResult result = MEMTX_OK;
+ FlatView *fv;
if (len > 0) {
rcu_read_lock();
- l = len;
- mr = address_space_translate(as, addr, &addr1, &l, false);
- result = address_space_read_continue(as, addr, attrs, buf, len,
- addr1, l, mr);
+ fv = address_space_to_flatview(as);
+ result = flatview_read(fv, addr, attrs, buf, len);
+ rcu_read_unlock();
+ }
+
+ return result;
+}
+
+MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs,
+ const uint8_t *buf, int len)
+{
+ MemTxResult result = MEMTX_OK;
+ FlatView *fv;
+
+ if (len > 0) {
+ rcu_read_lock();
+ fv = address_space_to_flatview(as);
+ result = flatview_write(fv, addr, attrs, buf, len);
rcu_read_unlock();
}
uint8_t *buf, int len, bool is_write)
{
if (is_write) {
- return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
+ return address_space_write(as, addr, attrs, buf, len);
} else {
- return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
+ return address_space_read_full(as, addr, attrs, buf, len);
}
}
qemu_mutex_unlock(&map_client_list_lock);
}
-bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
+static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
+ bool is_write)
{
MemoryRegion *mr;
hwaddr l, xlat;
- rcu_read_lock();
while (len > 0) {
l = len;
- mr = address_space_translate(as, addr, &xlat, &l, is_write);
+ mr = flatview_translate(fv, addr, &xlat, &l, is_write);
if (!memory_access_is_direct(mr, is_write)) {
l = memory_access_size(mr, l, addr);
if (!memory_region_access_valid(mr, xlat, l, is_write)) {
len -= l;
addr += l;
}
- rcu_read_unlock();
return true;
}
+bool address_space_access_valid(AddressSpace *as, hwaddr addr,
+ int len, bool is_write)
+{
+ FlatView *fv;
+ bool result;
+
+ rcu_read_lock();
+ fv = address_space_to_flatview(as);
+ result = flatview_access_valid(fv, addr, len, is_write);
+ rcu_read_unlock();
+ return result;
+}
+
static hwaddr
-address_space_extend_translation(AddressSpace *as, hwaddr addr, hwaddr target_len,
+flatview_extend_translation(FlatView *fv, hwaddr addr,
+ hwaddr target_len,
MemoryRegion *mr, hwaddr base, hwaddr len,
bool is_write)
{
}
len = target_len;
- this_mr = address_space_translate(as, addr, &xlat, &len, is_write);
+ this_mr = flatview_translate(fv, addr, &xlat,
+ &len, is_write);
if (this_mr != mr || xlat != base + done) {
return done;
}
hwaddr l, xlat;
MemoryRegion *mr;
void *ptr;
+ FlatView *fv;
if (len == 0) {
return NULL;
l = len;
rcu_read_lock();
- mr = address_space_translate(as, addr, &xlat, &l, is_write);
+ fv = address_space_to_flatview(as);
+ mr = flatview_translate(fv, addr, &xlat, &l, is_write);
if (!memory_access_is_direct(mr, is_write)) {
if (atomic_xchg(&bounce.in_use, true)) {
memory_region_ref(mr);
bounce.mr = mr;
if (!is_write) {
- address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
+ flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED,
bounce.buffer, l);
}
memory_region_ref(mr);
- *plen = address_space_extend_translation(as, addr, len, mr, xlat, l, is_write);
+ *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
+ l, is_write);
ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
rcu_read_unlock();
{
/* NOTE: we can always suppose that qemu_host_page_size >=
TARGET_PAGE_SIZE */
- qemu_real_host_page_size = getpagesize();
- qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
if (qemu_host_page_size == 0) {
qemu_host_page_size = qemu_real_host_page_size;
}
}
qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
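/* For example, on a host with 64 KiB pages running a target with 4 KiB
 * pages, qemu_host_page_size ends up as 0x10000 and qemu_host_page_mask as
 * ~0xffff, while TARGET_PAGE_SIZE stays 0x1000.
 */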
+
+#if !defined(CONFIG_USER_ONLY)
+
+static void mtree_print_phys_entries(fprintf_function mon, void *f,
+ int start, int end, int skip, int ptr)
+{
+ if (start == end - 1) {
+ mon(f, "\t%3d ", start);
+ } else {
+ mon(f, "\t%3d..%-3d ", start, end - 1);
+ }
+ mon(f, " skip=%d ", skip);
+ if (ptr == PHYS_MAP_NODE_NIL) {
+ mon(f, " ptr=NIL");
+ } else if (!skip) {
+ mon(f, " ptr=#%d", ptr);
+ } else {
+ mon(f, " ptr=[%d]", ptr);
+ }
+ mon(f, "\n");
+}
+
+#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
+ int128_sub((size), int128_one())) : 0)
+
+void mtree_print_dispatch(fprintf_function mon, void *f,
+ AddressSpaceDispatch *d, MemoryRegion *root)
+{
+ int i;
+
+ mon(f, " Dispatch\n");
+ mon(f, " Physical sections\n");
+
+ for (i = 0; i < d->map.sections_nb; ++i) {
+ MemoryRegionSection *s = d->map.sections + i;
+ const char *names[] = { " [unassigned]", " [not dirty]",
+ " [ROM]", " [watch]" };
+
+ mon(f, " #%d @" TARGET_FMT_plx ".." TARGET_FMT_plx " %s%s%s%s%s",
+ i,
+ s->offset_within_address_space,
+ s->offset_within_address_space + MR_SIZE(s->mr->size),
+ s->mr->name ? s->mr->name : "(noname)",
+ i < ARRAY_SIZE(names) ? names[i] : "",
+ s->mr == root ? " [ROOT]" : "",
+ s == d->mru_section ? " [MRU]" : "",
+ s->mr->is_iommu ? " [iommu]" : "");
+
+ if (s->mr->alias) {
+ mon(f, " alias=%s", s->mr->alias->name ?
+ s->mr->alias->name : "noname");
+ }
+ mon(f, "\n");
+ }
+
+ mon(f, " Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n",
+ P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip);
+ for (i = 0; i < d->map.nodes_nb; ++i) {
+ int j, jprev;
+ PhysPageEntry prev;
+ Node *n = d->map.nodes + i;
+
+ mon(f, " [%d]\n", i);
+
+ for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) {
+ PhysPageEntry *pe = *n + j;
+
+ if (pe->ptr == prev.ptr && pe->skip == prev.skip) {
+ continue;
+ }
+
+ mtree_print_phys_entries(mon, f, jprev, j, prev.skip, prev.ptr);
+
+ jprev = j;
+ prev = *pe;
+ }
+
+ if (jprev != ARRAY_SIZE(*n)) {
+ mtree_print_phys_entries(mon, f, jprev, j, prev.skip, prev.ptr);
+ }
+ }
+}
+
+#endif