*/
#include "qemu/osdep.h"
#include "qapi/error.h"
-#ifndef _WIN32
-#endif
#include "qemu/cutils.h"
#include "cpu.h"
#include "trace-root.h"
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
-#include <fcntl.h>
#include <linux/falloc.h>
#endif
{
MemoryRegionSection *section = atomic_read(&d->mru_section);
subpage_t *subpage;
- bool update;
- if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
- section_covers_addr(section, addr)) {
- update = false;
- } else {
+ if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
+ !section_covers_addr(section, addr)) {
section = phys_page_find(d, addr);
- update = true;
+ atomic_set(&d->mru_section, section);
}
if (resolve_subpage && section->mr->subpage) {
subpage = container_of(section->mr, subpage_t, iomem);
section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
}
- if (update) {
- atomic_set(&d->mru_section, section);
- }
return section;
}
cpu->interrupt_request &= ~0x01;
tlb_flush(cpu);
+ /* loadvm has just updated the content of RAM, bypassing the
+ * usual mechanisms that ensure we flush TBs for writes to
+ * memory we've translated code from. So we must flush all TBs,
+ * which will now be stale.
+ */
+ tb_flush(cpu);
+
return 0;
}
}
#if !defined(CONFIG_USER_ONLY)
-void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
+void cpu_address_space_init(CPUState *cpu, int asidx,
+ const char *prefix, MemoryRegion *mr)
{
CPUAddressSpace *newas;
+ AddressSpace *as = g_new0(AddressSpace, 1);
+ char *as_name;
+
+ assert(mr);
+ as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index);
+ address_space_init(as, mr, as_name);
+ g_free(as_name);
/* Target code should have set num_ases before calling us */
assert(asidx < cpu->num_ases);
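/* Illustrative usage sketch, not part of this patch: with the new signature
 * the caller passes an address space index, a name prefix and a MemoryRegion,
 * and the AddressSpace itself is allocated and initialized here.  A target's
 * init/realize hook might therefore look roughly like the following; the
 * function name and the choice of get_system_memory() are assumptions for
 * illustration only.
 */
static void example_cpu_init_address_space(CPUState *cs)
{
    /* Target code must set num_ases before calling cpu_address_space_init() */
    cs->num_ases = 1;
    cpu_address_space_init(cs, 0, "cpu-memory", get_system_memory());
}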
void *area;
block->page_size = qemu_fd_getpagesize(fd);
- block->mr->align = block->page_size;
+ if (block->mr->align % block->page_size) {
+ error_setg(errp, "alignment 0x%" PRIx64
+ " must be multiples of page size 0x%zx",
+ block->mr->align, block->page_size);
+ return NULL;
+ }
+ block->mr->align = MAX(block->page_size, block->mr->align);
#if defined(__s390x__)
if (kvm_enabled()) {
block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
}
#endif
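/* Worked example with illustrative numbers (not from this patch): for a
 * 2 MiB hugetlbfs-backed file, qemu_fd_getpagesize() returns 0x200000.
 * An explicit mr->align of 0x100000 is rejected by the check above because
 * it is not a multiple of the page size, while the default align of 0
 * passes the check and is then raised to 0x200000 by the MAX() that follows.
 */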
-/* Called with the ramlist lock held. */
+/* Allocate space within the ram_addr_t space that governs the
+ * dirty bitmaps.
+ * Called with the ramlist lock held.
+ */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
RAMBlock *block, *next_block;
}
RAMBLOCK_FOREACH(block) {
- ram_addr_t end, next = RAM_ADDR_MAX;
+ ram_addr_t candidate, next = RAM_ADDR_MAX;
- end = block->offset + block->max_length;
+ /* Align blocks to start on a 'long' in the bitmap
+ * which makes the bitmap sync'ing take the fast path.
+ */
+ candidate = block->offset + block->max_length;
+ candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS);
+ /* Search for the closest following block
+ * and find the gap.
+ */
RAMBLOCK_FOREACH(next_block) {
- if (next_block->offset >= end) {
+ if (next_block->offset >= candidate) {
next = MIN(next, next_block->offset);
}
}
- if (next - end >= size && next - end < mingap) {
- offset = end;
- mingap = next - end;
+
+ /* If it fits, remember our place and remember the size
+ * of the gap, but keep going so that we might find a smaller
+ * gap to fill, thus avoiding fragmentation.
+ */
+ if (next - candidate >= size && next - candidate < mingap) {
+ offset = candidate;
+ mingap = next - candidate;
}
+
+ trace_find_ram_offset_loop(size, candidate, offset, next, mingap);
}
if (offset == RAM_ADDR_MAX) {
abort();
}
+ trace_find_ram_offset(size, offset);
+
return offset;
}
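/* Worked example (assuming 4 KiB target pages and 64-bit longs, for
 * illustration only): BITS_PER_LONG << TARGET_PAGE_BITS is 64 << 12 =
 * 0x40000, so candidate offsets are rounded up to 256 KiB boundaries.
 * A block ending at 0x123000 therefore yields candidate 0x140000, and the
 * gap to the closest following block is measured from that rounded offset.
 */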
flags, -1, 0);
}
if (area != vaddr) {
- fprintf(stderr, "Could not remap addr: "
- RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
- length, addr);
+ error_report("Could not remap addr: "
+ RAM_ADDR_FMT "@" RAM_ADDR_FMT "",
+ length, addr);
exit(1);
}
memory_try_enable_merging(vaddr, length);
return block->offset + offset;
}
-/* Called within RCU critical section. */
-static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
- uint64_t val, unsigned size)
+/* Called within RCU critical section. */
+void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
+ CPUState *cpu,
+ vaddr mem_vaddr,
+ ram_addr_t ram_addr,
+ unsigned size)
{
- bool locked = false;
+ ndi->cpu = cpu;
+ ndi->ram_addr = ram_addr;
+ ndi->mem_vaddr = mem_vaddr;
+ ndi->size = size;
+ ndi->locked = false;
assert(tcg_enabled());
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
- locked = true;
+ ndi->locked = true;
tb_lock();
tb_invalidate_phys_page_fast(ram_addr, size);
}
+}
+
+/* Called within RCU critical section. */
+void memory_notdirty_write_complete(NotDirtyInfo *ndi)
+{
+ if (ndi->locked) {
+ tb_unlock();
+ }
+
+ /* Set both VGA and migration bits for simplicity and to remove
+ * the notdirty callback faster.
+ */
+ cpu_physical_memory_set_dirty_range(ndi->ram_addr, ndi->size,
+ DIRTY_CLIENTS_NOCODE);
+ /* We remove the notdirty callback only if the code has been
+ * flushed.
+ */
+ if (!cpu_physical_memory_is_clean(ndi->ram_addr)) {
+ tlb_set_dirty(ndi->cpu, ndi->mem_vaddr);
+ }
+}
+
+/* Called within RCU critical section. */
+static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
+ uint64_t val, unsigned size)
+{
+ NotDirtyInfo ndi;
+
+ memory_notdirty_write_prepare(&ndi, current_cpu, current_cpu->mem_io_vaddr,
+ ram_addr, size);
+
switch (size) {
case 1:
stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
default:
abort();
}
-
- if (locked) {
- tb_unlock();
- }
-
- /* Set both VGA and migration bits for simplicity and to remove
- * the notdirty callback faster.
- */
- cpu_physical_memory_set_dirty_range(ram_addr, size,
- DIRTY_CLIENTS_NOCODE);
- /* we remove the notdirty callback only if the code has been
- flushed */
- if (!cpu_physical_memory_is_clean(ram_addr)) {
- tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
- }
+ memory_notdirty_write_complete(&ndi);
}
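/* Note (hedged): splitting the old handler into
 * memory_notdirty_write_prepare()/memory_notdirty_write_complete() lets a
 * caller perform the guest store itself between the two calls, exactly as
 * notdirty_mem_write() does above.  Presumably this is intended to let other
 * write paths reuse the same TB-invalidation and dirty-bitmap bookkeeping
 * without dispatching through an I/O memory region.
 */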
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
return phys_section_add(map, &section);
}
+static void readonly_mem_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ /* Ignore any write to ROM. */
+}
+
+static bool readonly_mem_accepts(void *opaque, hwaddr addr,
+ unsigned size, bool is_write)
+{
+ return is_write;
+}
+
+/* This will only be used for writes, because reads are special-cased
+ * to directly access the underlying host RAM.
+ */
+static const MemoryRegionOps readonly_mem_ops = {
+ .write = readonly_mem_write,
+ .valid.accepts = readonly_mem_accepts,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ .unaligned = false,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ .unaligned = false,
+ },
+};
+
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
int asidx = cpu_asidx_from_attrs(cpu, attrs);
static void io_mem_init(void)
{
- memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
+ memory_region_init_io(&io_mem_rom, NULL, &readonly_mem_ops,
+ NULL, NULL, UINT64_MAX);
memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
NULL, UINT64_MAX);
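/* Note: with this change, guest writes that land on io_mem_rom are explicitly
 * accepted (for access sizes 1-8) and silently discarded by
 * readonly_mem_write(), instead of going through the generic
 * unassigned_mem_ops; reads never reach these ops because, as the comment
 * above readonly_mem_ops notes, they are special-cased to access the
 * underlying host RAM directly.
 */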