#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
-#include "exec/cputlb.h"
#include "translate-all.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/range.h"
+#ifndef _WIN32
+#include "qemu/mmap-alloc.h"
+#endif
//#define DEBUG_SUBPAGE
*/
#define RAM_RESIZEABLE (1 << 2)
+/* RAM is backed by an mmapped file. */
+#define RAM_FILE (1 << 3)
#endif
struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
cpu_exec() */
-DEFINE_TLS(CPUState *, current_cpu);
+__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
1 = Precise instruction counting.
2 = Adaptive rate instruction counting. */
static void tcg_commit(MemoryListener *listener);
static MemoryRegion io_mem_watch;
+
+/**
+ * CPUAddressSpace: all the information a CPU needs about an AddressSpace
+ * @cpu: the CPU whose AddressSpace this is
+ * @as: the AddressSpace itself
+ * @memory_dispatch: its dispatch pointer (cached, RCU protected)
+ * @tcg_as_listener: listener for tracking changes to the AddressSpace
+ */
+struct CPUAddressSpace {
+ CPUState *cpu;
+ AddressSpace *as;
+ struct AddressSpaceDispatch *memory_dispatch;
+ MemoryListener tcg_as_listener;
+};
+
#endif
#if !defined(CONFIG_USER_ONLY)
hwaddr *xlat, hwaddr *plen)
{
MemoryRegionSection *section;
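+ /* Index 0: only one address space per CPU is supported so far. */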
- section = address_space_translate_internal(cpu->memory_dispatch,
+ section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
addr, xlat, plen, false);
assert(!section->mr->iommu_ops);
}
};
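+/* .needed callback: the subsection below is migrated only when a crash
+ * has actually been recorded.
+ */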
+static bool cpu_common_crash_occurred_needed(void *opaque)
+{
+ CPUState *cpu = opaque;
+
+ return cpu->crash_occurred;
+}
+
+static const VMStateDescription vmstate_cpu_common_crash_occurred = {
+ .name = "cpu_common/crash_occurred",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = cpu_common_crash_occurred_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(crash_occurred, CPUState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
const VMStateDescription vmstate_cpu_common = {
.name = "cpu_common",
.version_id = 1,
},
.subsections = (const VMStateDescription*[]) {
&vmstate_cpu_common_exception_index,
+ &vmstate_cpu_common_crash_occurred,
NULL
}
};
/* We only support one address space per cpu at the moment. */
assert(cpu->as == as);
- if (cpu->tcg_as_listener) {
- memory_listener_unregister(cpu->tcg_as_listener);
- } else {
- cpu->tcg_as_listener = g_new0(MemoryListener, 1);
+ if (cpu->cpu_ases) {
+ /* We've already registered the listener for our only AS */
+ return;
}
- cpu->tcg_as_listener->commit = tcg_commit;
- memory_listener_register(cpu->tcg_as_listener, as);
+
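+ /* First call: allocate the lone CPUAddressSpace and register its
+  * commit listener with the AddressSpace.
+  */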
+ cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
+ cpu->cpu_ases[0].cpu = cpu;
+ cpu->cpu_ases[0].as = as;
+ cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
+ memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
}
#endif
#ifndef CONFIG_USER_ONLY
cpu->as = &address_space_memory;
cpu->thread_id = qemu_get_thread_id();
- cpu_reload_memory_map(cpu);
#endif
#if defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
+ CPUState *cpu;
ram_addr_t start1;
RAMBlock *block;
ram_addr_t end;
block = qemu_get_ram_block(start);
assert(block == qemu_get_ram_block(end - 1));
start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
- cpu_tlb_reset_dirty_all(start1, length);
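+ /* Each vCPU caches ram addresses in its own TLB, so walk them all. */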
+ CPU_FOREACH(cpu) {
+ tlb_reset_dirty(cpu, start1, length);
+ }
rcu_read_unlock();
}
char *filename;
char *sanitized_name;
char *c;
- void *area = NULL;
+ void *area;
int fd;
uint64_t hpagesize;
Error *local_err = NULL;
perror("ftruncate");
}
- area = mmap(0, memory, PROT_READ | PROT_WRITE,
- (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
- fd, 0);
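+ /* qemu_ram_mmap() wraps mmap() and aligns the mapping to hpagesize
+  * (presumably by over-reserving; see util/mmap-alloc.c).
+  */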
+ area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
if (area == MAP_FAILED) {
error_setg_errno(errp, errno,
"unable to map backing store for hugepages");
return area;
error:
- if (mem_prealloc) {
- error_report("%s", error_get_pretty(*errp));
- exit(1);
- }
return NULL;
}
#endif
new_block->used_length = size;
new_block->max_length = size;
new_block->flags = share ? RAM_SHARED : 0;
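+ /* Mark the block file-backed so qemu_ram_free() tears the mapping down
+  * with qemu_ram_munmap() rather than plain munmap().
+  */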
+ new_block->flags |= RAM_FILE;
new_block->host = file_ram_alloc(new_block, size,
mem_path, errp);
if (!new_block->host) {
xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
} else if (block->fd >= 0) {
- munmap(block->host, block->max_length);
+ if (block->flags & RAM_FILE) {
+ qemu_ram_munmap(block->host, block->max_length);
+ } else {
+ munmap(block->host, block->max_length);
+ }
close(block->fd);
#endif
} else {
/* we remove the notdirty callback only if the code has been
flushed */
if (!cpu_physical_memory_is_clean(ram_addr)) {
- CPUArchState *env = current_cpu->env_ptr;
- tlb_set_dirty(env, current_cpu->mem_io_vaddr);
+ tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
}
}
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
{
- AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
+ CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
+ AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
MemoryRegionSection *sections = d->map.sections;
return sections[index & ~TARGET_PAGE_MASK].mr;
static void tcg_commit(MemoryListener *listener)
{
- CPUState *cpu;
+ CPUAddressSpace *cpuas;
+ AddressSpaceDispatch *d;
/* since each CPU stores ram addresses in its TLB cache, we must
reset the modified entries */
- /* XXX: slow ! */
- CPU_FOREACH(cpu) {
- /* FIXME: Disentangle the cpu.h circular files deps so we can
- directly get the right CPU from listener. */
- if (cpu->tcg_as_listener != listener) {
- continue;
- }
- cpu_reload_memory_map(cpu);
- }
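+ /* container_of() recovers the owning CPUAddressSpace straight from the
+  * listener, making the old CPU_FOREACH() scan (and its FIXME) obsolete.
+  */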
+ cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
+ cpu_reloading_memory_map();
+ /* The CPU and TLB are protected by the iothread lock.
+ * We reload the dispatch pointer now because cpu_reloading_memory_map()
+ * may have split the RCU critical section.
+ */
+ d = atomic_rcu_read(&cpuas->as->dispatch);
+ cpuas->memory_dispatch = d;
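+ /* Flush the whole TLB: the cached ram addresses may now be stale. */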
+ tlb_flush(cpuas->cpu, 1);
}
void address_space_init_dispatch(AddressSpace *as)
if (l > access_size_max) {
l = access_size_max;
}
- if (l & (l - 1)) {
- l = 1 << (qemu_fls(l) - 1);
- }
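+ /* pow2floor() rounds l down to the largest power of two <= l,
+  * replacing the open-coded qemu_fls() computation above.
+  */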
+ l = pow2floor(l);
return l;
}