#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
-#include "exec/cputlb.h"
#include "translate-all.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/range.h"
+#ifndef _WIN32
+#include "qemu/mmap-alloc.h"
+#endif
//#define DEBUG_SUBPAGE
*/
#define RAM_RESIZEABLE (1 << 2)
+/* RAM is backed by an mmapped file.
+ */
+#define RAM_FILE (1 << 3)
#endif
struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
cpu_exec() */
-DEFINE_TLS(CPUState *, current_cpu);
+__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
1 = Precise instruction counting.
2 = Adaptive rate instruction counting. */
static void tcg_commit(MemoryListener *listener);
static MemoryRegion io_mem_watch;
+
+/**
+ * CPUAddressSpace: all the information a CPU needs about an AddressSpace
+ * @cpu: the CPU whose AddressSpace this is
+ * @as: the AddressSpace itself
+ * @memory_dispatch: its dispatch pointer (cached, RCU protected)
+ * @tcg_as_listener: listener for tracking changes to the AddressSpace
+ */
+struct CPUAddressSpace {
+ CPUState *cpu;
+ AddressSpace *as;
+ struct AddressSpaceDispatch *memory_dispatch;
+ MemoryListener tcg_as_listener;
+};
+
#endif
#if !defined(CONFIG_USER_ONLY)
hwaddr *xlat, hwaddr *plen)
{
MemoryRegionSection *section;
- section = address_space_translate_internal(cpu->memory_dispatch,
+ section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
addr, xlat, plen, false);
assert(!section->mr->iommu_ops);
}
};
+static bool cpu_common_crash_occurred_needed(void *opaque)
+{
+ CPUState *cpu = opaque;
+
+ return cpu->crash_occurred;
+}
+
+static const VMStateDescription vmstate_cpu_common_crash_occurred = {
+ .name = "cpu_common/crash_occurred",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = cpu_common_crash_occurred_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(crash_occurred, CPUState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
const VMStateDescription vmstate_cpu_common = {
.name = "cpu_common",
.version_id = 1,
},
.subsections = (const VMStateDescription*[]) {
&vmstate_cpu_common_exception_index,
+ &vmstate_cpu_common_crash_occurred,
NULL
}
};
/* We only support one address space per cpu at the moment. */
assert(cpu->as == as);
- if (cpu->tcg_as_listener) {
- memory_listener_unregister(cpu->tcg_as_listener);
- } else {
- cpu->tcg_as_listener = g_new0(MemoryListener, 1);
+ if (cpu->cpu_ases) {
+ /* We've already registered the listener for our only AS */
+ return;
}
- cpu->tcg_as_listener->commit = tcg_commit;
- memory_listener_register(cpu->tcg_as_listener, as);
+
+ cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
+ cpu->cpu_ases[0].cpu = cpu;
+ cpu->cpu_ases[0].as = as;
+ cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
+ memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
}
#endif
-void cpu_exec_init(CPUArchState *env)
+#ifndef CONFIG_USER_ONLY
+static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
+
+static int cpu_get_free_index(Error **errp)
+{
+ int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
+
+ if (cpu >= MAX_CPUMASK_BITS) {
+ error_setg(errp, "Trying to use more CPUs than max of %d",
+ MAX_CPUMASK_BITS);
+ return -1;
+ }
+
+ bitmap_set(cpu_index_map, cpu, 1);
+ return cpu;
+}
+
+void cpu_exec_exit(CPUState *cpu)
+{
+ if (cpu->cpu_index == -1) {
+ /* cpu_index was never allocated by this @cpu or was already freed. */
+ return;
+ }
+
+ bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
+ cpu->cpu_index = -1;
+}
+#else
+
+static int cpu_get_free_index(Error **errp)
{
- CPUState *cpu = ENV_GET_CPU(env);
- CPUClass *cc = CPU_GET_CLASS(cpu);
CPUState *some_cpu;
- int cpu_index;
+ int cpu_index = 0;
-#if defined(CONFIG_USER_ONLY)
- cpu_list_lock();
-#endif
- cpu_index = 0;
CPU_FOREACH(some_cpu) {
cpu_index++;
}
- cpu->cpu_index = cpu_index;
- cpu->numa_node = 0;
- QTAILQ_INIT(&cpu->breakpoints);
- QTAILQ_INIT(&cpu->watchpoints);
+ return cpu_index;
+}
+
+void cpu_exec_exit(CPUState *cpu)
+{
+}
+#endif
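Unlike the old monotonically increasing counter, the bitmap allocator above lets a hot-unplugged CPU hand its index back for reuse. A rough standalone sketch of that behaviour, using a plain array instead of QEMU's DECLARE_BITMAP/find_first_zero_bit (the toy_* names and MAX_CPUS are mine, not QEMU's):

/* Illustration only: simplified model of the cpu_index allocator above. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_CPUS 8                      /* stands in for MAX_CPUMASK_BITS */

static bool cpu_index_used[MAX_CPUS];

/* Return the lowest free index, or -1 when all slots are taken. */
static int toy_cpu_get_free_index(void)
{
    for (int i = 0; i < MAX_CPUS; i++) {
        if (!cpu_index_used[i]) {
            cpu_index_used[i] = true;
            return i;
        }
    }
    return -1;
}

/* Release an index so a later hotplug can reuse it. */
static void toy_cpu_exec_exit(int cpu_index)
{
    if (cpu_index >= 0 && cpu_index < MAX_CPUS) {
        cpu_index_used[cpu_index] = false;
    }
}

int main(void)
{
    int a = toy_cpu_get_free_index();   /* 0 */
    int b = toy_cpu_get_free_index();   /* 1 */
    toy_cpu_exec_exit(a);               /* free slot 0 */
    int c = toy_cpu_get_free_index();   /* reuses 0 instead of growing */
    printf("%d %d %d\n", a, b, c);
    return 0;
}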
+
+void cpu_exec_init(CPUState *cpu, Error **errp)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ int cpu_index;
+ Error *local_err = NULL;
+
#ifndef CONFIG_USER_ONLY
cpu->as = &address_space_memory;
cpu->thread_id = qemu_get_thread_id();
- cpu_reload_memory_map(cpu);
#endif
+
+#if defined(CONFIG_USER_ONLY)
+ cpu_list_lock();
+#endif
+ cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+#if defined(CONFIG_USER_ONLY)
+ cpu_list_unlock();
+#endif
+ return;
+ }
QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
cpu_list_unlock();
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
- cpu_save, cpu_load, env);
+ cpu_save, cpu_load, cpu->env_ptr);
assert(cc->vmsd == NULL);
assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
} else {
/* must flush all the translated code to avoid inconsistencies */
/* XXX: only flush what is necessary */
- CPUArchState *env = cpu->env_ptr;
- tb_flush(env);
+ tb_flush(cpu);
}
}
}
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
+ CPUState *cpu;
ram_addr_t start1;
RAMBlock *block;
ram_addr_t end;
block = qemu_get_ram_block(start);
assert(block == qemu_get_ram_block(end - 1));
start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
- cpu_tlb_reset_dirty_all(start1, length);
+ CPU_FOREACH(cpu) {
+ tlb_reset_dirty(cpu, start1, length);
+ }
rcu_read_unlock();
}
iotlb |= PHYS_SECTION_ROM;
}
} else {
- iotlb = section - section->address_space->dispatch->map.sections;
+ AddressSpaceDispatch *d;
+
+    d = atomic_rcu_read(&section->address_space->dispatch);
+ iotlb = section - d->map.sections;
iotlb += xlat;
}
char *filename;
char *sanitized_name;
char *c;
- void *area = NULL;
+ void *area;
int fd;
uint64_t hpagesize;
Error *local_err = NULL;
unlink(filename);
g_free(filename);
- memory = (memory+hpagesize-1) & ~(hpagesize-1);
+ memory = ROUND_UP(memory, hpagesize);
/*
* ftruncate is not supported by hugetlbfs in older
perror("ftruncate");
}
- area = mmap(0, memory, PROT_READ | PROT_WRITE,
- (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
- fd, 0);
+ area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
if (area == MAP_FAILED) {
error_setg_errno(errp, errno,
"unable to map backing store for hugepages");
return area;
error:
- if (mem_prealloc) {
- error_report("%s", error_get_pretty(*errp));
- exit(1);
- }
return NULL;
}
#endif
new_block->used_length = size;
new_block->max_length = size;
new_block->flags = share ? RAM_SHARED : 0;
+ new_block->flags |= RAM_FILE;
new_block->host = file_ram_alloc(new_block, size,
mem_path, errp);
if (!new_block->host) {
xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
} else if (block->fd >= 0) {
- munmap(block->host, block->max_length);
+ if (block->flags & RAM_FILE) {
+ qemu_ram_munmap(block->host, block->max_length);
+ } else {
+ munmap(block->host, block->max_length);
+ }
close(block->fd);
#endif
} else {
/* we remove the notdirty callback only if the code has been
flushed */
if (!cpu_physical_memory_is_clean(ram_addr)) {
- CPUArchState *env = current_cpu->env_ptr;
- tlb_set_dirty(env, current_cpu->mem_io_vaddr);
+ tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
}
}
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
{
- AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
+ CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
+ AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
MemoryRegionSection *sections = d->map.sections;
return sections[index & ~TARGET_PAGE_MASK].mr;
static void tcg_commit(MemoryListener *listener)
{
- CPUState *cpu;
+ CPUAddressSpace *cpuas;
+ AddressSpaceDispatch *d;
/* since each CPU stores ram addresses in its TLB cache, we must
reset the modified entries */
- /* XXX: slow ! */
- CPU_FOREACH(cpu) {
- /* FIXME: Disentangle the cpu.h circular files deps so we can
- directly get the right CPU from listener. */
- if (cpu->tcg_as_listener != listener) {
- continue;
- }
- cpu_reload_memory_map(cpu);
- }
+ cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
+ cpu_reloading_memory_map();
+ /* The CPU and TLB are protected by the iothread lock.
+ * We reload the dispatch pointer now because cpu_reloading_memory_map()
+ * may have split the RCU critical section.
+ */
+ d = atomic_rcu_read(&cpuas->as->dispatch);
+ cpuas->memory_dispatch = d;
+ tlb_flush(cpuas->cpu, 1);
}
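Because the listener is now embedded in CPUAddressSpace, tcg_commit() can use container_of() to get back to the owning structure instead of scanning every CPU. A minimal standalone sketch of that pattern (the demo_* names are mine; QEMU's real container_of adds type checking):

/* Illustration only: recovering the owner of an embedded member. */
#include <stddef.h>
#include <stdio.h>

#define demo_container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_listener {
    void (*commit)(struct demo_listener *l);
};

struct demo_cpu_as {
    int cpu_id;                      /* stands in for cpuas->cpu */
    struct demo_listener listener;   /* stands in for tcg_as_listener */
};

static void demo_commit(struct demo_listener *l)
{
    /* Same step as tcg_commit(): from the member back to its owner. */
    struct demo_cpu_as *cpuas =
        demo_container_of(l, struct demo_cpu_as, listener);
    printf("commit for CPU %d\n", cpuas->cpu_id);
}

int main(void)
{
    struct demo_cpu_as as = {
        .cpu_id = 3,
        .listener = { .commit = demo_commit },
    };
    as.listener.commit(&as.listener);
    return 0;
}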
void address_space_init_dispatch(AddressSpace *as)
if (l > access_size_max) {
l = access_size_max;
}
- if (l & (l - 1)) {
- l = 1 << (qemu_fls(l) - 1);
- }
+ l = pow2floor(l);
return l;
}
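pow2floor() rounds l down to the largest power of two that does not exceed it, which is what the removed qemu_fls() expression computed by hand. A small standalone sketch of the equivalence (demo_pow2floor is my name, not QEMU's helper):

/* Illustration only: round down to the highest set bit. */
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_pow2floor(uint64_t value)
{
    /* Clear low-order bits until only the highest set bit remains. */
    while (value & (value - 1)) {
        value &= value - 1;
    }
    return value;
}

int main(void)
{
    /* 6 -> 4, 8 stays 8: the same result the old fls-based code produced. */
    printf("%llu %llu\n",
           (unsigned long long)demo_pow2floor(6),
           (unsigned long long)demo_pow2floor(8));
    return 0;
}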