#endif
+#ifdef TARGET_PAGE_BITS_VARY
+int target_page_bits;
+bool target_page_bits_decided;
+#endif
+
struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
cpu_exec() */
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
int use_icount;
+bool set_preferred_target_page_bits(int bits)
+{
+ /* The target page size is the lowest common denominator for all
+ * the CPUs in the system, so we can only make it smaller, never
+ * larger. And we can't make it smaller once we've committed to
+ * a particular size.
+ */
+#ifdef TARGET_PAGE_BITS_VARY
+ assert(bits >= TARGET_PAGE_BITS_MIN);
+ if (target_page_bits == 0 || target_page_bits > bits) {
+ if (target_page_bits_decided) {
+ return false;
+ }
+ target_page_bits = bits;
+ }
+#endif
+ return true;
+}
+
#if !defined(CONFIG_USER_ONLY)
+static void finalize_target_page_bits(void)
+{
+#ifdef TARGET_PAGE_BITS_VARY
+ if (target_page_bits == 0) {
+ target_page_bits = TARGET_PAGE_BITS_MIN;
+ }
+ target_page_bits_decided = true;
+#endif
+}
+
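The two new helpers form a negotiate-then-freeze protocol: callers may only ever shrink the target page size, and once the machine is committed the choice is locked in. The stand-alone sketch below is illustrative only (propose_bits, freeze_bits and MIN_BITS are made-up names standing in for the patch's functions and TARGET_PAGE_BITS_MIN); it shows the intended calling order and the failure mode of a late shrink request.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define MIN_BITS 10          /* stand-in for TARGET_PAGE_BITS_MIN */

static int  chosen_bits;     /* 0 = no preference registered yet */
static bool bits_frozen;

static bool propose_bits(int bits)
{
    assert(bits >= MIN_BITS);
    /* Only ever shrink, and never after the size has been frozen. */
    if (chosen_bits == 0 || chosen_bits > bits) {
        if (bits_frozen) {
            return false;
        }
        chosen_bits = bits;
    }
    return true;
}

static void freeze_bits(void)
{
    if (chosen_bits == 0) {
        chosen_bits = MIN_BITS;
    }
    bits_frozen = true;
}

int main(void)
{
    propose_bits(16);                 /* first CPU can use 64K pages  */
    propose_bits(12);                 /* second CPU only supports 4K  */
    freeze_bits();                    /* machine creation is finished */
    printf("page bits: %d\n", chosen_bits);            /* prints 12 */
    printf("late shrink: %d\n", propose_bits(10));     /* prints 0  */
    return 0;
}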
typedef struct PhysPageEntry PhysPageEntry;
struct PhysPageEntry {
typedef struct subpage_t {
MemoryRegion iomem;
AddressSpace *as;
hwaddr base;
- uint16_t sub_section[TARGET_PAGE_SIZE];
+ uint16_t sub_section[];
} subpage_t;
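Switching the fixed TARGET_PAGE_SIZE array to a C99 flexible array member makes sizeof(subpage_t) independent of the (now possibly runtime-variable) page size; the matching subpage_init hunk further down then allocates the header and the table in a single block. A generic sketch of the idiom follows, with illustrative names and plain calloc standing in for g_malloc0.

#include <stdint.h>
#include <stdlib.h>

typedef struct table_t {
    uint64_t base;
    uint16_t entries[];    /* flexible array member: contributes no size */
} table_t;

static table_t *table_new(size_t n)
{
    /* One allocation covers the fixed header plus n trailing entries. */
    table_t *t = calloc(1, sizeof(table_t) + n * sizeof(uint16_t));
    if (!t) {
        abort();           /* g_malloc0 would abort on failure, too */
    }
    return t;
}

int main(void)
{
    table_t *t = table_new(1u << 10);
    t->entries[42] = 7;
    free(t);
    return 0;
}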
#define PHYS_SECTION_UNASSIGNED 0
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
+ static unsigned alloc_hint = 16;
if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
- map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
+ map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
+ alloc_hint = map->nodes_nb_alloc;
}
}
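The static alloc_hint turns repeated dispatch rebuilds from "grow by doubling from 16 each time" into "start from roughly the size the previous build ended up needing". A stand-alone sketch of the pattern, with illustrative names and plain realloc in place of g_renew:

#include <stdlib.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

typedef struct {
    int *nodes;
    unsigned nb;           /* entries in use    */
    unsigned nb_alloc;     /* entries allocated */
} node_array;

static void node_array_reserve(node_array *a, unsigned extra)
{
    static unsigned alloc_hint = 16;      /* survives across rebuilds */

    if (a->nb + extra > a->nb_alloc) {
        a->nb_alloc = MAX(a->nb_alloc, alloc_hint);
        a->nb_alloc = MAX(a->nb_alloc, a->nb + extra);
        a->nodes = realloc(a->nodes, a->nb_alloc * sizeof(*a->nodes));
        if (!a->nodes) {
            abort();                      /* g_renew aborts on failure */
        }
        alloc_hint = a->nb_alloc;         /* next build starts this big */
    }
}

int main(void)
{
    node_array a = { 0 };
    node_array_reserve(&a, 100);
    free(a.nodes);
    return 0;
}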
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
* and update our entry so we can skip it and go directly to the destination.
*/
-static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
+static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
unsigned valid_ptr = P_L2_SIZE;
int valid = 0;
valid_ptr = i;
valid++;
if (p[i].skip) {
- phys_page_compact(&p[i], nodes, compacted);
+ phys_page_compact(&p[i], nodes);
}
}
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
- DECLARE_BITMAP(compacted, nodes_nb);
-
if (d->phys_map.skip) {
- phys_page_compact(&d->phys_map, d->map.nodes, compacted);
+ phys_page_compact(&d->phys_map, d->map.nodes);
}
}
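For reference, a toy version of the compaction the comment describes: count the valid children, recurse, and when exactly one child remains fold it into the parent by accumulating the skip count. The structure below is illustrative only and much simpler than QEMU's PhysPageEntry/Node encoding (which additionally has to keep skip within its bit-field width).

#include <stddef.h>

#define FANOUT 16

typedef struct Entry Entry;
struct Entry {
    unsigned skip;               /* levels this entry jumps; 0 == leaf */
    int value;                   /* payload, meaningful for leaves     */
    Entry *child[FANOUT];        /* NULL for absent children           */
};

static void compact(Entry *e)
{
    unsigned valid = 0, valid_idx = 0;

    if (e->skip == 0) {
        return;                  /* leaf: nothing below to fold */
    }
    for (unsigned i = 0; i < FANOUT; i++) {
        if (e->child[i]) {
            compact(e->child[i]);
            valid++;
            valid_idx = i;
        }
    }
    if (valid != 1) {
        return;                  /* can only fold a single-child entry */
    }
    Entry *only = e->child[valid_idx];
    if (only->skip == 0) {
        /* The only child is a leaf: this entry becomes that leaf. */
        e->skip = 0;
        e->value = only->value;
        for (unsigned i = 0; i < FANOUT; i++) {
            e->child[i] = NULL;
        }
    } else {
        /* Jump over the intermediate level: accumulate its skip count
         * and adopt its children; the orphaned node is simply left
         * behind, as in the array-backed original. */
        e->skip += only->skip;
        for (unsigned i = 0; i < FANOUT; i++) {
            e->child[i] = only->child[i];
        }
    }
}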
/* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
* the section must cover the entire address space.
*/
- return section->size.hi ||
+ return int128_gethi(section->size) ||
range_covers_byte(section->offset_within_address_space,
- section->size.lo, addr);
+ int128_getlo(section->size), addr);
}
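The switch from section->size.hi / .lo to int128_gethi() / int128_getlo() matters because the 128-bit type may be represented either as a two-word struct or as a native __int128, and only the accessors work for both. A minimal sketch of that kind of dual representation (my_int128 and its accessors are illustrative, not QEMU's int128.h):

#include <stdint.h>
#include <stdbool.h>

#ifdef __SIZEOF_INT128__
typedef unsigned __int128 my_int128;
static inline uint64_t my_gethi(my_int128 v) { return (uint64_t)(v >> 64); }
static inline uint64_t my_getlo(my_int128 v) { return (uint64_t)v; }
#else
typedef struct { uint64_t lo; uint64_t hi; } my_int128;
static inline uint64_t my_gethi(my_int128 v) { return v.hi; }
static inline uint64_t my_getlo(my_int128 v) { return v.lo; }
#endif

/* A size with any high bits set cannot fit in 64 bits, i.e. the region
 * covers the whole address space (compare section_covers_addr above). */
static inline bool covers_everything(my_int128 size)
{
    return my_gethi(size) != 0;
}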
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
}
#endif
-#ifndef CONFIG_USER_ONLY
-static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
-
-static int cpu_get_free_index(Error **errp)
-{
- int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
-
- if (cpu >= MAX_CPUMASK_BITS) {
- error_setg(errp, "Trying to use more CPUs than max of %d",
- MAX_CPUMASK_BITS);
- return -1;
- }
-
- bitmap_set(cpu_index_map, cpu, 1);
- return cpu;
-}
-
-static void cpu_release_index(CPUState *cpu)
-{
- bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
-}
-#else
-
-static int cpu_get_free_index(Error **errp)
-{
- CPUState *some_cpu;
- int cpu_index = 0;
-
- CPU_FOREACH(some_cpu) {
- cpu_index++;
- }
- return cpu_index;
-}
-
-static void cpu_release_index(CPUState *cpu)
-{
- return;
-}
-#endif
-
-void cpu_exec_exit(CPUState *cpu)
+void cpu_exec_unrealizefn(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
-#if defined(CONFIG_USER_ONLY)
- cpu_list_lock();
-#endif
- if (cpu->cpu_index == -1) {
- /* cpu_index was never allocated by this @cpu or was already freed. */
-#if defined(CONFIG_USER_ONLY)
- cpu_list_unlock();
-#endif
- return;
- }
-
- QTAILQ_REMOVE(&cpus, cpu, node);
- cpu_release_index(cpu);
- cpu->cpu_index = -1;
-#if defined(CONFIG_USER_ONLY)
- cpu_list_unlock();
-#endif
+ cpu_list_remove(cpu);
if (cc->vmsd != NULL) {
vmstate_unregister(NULL, cc->vmsd, cpu);
}
}
-void cpu_exec_init(CPUState *cpu, Error **errp)
+void cpu_exec_initfn(CPUState *cpu)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
- Error *local_err = NULL;
-
cpu->as = NULL;
cpu->num_ases = 0;
cpu->memory = system_memory;
object_ref(OBJECT(cpu->memory));
#endif
+}
-#if defined(CONFIG_USER_ONLY)
- cpu_list_lock();
-#endif
- cpu->cpu_index = cpu_get_free_index(&local_err);
- if (local_err) {
- error_propagate(errp, local_err);
-#if defined(CONFIG_USER_ONLY)
- cpu_list_unlock();
-#endif
- return;
- }
- QTAILQ_INSERT_TAIL(&cpus, cpu, node);
-#if defined(CONFIG_USER_ONLY)
- (void) cc;
- cpu_list_unlock();
-#else
+void cpu_exec_realizefn(CPUState *cpu, Error **errp)
+{
+ CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
+
+ cpu_list_add(cpu);
+
+#ifndef CONFIG_USER_ONLY
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
}
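With the open-coded cpu_index bitmap gone, list membership is handled by the common cpu_list_add()/cpu_list_remove() helpers, and it now happens at realize/unrealize time rather than at instance init. A toy sketch of that lifecycle split (Obj, obj_initfn, obj_realizefn and obj_unrealizefn are illustrative names, not QEMU APIs):

#include <stddef.h>

typedef struct Obj Obj;
struct Obj {
    int index;
    Obj *next;
};

static Obj *obj_list;       /* global list, like the QTAILQ of CPUs */
static int next_index;

static void obj_initfn(Obj *o)       /* instance init: no global state */
{
    o->index = -1;
    o->next = NULL;
}

static void obj_realizefn(Obj *o)    /* realize: becomes visible */
{
    o->index = next_index++;
    o->next = obj_list;
    obj_list = o;
}

static void obj_unrealizefn(Obj *o)  /* unrealize: drops off the list */
{
    for (Obj **p = &obj_list; *p; p = &(*p)->next) {
        if (*p == o) {
            *p = o->next;
            break;
        }
    }
}

int main(void)
{
    Obj a, b;
    obj_initfn(&a);
    obj_initfn(&b);
    obj_realizefn(&a);       /* index 0, on the list */
    obj_realizefn(&b);       /* index 1, on the list */
    obj_unrealizefn(&a);     /* only b remains       */
    return 0;
}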
char *filename;
char *sanitized_name;
char *c;
- void *area;
+ void *area = MAP_FAILED;
int fd = -1;
- int64_t page_size;
if (kvm_enabled() && !kvm_has_sync_mmu()) {
error_setg(errp,
*/
}
- page_size = qemu_fd_getpagesize(fd);
- block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
+ block->page_size = qemu_fd_getpagesize(fd);
+ block->mr->align = block->page_size;
+#if defined(__s390x__)
+ if (kvm_enabled()) {
+ block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
+ }
+#endif
- if (memory < page_size) {
+ if (memory < block->page_size) {
error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
- "or larger than page size 0x%" PRIx64,
- memory, page_size);
+ "or larger than page size 0x%zx",
+ memory, block->page_size);
goto error;
}
- memory = ROUND_UP(memory, page_size);
+ memory = ROUND_UP(memory, block->page_size);
/*
* ftruncate is not supported by hugetlbfs in older
}
if (mem_prealloc) {
- os_mem_prealloc(fd, area, memory);
+ os_mem_prealloc(fd, area, memory, errp);
+ if (errp && *errp) {
+ goto error;
+ }
}
block->fd = fd;
return area;
error:
+ if (area != MAP_FAILED) {
+ qemu_ram_munmap(area, memory);
+ }
if (unlink_on_error) {
unlink(path);
}
}
}
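The reworked error path follows the usual out-parameter convention: a helper fills *errp on failure, the caller tests it and unwinds whatever it had built so far (hence the MAP_FAILED initializer, so the mapping is only torn down if it actually succeeded). A stand-alone sketch of that shape with a toy Error type (not QEMU's qapi/error.h):

#include <stdbool.h>
#include <stdlib.h>

typedef struct Error { const char *msg; } Error;

static void error_set(Error **errp, const char *msg)
{
    if (errp && !*errp) {
        Error *e = malloc(sizeof(*e));
        if (e) {
            e->msg = msg;
            *errp = e;
        }
    }
}

static void prealloc_mem(void *area, size_t size, Error **errp)
{
    (void)area;
    if (size == 0) {
        error_set(errp, "nothing to preallocate");
    }
}

static void *alloc_backed_mem(size_t size, Error **errp)
{
    void *area = malloc(size ? size : 1);   /* stands in for the mmap */

    if (!area) {
        error_set(errp, "allocation failed");
        return NULL;
    }
    prealloc_mem(area, size, errp);
    if (errp && *errp) {
        goto error;                          /* undo partial setup */
    }
    return area;

error:
    free(area);
    return NULL;
}

int main(void)
{
    Error *err = NULL;
    void *p = alloc_backed_mem(0, &err);
    free(p);
    free(err);
    return 0;
}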
+size_t qemu_ram_pagesize(RAMBlock *rb)
+{
+ return rb->page_size;
+}
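The new page_size field and the qemu_ram_pagesize() accessor let callers ask for the backing page size of a specific block (4K for anonymous memory, 2M or 1G for hugetlbfs) instead of assuming getpagesize(). A hypothetical caller-side sketch (ram_block, ram_block_pagesize and align_to_block are illustrative, not the patch's API):

#include <stddef.h>

#define ROUND_UP(n, d) ((((n) + (d) - 1) / (d)) * (d))

typedef struct ram_block {
    size_t page_size;      /* page size of this block's backing store */
    size_t used_length;
} ram_block;

static size_t ram_block_pagesize(const ram_block *rb)
{
    return rb->page_size;
}

/* Round a guest-visible size up to the block's own backing page size. */
static size_t align_to_block(const ram_block *rb, size_t size)
{
    return ROUND_UP(size, ram_block_pagesize(rb));
}

int main(void)
{
    ram_block hugepage_block = { .page_size = 2u << 20 }; /* 2M pages */
    return align_to_block(&hugepage_block, 1) == (2u << 20) ? 0 : 1;
}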
+
static int memory_try_enable_merging(void *addr, size_t len)
{
if (!machine_mem_merge(current_machine)) {
if (new_block->host) {
qemu_ram_setup_dump(new_block->host, new_block->max_length);
qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
+ /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
- if (kvm_enabled()) {
- kvm_setup_guest_memory(new_block->host, new_block->max_length);
- }
}
}
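A minimal Linux-only sketch of issuing these madvise hints on an anonymous mapping (illustrative; QEMU goes through its qemu_madvise() wrapper and QEMU_MADV_* constants):

#define _DEFAULT_SOURCE
#include <stddef.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 16u << 20;
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        return 1;
    }
#ifdef MADV_HUGEPAGE
    madvise(p, len, MADV_HUGEPAGE);   /* ask for transparent hugepages */
#endif
#ifdef MADV_DONTFORK
    madvise(p, len, MADV_DONTFORK);   /* child processes don't inherit */
#endif
    munmap(p, len);
    return 0;
}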
new_block->max_length = max_size;
assert(max_size >= size);
new_block->fd = -1;
+ new_block->page_size = getpagesize();
new_block->host = host;
if (host) {
new_block->flags |= RAM_PREALLOC;
continue;
}
cpu->watchpoint_hit = wp;
+
+ /* The tb_lock will be reset when cpu_loop_exit or
+ * cpu_loop_exit_noexc longjmp back into the cpu_exec
+ * main loop.
+ */
+ tb_lock();
tb_check_watchpoint(cpu);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
cpu->exception_index = EXCP_DEBUG;
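The comment relies on a property of the execution loop: a lock taken just before something that may longjmp out of the current context is dropped again at the setjmp landing site, so the non-local exit does not leak the lock. A stand-alone sketch of that pattern (the names below are illustrative, not QEMU's tb_lock machinery):

#include <setjmp.h>
#include <stdbool.h>
#include <stdio.h>

static jmp_buf exec_loop;
static bool lock_held;

static void lock(void)   { lock_held = true;  }
static void unlock(void) { lock_held = false; }

static void hit_watchpoint(void)
{
    lock();                     /* protect translation structures */
    longjmp(exec_loop, 1);      /* abandon the current execution  */
}

int main(void)
{
    if (setjmp(exec_loop)) {
        unlock();               /* the landing site resets the lock */
    } else {
        hit_watchpoint();
    }
    printf("lock held after unwind: %d\n", lock_held);   /* prints 0 */
    return 0;
}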
{
subpage_t *mmio;
- mmio = g_malloc0(sizeof(subpage_t));
-
+ mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
mmio->as = as;
mmio->base = base;
memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
void cpu_exec_init_all(void)
{
qemu_mutex_init(&ram_list.mutex);
+ /* The data structures we set up here depend on knowing the page size,
+ * so no more changes can be made after this point.
+ * In an ideal world, nothing we did before we had finished the
+ * machine setup would care about the target page size, and we could
+ * do this much later, rather than requiring board models to state
+ * up front what their requirements are.
+ */
+ finalize_target_page_bits();
io_mem_init();
memory_map_init();
qemu_mutex_init(&map_client_list_lock);