#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
+#include "qemu/main-loop.h"
#include "exec/cputlb.h"
#include "translate-all.h"
hwaddr *plen, bool resolve_subpage)
{
MemoryRegionSection *section;
+ MemoryRegion *mr;
Int128 diff;
section = address_space_lookup_region(d, addr, resolve_subpage);
/* Compute offset within MemoryRegion */
*xlat = addr + section->offset_within_region;
- diff = int128_sub(section->mr->size, int128_make64(addr));
- *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
+ mr = section->mr;
+
+ /* MMIO registers can be expected to perform full-width accesses based only
+ * on their address, without considering adjacent registers that could
+ * decode to completely different MemoryRegions. When such registers
+ * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
+ * regions overlap wildly. For this reason we cannot clamp the accesses
+ * here.
+ *
+ * If the length is small (as is the case for address_space_ldl/stl),
+ * everything works fine. If the incoming length is large, however,
+ * the caller really has to do the clamping through memory_access_size.
+ */
+ if (memory_region_is_ram(mr)) {
+ diff = int128_sub(section->size, int128_make64(addr));
+ *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
+ }
return section;
}
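The clamping the new comment defers to the caller is done in memory_access_size() (whose tail appears as context further down). As a reference point, here is a simplified sketch of that helper; the in-tree version additionally caps the access by the alignment of addr when the region does not claim unaligned support:

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
     * otherwise specified. */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Don't attempt accesses larger than the maximum. */
    if (l > access_size_max) {
        l = access_size_max;
    }
    /* Trim odd lengths down to the largest power of two they contain. */
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}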
.name = "cpu_common/exception_index",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = cpu_common_exception_index_needed,
.fields = (VMStateField[]) {
VMSTATE_INT32(exception_index, CPUState),
VMSTATE_END_OF_LIST()
VMSTATE_UINT32(interrupt_request, CPUState),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &vmstate_cpu_common_exception_index,
- .needed = cpu_common_exception_index_needed,
- } , {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_cpu_common_exception_index,
+ NULL
}
};
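For context, the .needed callback hoisted onto the subsection above is a plain predicate over the migrated state. A minimal sketch, assuming the subsection only needs to travel while a TCG exception is actually pending:

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    /* Migrate the subsection only when exception_index carries state. */
    return tcg_enabled() && cpu->exception_index != 0;
}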
}
#endif
-void cpu_exec_init(CPUArchState *env)
+#ifndef CONFIG_USER_ONLY
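+/* Bits track which cpu_index values are in use: set in
+ * cpu_get_free_index(), cleared again by cpu_exec_exit(). */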
+static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
+
+static int cpu_get_free_index(Error **errp)
+{
+ int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
+
+ if (cpu >= MAX_CPUMASK_BITS) {
+ error_setg(errp, "Trying to use more CPUs than max of %d",
+ MAX_CPUMASK_BITS);
+ return -1;
+ }
+
+ bitmap_set(cpu_index_map, cpu, 1);
+ return cpu;
+}
+
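+/* Return @cpu's index to the bitmap so that it can be handed out to a
+ * newly created CPU later on. */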
+void cpu_exec_exit(CPUState *cpu)
+{
+ if (cpu->cpu_index == -1) {
+ /* cpu_index was never allocated for this @cpu, or was already freed. */
+ return;
+ }
+
+ bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
+ cpu->cpu_index = -1;
+}
+#else
+
+static int cpu_get_free_index(Error **errp)
{
- CPUState *cpu = ENV_GET_CPU(env);
- CPUClass *cc = CPU_GET_CLASS(cpu);
CPUState *some_cpu;
- int cpu_index;
-#if defined(CONFIG_USER_ONLY)
- cpu_list_lock();
-#endif
- cpu_index = 0;
+ int cpu_index = 0;
CPU_FOREACH(some_cpu) {
cpu_index++;
}
- cpu->cpu_index = cpu_index;
- cpu->numa_node = 0;
- QTAILQ_INIT(&cpu->breakpoints);
- QTAILQ_INIT(&cpu->watchpoints);
+ return cpu_index;
+}
+
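+/* User-mode indices are assigned by counting the existing CPUs and are
+ * never recycled, so there is nothing to release here. */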
+void cpu_exec_exit(CPUState *cpu)
+{
+}
+#endif
+
+void cpu_exec_init(CPUState *cpu, Error **errp)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ int cpu_index;
+ Error *local_err = NULL;
+
#ifndef CONFIG_USER_ONLY
cpu->as = &address_space_memory;
cpu->thread_id = qemu_get_thread_id();
cpu_reload_memory_map(cpu);
#endif
+
+#if defined(CONFIG_USER_ONLY)
+ cpu_list_lock();
+#endif
+ cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+#if defined(CONFIG_USER_ONLY)
+ cpu_list_unlock();
+#endif
+ return;
+ }
QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
cpu_list_unlock();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
- cpu_save, cpu_load, env);
+ cpu_save, cpu_load, cpu->env_ptr);
assert(cc->vmsd == NULL);
assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
} else {
/* must flush all the translated code to avoid inconsistencies */
/* XXX: only flush what is necessary */
- CPUArchState *env = cpu->env_ptr;
- tb_flush(env);
+ tb_flush(cpu);
}
}
}
}
}
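+ /* Sizes here are in target pages; if the new block raised the top of
+ * RAM, the migration dirty bitmap must be grown to match. */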
+ new_ram_size = MAX(old_ram_size,
+ (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
+ if (new_ram_size > old_ram_size) {
+ migration_bitmap_extend(old_ram_size, new_ram_size);
+ }
/* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
* QLIST (which has an RCU-friendly variant) does not have insertion at
* tail, so save the last element in last_block.
return l;
}
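+/* Take the BQL ("iothread" mutex) if the region being accessed still
+ * relies on the global lock; also flush coalesced MMIO, grabbing the
+ * lock just for the flush when it is not otherwise needed. Returns
+ * true if the caller must release the lock after the access. */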
+static bool prepare_mmio_access(MemoryRegion *mr)
+{
+ bool unlocked = !qemu_mutex_iothread_locked();
+ bool release_lock = false;
+
+ if (unlocked && mr->global_locking) {
+ qemu_mutex_lock_iothread();
+ unlocked = false;
+ release_lock = true;
+ }
+ if (mr->flush_coalesced_mmio) {
+ if (unlocked) {
+ qemu_mutex_lock_iothread();
+ }
+ qemu_flush_coalesced_mmio_buffer();
+ if (unlocked) {
+ qemu_mutex_unlock_iothread();
+ }
+ }
+
+ return release_lock;
+}
+
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
uint8_t *buf, int len, bool is_write)
{
hwaddr addr1;
MemoryRegion *mr;
MemTxResult result = MEMTX_OK;
+ bool release_lock = false;
rcu_read_lock();
while (len > 0) {
if (is_write) {
if (!memory_access_is_direct(mr, is_write)) {
+ release_lock |= prepare_mmio_access(mr);
l = memory_access_size(mr, l, addr1);
/* XXX: could force current_cpu to NULL to avoid
potential bugs */
} else {
if (!memory_access_is_direct(mr, is_write)) {
/* I/O case */
+ release_lock |= prepare_mmio_access(mr);
l = memory_access_size(mr, l, addr1);
switch (l) {
case 8:
memcpy(buf, ptr, l);
}
}
+
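+ /* Release the lock taken by prepare_mmio_access() before moving to
+ * the next chunk, so RAM-backed stretches are copied without the BQL. */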
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
+ release_lock = false;
+ }
+
len -= l;
buf += l;
addr += l;
if (!(memory_region_is_ram(mr) ||
memory_region_is_romd(mr))) {
- /* do nothing */
+ l = memory_access_size(mr, l, addr1);
} else {
addr1 += memory_region_get_ram_addr(mr);
/* ROM/RAM case */
hwaddr l = 4;
hwaddr addr1;
MemTxResult r;
+ bool release_lock = false;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l, false);
if (l < 4 || !memory_access_is_direct(mr, false)) {
+ release_lock |= prepare_mmio_access(mr);
+
/* I/O case */
r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
if (result) {
*result = r;
}
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
+ }
rcu_read_unlock();
return val;
}
hwaddr l = 8;
hwaddr addr1;
MemTxResult r;
+ bool release_lock = false;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
false);
if (l < 8 || !memory_access_is_direct(mr, false)) {
+ release_lock |= prepare_mmio_access(mr);
+
/* I/O case */
r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
if (result) {
*result = r;
}
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
+ }
rcu_read_unlock();
return val;
}
hwaddr l = 2;
hwaddr addr1;
MemTxResult r;
+ bool release_lock = false;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
false);
if (l < 2 || !memory_access_is_direct(mr, false)) {
+ release_lock |= prepare_mmio_access(mr);
+
/* I/O case */
r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
if (result) {
*result = r;
}
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
+ }
rcu_read_unlock();
return val;
}
hwaddr addr1;
MemTxResult r;
uint8_t dirty_log_mask;
+ bool release_lock = false;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
true);
if (l < 4 || !memory_access_is_direct(mr, true)) {
+ release_lock |= prepare_mmio_access(mr);
+
r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
} else {
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
if (result) {
*result = r;
}
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
+ }
rcu_read_unlock();
}
hwaddr l = 4;
hwaddr addr1;
MemTxResult r;
+ bool release_lock = false;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
true);
if (l < 4 || !memory_access_is_direct(mr, true)) {
+ release_lock |= prepare_mmio_access(mr);
+
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap32(val);
if (result) {
*result = r;
}
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
+ }
rcu_read_unlock();
}
hwaddr l = 2;
hwaddr addr1;
MemTxResult r;
+ bool release_lock = false;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l, true);
if (l < 2 || !memory_access_is_direct(mr, true)) {
+ release_lock |= prepare_mmio_access(mr);
+
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap16(val);
if (result) {
*result = r;
}
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
+ }
rcu_read_unlock();
}
return res;
}
-void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
+int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
RAMBlock *block;
+ int ret = 0;
rcu_read_lock();
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
- func(block->host, block->offset, block->used_length, opaque);
+ ret = func(block->idstr, block->host, block->offset,
+ block->used_length, opaque);
+ if (ret) {
+ break;
+ }
}
rcu_read_unlock();
+ return ret;
}
#endif
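With the new contract, the walk stops at the first callback that returns non-zero and hands that value back to the caller. A minimal usage sketch; dump_block is hypothetical and not part of the patch (ram_addr_t and RAM_ADDR_FMT come from exec/cpu-common.h):

static int dump_block(const char *idstr, void *host, ram_addr_t offset,
                      ram_addr_t length, void *opaque)
{
    fprintf(stderr, "%s: host %p offset " RAM_ADDR_FMT
            " length " RAM_ADDR_FMT "\n", idstr, host, offset, length);
    return 0;               /* any non-zero value stops the iteration */
}

/* ... */
if (qemu_ram_foreach_block(dump_block, NULL) != 0) {
    /* some callback asked to abort the walk */
}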