#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
+#include "qemu/main-loop.h"
#include "exec/cputlb.h"
#include "translate-all.h"
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
-static bool in_migration;
-
/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
* are protected by the ramlist lock.
*/
}
}
-static uint32_t phys_map_node_alloc(PhysPageMap *map)
+static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
unsigned i;
uint32_t ret;
+ PhysPageEntry e;
+ PhysPageEntry *p;
ret = map->nodes_nb++;
+ p = map->nodes[ret];
assert(ret != PHYS_MAP_NODE_NIL);
assert(ret != map->nodes_nb_alloc);
+
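+ /* Pre-fill the new node: leaf entries point at the unassigned section,
+ * interior entries start out nil and are allocated on first use.
+ */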
+ e.skip = leaf ? 0 : 1;
+ e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
for (i = 0; i < P_L2_SIZE; ++i) {
- map->nodes[ret][i].skip = 1;
- map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
+ memcpy(&p[i], &e, sizeof(e));
}
return ret;
}
int level)
{
PhysPageEntry *p;
- int i;
hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
- lp->ptr = phys_map_node_alloc(map);
- p = map->nodes[lp->ptr];
- if (level == 0) {
- for (i = 0; i < P_L2_SIZE; i++) {
- p[i].skip = 0;
- p[i].ptr = PHYS_SECTION_UNASSIGNED;
- }
- }
- } else {
- p = map->nodes[lp->ptr];
+ lp->ptr = phys_map_node_alloc(map, level == 0);
}
+ p = map->nodes[lp->ptr];
lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
while (*nb && lp < &p[P_L2_SIZE]) {
hwaddr *plen, bool resolve_subpage)
{
MemoryRegionSection *section;
+ MemoryRegion *mr;
Int128 diff;
section = address_space_lookup_region(d, addr, resolve_subpage);
/* Compute offset within MemoryRegion */
*xlat = addr + section->offset_within_region;
- diff = int128_sub(section->mr->size, int128_make64(addr));
- *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
+ mr = section->mr;
+
+ /* MMIO registers can be expected to perform full-width accesses based only
+ * on their address, without considering adjacent registers that could
+ * decode to completely different MemoryRegions. When such registers
+ * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
+ * regions overlap wildly. For this reason we cannot clamp the accesses
+ * here.
+ *
+ * If the length is small (as is the case for address_space_ldl/stl),
+ * everything works fine. If the incoming length is large, however,
+ * the caller really has to do the clamping through memory_access_size.
+ */
+ if (memory_region_is_ram(mr)) {
+ diff = int128_sub(section->size, int128_make64(addr));
+ *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
+ }
return section;
}
return false;
}
+/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
hwaddr *xlat, hwaddr *plen,
bool is_write)
IOMMUTLBEntry iotlb;
MemoryRegionSection *section;
MemoryRegion *mr;
- hwaddr len = *plen;
- rcu_read_lock();
for (;;) {
AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
section = address_space_translate_internal(d, addr, &addr, plen, true);
iotlb = mr->iommu_ops->translate(mr, addr, is_write);
addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
| (addr & iotlb.addr_mask));
- len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
+ *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
if (!(iotlb.perm & (1 << is_write))) {
mr = &io_mem_unassigned;
break;
if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
- len = MIN(page, len);
+ *plen = MIN(page, *plen);
}
- *plen = len;
*xlat = addr;
- rcu_read_unlock();
return mr;
}
}
#endif
-void cpu_exec_init_all(void)
-{
-#if !defined(CONFIG_USER_ONLY)
- qemu_mutex_init(&ram_list.mutex);
- memory_map_init();
- io_mem_init();
-#endif
-}
-
#if !defined(CONFIG_USER_ONLY)
static int cpu_common_post_load(void *opaque, int version_id)
.name = "cpu_common/exception_index",
.version_id = 1,
.minimum_version_id = 1,
+ .needed = cpu_common_exception_index_needed,
.fields = (VMStateField[]) {
VMSTATE_INT32(exception_index, CPUState),
VMSTATE_END_OF_LIST()
VMSTATE_UINT32(interrupt_request, CPUState),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection[]) {
- {
- .vmsd = &vmstate_cpu_common_exception_index,
- .needed = cpu_common_exception_index_needed,
- } , {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_cpu_common_exception_index,
+ NULL
}
};
}
#endif
-void cpu_exec_init(CPUArchState *env)
+#ifndef CONFIG_USER_ONLY
+static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
+
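+/* Hand out the lowest unused cpu_index; sets @errp and returns -1 once
+ * all MAX_CPUMASK_BITS indexes are taken.
+ */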
+static int cpu_get_free_index(Error **errp)
+{
+ int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
+
+ if (cpu >= MAX_CPUMASK_BITS) {
+ error_setg(errp, "Trying to use more CPUs than max of %d",
+ MAX_CPUMASK_BITS);
+ return -1;
+ }
+
+ bitmap_set(cpu_index_map, cpu, 1);
+ return cpu;
+}
+
+void cpu_exec_exit(CPUState *cpu)
+{
+ if (cpu->cpu_index == -1) {
+ /* cpu_index was never allocated for this @cpu or was already freed. */
+ return;
+ }
+
+ bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
+ cpu->cpu_index = -1;
+}
+#else
+
+static int cpu_get_free_index(Error **errp)
{
- CPUState *cpu = ENV_GET_CPU(env);
- CPUClass *cc = CPU_GET_CLASS(cpu);
CPUState *some_cpu;
- int cpu_index;
+ int cpu_index = 0;
-#if defined(CONFIG_USER_ONLY)
- cpu_list_lock();
-#endif
- cpu_index = 0;
CPU_FOREACH(some_cpu) {
cpu_index++;
}
- cpu->cpu_index = cpu_index;
- cpu->numa_node = 0;
- QTAILQ_INIT(&cpu->breakpoints);
- QTAILQ_INIT(&cpu->watchpoints);
+ return cpu_index;
+}
+
+void cpu_exec_exit(CPUState *cpu)
+{
+}
+#endif
+
+void cpu_exec_init(CPUState *cpu, Error **errp)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ int cpu_index;
+ Error *local_err = NULL;
+
#ifndef CONFIG_USER_ONLY
cpu->as = &address_space_memory;
cpu->thread_id = qemu_get_thread_id();
cpu_reload_memory_map(cpu);
#endif
+
+#if defined(CONFIG_USER_ONLY)
+ cpu_list_lock();
+#endif
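+ /* cpu_get_free_index() can fail (e.g. when MAX_CPUMASK_BITS CPUs are
+ * already in use); bail out before the CPU is put on the global list.
+ */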
+ cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+#if defined(CONFIG_USER_ONLY)
+ cpu_list_unlock();
+#endif
+ return;
+ }
QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
cpu_list_unlock();
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
- cpu_save, cpu_load, env);
+ cpu_save, cpu_load, cpu->env_ptr);
assert(cc->vmsd == NULL);
assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
} else {
/* must flush all the translated code to avoid inconsistencies */
/* XXX: only flush what is necessary */
- CPUArchState *env = cpu->env_ptr;
- tb_flush(env);
+ tb_flush(cpu);
}
}
}
}
/* Note: start and end must be within the same ram block. */
-void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
- unsigned client)
+bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
+ ram_addr_t length,
+ unsigned client)
{
- if (length == 0)
- return;
- cpu_physical_memory_clear_dirty_range_type(start, length, client);
+ unsigned long end, page;
+ bool dirty;
- if (tcg_enabled()) {
+ if (length == 0) {
+ return false;
+ }
+
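+ /* Atomically fetch and clear the dirty bits for the whole range;
+ * "dirty" says whether at least one page in the range was dirty.
+ */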
+ end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
+ page = start >> TARGET_PAGE_BITS;
+ dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
+ page, end - page);
+
+ if (dirty && tcg_enabled()) {
tlb_reset_dirty_range_all(start, length);
}
-}
-static void cpu_physical_memory_set_dirty_tracking(bool enable)
-{
- in_migration = enable;
+ return dirty;
}
/* Called from RCU critical section */
cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
block->used_length = newsize;
- cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
+ cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
+ DIRTY_CLIENTS_ALL);
memory_region_set_size(block->mr, newsize);
if (block->resized) {
block->resized(block->idstr, newsize, block->host);
}
}
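+ /* Grow the migration dirty bitmap first, so the new block's pages can
+ * be tracked as soon as the block is inserted into the list.
+ */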
+ new_ram_size = MAX(old_ram_size,
+ (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
+ if (new_ram_size > old_ram_size) {
+ migration_bitmap_extend(old_ram_size, new_ram_size);
+ }
/* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
* QLIST (which has an RCU-friendly variant) does not have insertion at
* tail, so save the last element in last_block.
}
}
cpu_physical_memory_set_dirty_range(new_block->offset,
- new_block->used_length);
+ new_block->used_length,
+ DIRTY_CLIENTS_ALL);
if (new_block->host) {
qemu_ram_setup_dump(new_block->host, new_block->max_length);
default:
abort();
}
- cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
+ /* Set both the VGA and migration bits for simplicity, and so that the
+ * notdirty callback can be removed sooner.
+ */
+ cpu_physical_memory_set_dirty_range(ram_addr, size,
+ DIRTY_CLIENTS_NOCODE);
/* we remove the notdirty callback only if the code has been
flushed */
if (!cpu_physical_memory_is_clean(ram_addr)) {
};
/* Generate a debug exception if a watchpoint has been hit. */
-static void check_watchpoint(int offset, int len, int flags)
+static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
CPUState *cpu = current_cpu;
CPUArchState *env = cpu->env_ptr;
wp->flags |= BP_WATCHPOINT_HIT_WRITE;
}
wp->hitaddr = vaddr;
+ wp->hitattrs = attrs;
if (!cpu->watchpoint_hit) {
cpu->watchpoint_hit = wp;
tb_check_watchpoint(cpu);
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
so these check for a hit then pass through to the normal out-of-line
phys routines. */
-static uint64_t watch_mem_read(void *opaque, hwaddr addr,
- unsigned size)
+static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
+ unsigned size, MemTxAttrs attrs)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
+ MemTxResult res;
+ uint64_t data;
+
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
switch (size) {
- case 1: return ldub_phys(&address_space_memory, addr);
- case 2: return lduw_phys(&address_space_memory, addr);
- case 4: return ldl_phys(&address_space_memory, addr);
+ case 1:
+ data = address_space_ldub(&address_space_memory, addr, attrs, &res);
+ break;
+ case 2:
+ data = address_space_lduw(&address_space_memory, addr, attrs, &res);
+ break;
+ case 4:
+ data = address_space_ldl(&address_space_memory, addr, attrs, &res);
+ break;
default: abort();
}
+ *pdata = data;
+ return res;
}
-static void watch_mem_write(void *opaque, hwaddr addr,
- uint64_t val, unsigned size)
+static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size,
+ MemTxAttrs attrs)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
+ MemTxResult res;
+
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
switch (size) {
case 1:
- stb_phys(&address_space_memory, addr, val);
+ address_space_stb(&address_space_memory, addr, val, attrs, &res);
break;
case 2:
- stw_phys(&address_space_memory, addr, val);
+ address_space_stw(&address_space_memory, addr, val, attrs, &res);
break;
case 4:
- stl_phys(&address_space_memory, addr, val);
+ address_space_stl(&address_space_memory, addr, val, attrs, &res);
break;
default: abort();
}
+ return res;
}
static const MemoryRegionOps watch_mem_ops = {
- .read = watch_mem_read,
- .write = watch_mem_write,
+ .read_with_attrs = watch_mem_read,
+ .write_with_attrs = watch_mem_write,
.endianness = DEVICE_NATIVE_ENDIAN,
};
{
subpage_t *subpage = opaque;
uint8_t buf[8];
+ MemTxResult res;
#if defined(DEBUG_SUBPAGE)
printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
subpage, len, addr);
#endif
- if (address_space_read(subpage->as, addr + subpage->base, buf, len)) {
- return MEMTX_DECODE_ERROR;
+ res = address_space_read(subpage->as, addr + subpage->base,
+ attrs, buf, len);
+ if (res) {
+ return res;
}
switch (len) {
case 1:
default:
abort();
}
- if (address_space_write(subpage->as, addr + subpage->base, buf, len)) {
- return MEMTX_DECODE_ERROR;
- }
- return MEMTX_OK;
+ return address_space_write(subpage->as, addr + subpage->base,
+ attrs, buf, len);
}
static bool subpage_accepts(void *opaque, hwaddr addr,
}
}
-static void core_log_global_start(MemoryListener *listener)
-{
- cpu_physical_memory_set_dirty_tracking(true);
-}
-
-static void core_log_global_stop(MemoryListener *listener)
-{
- cpu_physical_memory_set_dirty_tracking(false);
-}
-
-static MemoryListener core_memory_listener = {
- .log_global_start = core_log_global_start,
- .log_global_stop = core_log_global_stop,
- .priority = 1,
-};
-
void address_space_init_dispatch(AddressSpace *as)
{
as->dispatch = NULL;
memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
65536);
address_space_init(&address_space_io, system_io, "I/O");
-
- memory_listener_register(&core_memory_listener, &address_space_memory);
}
MemoryRegion *get_system_memory(void)
#else
-static void invalidate_and_set_dirty(hwaddr addr,
+static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr length)
{
- if (cpu_physical_memory_range_includes_clean(addr, length)) {
- tb_invalidate_phys_range(addr, addr + length, 0);
- cpu_physical_memory_set_dirty_range_nocode(addr, length);
+ uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
+ /* No early return if dirty_log_mask is or becomes 0, because
+ * cpu_physical_memory_set_dirty_range will still call
+ * xen_modified_memory.
+ */
+ if (dirty_log_mask) {
+ dirty_log_mask =
+ cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
}
- xen_modified_memory(addr, length);
+ if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
+ tb_invalidate_phys_range(addr, addr + length);
+ dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
+ }
+ cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
return l;
}
-bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
- int len, bool is_write)
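+/* Take the BQL if the region still relies on global locking, and flush
+ * any coalesced MMIO that must be ordered before this access. Returns
+ * true if the caller must release the BQL again after the access.
+ */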
+static bool prepare_mmio_access(MemoryRegion *mr)
+{
+ bool unlocked = !qemu_mutex_iothread_locked();
+ bool release_lock = false;
+
+ if (unlocked && mr->global_locking) {
+ qemu_mutex_lock_iothread();
+ unlocked = false;
+ release_lock = true;
+ }
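+ /* Coalesced MMIO is flushed under the BQL even when the region itself
+ * does not require global locking.
+ */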
+ if (mr->flush_coalesced_mmio) {
+ if (unlocked) {
+ qemu_mutex_lock_iothread();
+ }
+ qemu_flush_coalesced_mmio_buffer();
+ if (unlocked) {
+ qemu_mutex_unlock_iothread();
+ }
+ }
+
+ return release_lock;
+}
+
+MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
+ uint8_t *buf, int len, bool is_write)
{
hwaddr l;
uint8_t *ptr;
hwaddr addr1;
MemoryRegion *mr;
MemTxResult result = MEMTX_OK;
- MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
+ bool release_lock = false;
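+ /* The translate/access loop runs under RCU; the BQL is only taken, via
+ * prepare_mmio_access(), for regions that still require it.
+ */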
+ rcu_read_lock();
while (len > 0) {
l = len;
mr = address_space_translate(as, addr, &addr1, &l, is_write);
if (is_write) {
if (!memory_access_is_direct(mr, is_write)) {
+ release_lock |= prepare_mmio_access(mr);
l = memory_access_size(mr, l, addr1);
/* XXX: could force current_cpu to NULL to avoid
potential bugs */
/* RAM case */
ptr = qemu_get_ram_ptr(addr1);
memcpy(ptr, buf, l);
- invalidate_and_set_dirty(addr1, l);
+ invalidate_and_set_dirty(mr, addr1, l);
}
} else {
if (!memory_access_is_direct(mr, is_write)) {
/* I/O case */
+ release_lock |= prepare_mmio_access(mr);
l = memory_access_size(mr, l, addr1);
switch (l) {
case 8:
memcpy(buf, ptr, l);
}
}
+
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
+ release_lock = false;
+ }
+
len -= l;
buf += l;
addr += l;
}
+ rcu_read_unlock();
return result;
}
-bool address_space_write(AddressSpace *as, hwaddr addr,
- const uint8_t *buf, int len)
+MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
+ const uint8_t *buf, int len)
{
- return address_space_rw(as, addr, (uint8_t *)buf, len, true);
+ return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
}
-bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
+MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
+ uint8_t *buf, int len)
{
- return address_space_rw(as, addr, buf, len, false);
+ return address_space_rw(as, addr, attrs, buf, len, false);
}
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
int len, int is_write)
{
- address_space_rw(&address_space_memory, addr, buf, len, is_write);
+ address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
+ buf, len, is_write);
}
enum write_rom_type {
hwaddr addr1;
MemoryRegion *mr;
+ rcu_read_lock();
while (len > 0) {
l = len;
mr = address_space_translate(as, addr, &addr1, &l, true);
if (!(memory_region_is_ram(mr) ||
memory_region_is_romd(mr))) {
- /* do nothing */
+ l = memory_access_size(mr, l, addr1);
} else {
addr1 += memory_region_get_ram_addr(mr);
/* ROM/RAM case */
switch (type) {
case WRITE_DATA:
memcpy(ptr, buf, l);
- invalidate_and_set_dirty(addr1, l);
+ invalidate_and_set_dirty(mr, addr1, l);
break;
case FLUSH_CACHE:
flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
buf += l;
addr += l;
}
+ rcu_read_unlock();
}
/* used for ROM loading: can write in RAM and ROM */
void *buffer;
hwaddr addr;
hwaddr len;
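+ /* Claimed with an atomic xchg; only one mapping may use the bounce
+ * buffer at a time.
+ */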
+ bool in_use;
} BounceBuffer;
static BounceBuffer bounce;
typedef struct MapClient {
- void *opaque;
- void (*callback)(void *opaque);
+ QEMUBH *bh;
QLIST_ENTRY(MapClient) link;
} MapClient;
+QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
= QLIST_HEAD_INITIALIZER(map_client_list);
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
+static void cpu_unregister_map_client_do(MapClient *client)
+{
+ QLIST_REMOVE(client, link);
+ g_free(client);
+}
+
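+/* Called with map_client_list_lock held: schedule every registered BH
+ * once and drop it from the list.
+ */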
+static void cpu_notify_map_clients_locked(void)
+{
+ MapClient *client;
+
+ while (!QLIST_EMPTY(&map_client_list)) {
+ client = QLIST_FIRST(&map_client_list);
+ qemu_bh_schedule(client->bh);
+ cpu_unregister_map_client_do(client);
+ }
+}
+
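+/* Arrange for @bh to run once the bounce buffer becomes free. If the
+ * buffer is free already, the notification is scheduled right away.
+ */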
+void cpu_register_map_client(QEMUBH *bh)
{
MapClient *client = g_malloc(sizeof(*client));
- client->opaque = opaque;
- client->callback = callback;
+ qemu_mutex_lock(&map_client_list_lock);
+ client->bh = bh;
QLIST_INSERT_HEAD(&map_client_list, client, link);
- return client;
+ if (!atomic_read(&bounce.in_use)) {
+ cpu_notify_map_clients_locked();
+ }
+ qemu_mutex_unlock(&map_client_list_lock);
}
-static void cpu_unregister_map_client(void *_client)
+void cpu_exec_init_all(void)
{
- MapClient *client = (MapClient *)_client;
-
- QLIST_REMOVE(client, link);
- g_free(client);
+ qemu_mutex_init(&ram_list.mutex);
+ memory_map_init();
+ io_mem_init();
+ qemu_mutex_init(&map_client_list_lock);
}
-static void cpu_notify_map_clients(void)
+void cpu_unregister_map_client(QEMUBH *bh)
{
MapClient *client;
- while (!QLIST_EMPTY(&map_client_list)) {
- client = QLIST_FIRST(&map_client_list);
- client->callback(client->opaque);
- cpu_unregister_map_client(client);
+ qemu_mutex_lock(&map_client_list_lock);
+ QLIST_FOREACH(client, &map_client_list, link) {
+ if (client->bh == bh) {
+ cpu_unregister_map_client_do(client);
+ break;
+ }
}
+ qemu_mutex_unlock(&map_client_list_lock);
+}
+
+static void cpu_notify_map_clients(void)
+{
+ qemu_mutex_lock(&map_client_list_lock);
+ cpu_notify_map_clients_locked();
+ qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
MemoryRegion *mr;
hwaddr l, xlat;
+ rcu_read_lock();
while (len > 0) {
l = len;
mr = address_space_translate(as, addr, &xlat, &l, is_write);
len -= l;
addr += l;
}
+ rcu_read_unlock();
return true;
}
}
l = len;
+ rcu_read_lock();
mr = address_space_translate(as, addr, &xlat, &l, is_write);
+
if (!memory_access_is_direct(mr, is_write)) {
- if (bounce.buffer) {
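+ /* Try to claim the single bounce buffer; if another mapping holds it,
+ * fail so the caller can wait via cpu_register_map_client().
+ */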
+ if (atomic_xchg(&bounce.in_use, true)) {
+ rcu_read_unlock();
return NULL;
}
/* Avoid unbounded allocations */
memory_region_ref(mr);
bounce.mr = mr;
if (!is_write) {
- address_space_read(as, addr, bounce.buffer, l);
+ address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
+ bounce.buffer, l);
}
+ rcu_read_unlock();
*plen = l;
return bounce.buffer;
}
}
memory_region_ref(mr);
+ rcu_read_unlock();
*plen = done;
return qemu_ram_ptr_length(raddr + base, plen);
}
mr = qemu_ram_addr_from_host(buffer, &addr1);
assert(mr != NULL);
if (is_write) {
- invalidate_and_set_dirty(addr1, access_len);
+ invalidate_and_set_dirty(mr, addr1, access_len);
}
if (xen_enabled()) {
xen_invalidate_map_cache_entry(buffer);
return;
}
if (is_write) {
- address_space_write(as, bounce.addr, bounce.buffer, access_len);
+ address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
+ bounce.buffer, access_len);
}
qemu_vfree(bounce.buffer);
bounce.buffer = NULL;
memory_region_unref(bounce.mr);
+ atomic_mb_set(&bounce.in_use, false);
cpu_notify_map_clients();
}
}
/* warning: addr must be aligned */
-static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
- enum device_endian endian)
+static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs,
+ MemTxResult *result,
+ enum device_endian endian)
{
uint8_t *ptr;
uint64_t val;
MemoryRegion *mr;
hwaddr l = 4;
hwaddr addr1;
+ MemTxResult r;
+ bool release_lock = false;
+ rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l, false);
if (l < 4 || !memory_access_is_direct(mr, false)) {
+ release_lock |= prepare_mmio_access(mr);
+
/* I/O case */
- memory_region_dispatch_read(mr, addr1, &val, 4,
- MEMTXATTRS_UNSPECIFIED);
+ r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap32(val);
val = ldl_p(ptr);
break;
}
+ r = MEMTX_OK;
+ }
+ if (result) {
+ *result = r;
+ }
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
}
+ rcu_read_unlock();
return val;
}
+uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result)
+{
+ return address_space_ldl_internal(as, addr, attrs, result,
+ DEVICE_NATIVE_ENDIAN);
+}
+
+uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result)
+{
+ return address_space_ldl_internal(as, addr, attrs, result,
+ DEVICE_LITTLE_ENDIAN);
+}
+
+uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result)
+{
+ return address_space_ldl_internal(as, addr, attrs, result,
+ DEVICE_BIG_ENDIAN);
+}
+
uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
- return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
+ return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
- return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
+ return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
- return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
+ return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
-static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
- enum device_endian endian)
+static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs,
+ MemTxResult *result,
+ enum device_endian endian)
{
uint8_t *ptr;
uint64_t val;
MemoryRegion *mr;
hwaddr l = 8;
hwaddr addr1;
+ MemTxResult r;
+ bool release_lock = false;
+ rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
false);
if (l < 8 || !memory_access_is_direct(mr, false)) {
+ release_lock |= prepare_mmio_access(mr);
+
/* I/O case */
- memory_region_dispatch_read(mr, addr1, &val, 8,
- MEMTXATTRS_UNSPECIFIED);
+ r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap64(val);
val = ldq_p(ptr);
break;
}
+ r = MEMTX_OK;
+ }
+ if (result) {
+ *result = r;
+ }
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
}
+ rcu_read_unlock();
return val;
}
+uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result)
+{
+ return address_space_ldq_internal(as, addr, attrs, result,
+ DEVICE_NATIVE_ENDIAN);
+}
+
+uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result)
+{
+ return address_space_ldq_internal(as, addr, attrs, result,
+ DEVICE_LITTLE_ENDIAN);
+}
+
+uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result)
+{
+ return address_space_ldq_internal(as, addr, attrs, result,
+ DEVICE_BIG_ENDIAN);
+}
+
uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
- return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
+ return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
- return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
+ return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
- return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
+ return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* XXX: optimize */
-uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
+uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result)
{
uint8_t val;
- address_space_rw(as, addr, &val, 1, 0);
+ MemTxResult r;
+
+ r = address_space_rw(as, addr, attrs, &val, 1, 0);
+ if (result) {
+ *result = r;
+ }
return val;
}
+uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
+{
+ return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
/* warning: addr must be aligned */
-static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
- enum device_endian endian)
+static inline uint32_t address_space_lduw_internal(AddressSpace *as,
+ hwaddr addr,
+ MemTxAttrs attrs,
+ MemTxResult *result,
+ enum device_endian endian)
{
uint8_t *ptr;
uint64_t val;
MemoryRegion *mr;
hwaddr l = 2;
hwaddr addr1;
+ MemTxResult r;
+ bool release_lock = false;
+ rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
false);
if (l < 2 || !memory_access_is_direct(mr, false)) {
+ release_lock |= prepare_mmio_access(mr);
+
/* I/O case */
- memory_region_dispatch_read(mr, addr1, &val, 2,
- MEMTXATTRS_UNSPECIFIED);
+ r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap16(val);
val = lduw_p(ptr);
break;
}
+ r = MEMTX_OK;
+ }
+ if (result) {
+ *result = r;
}
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
+ }
+ rcu_read_unlock();
return val;
}
+uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result)
+{
+ return address_space_lduw_internal(as, addr, attrs, result,
+ DEVICE_NATIVE_ENDIAN);
+}
+
+uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result)
+{
+ return address_space_lduw_internal(as, addr, attrs, result,
+ DEVICE_LITTLE_ENDIAN);
+}
+
+uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result)
+{
+ return address_space_lduw_internal(as, addr, attrs, result,
+ DEVICE_BIG_ENDIAN);
+}
+
uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
- return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
+ return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
- return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
+ return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
- return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
+ return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
and the code inside is not invalidated. It is useful if the dirty
bits are used to track modified PTEs */
-void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
+void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
+ MemTxAttrs attrs, MemTxResult *result)
{
uint8_t *ptr;
MemoryRegion *mr;
hwaddr l = 4;
hwaddr addr1;
+ MemTxResult r;
+ uint8_t dirty_log_mask;
+ bool release_lock = false;
+ rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
true);
if (l < 4 || !memory_access_is_direct(mr, true)) {
- memory_region_dispatch_write(mr, addr1, val, 4,
- MEMTXATTRS_UNSPECIFIED);
+ release_lock |= prepare_mmio_access(mr);
+
+ r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
} else {
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
ptr = qemu_get_ram_ptr(addr1);
stl_p(ptr, val);
- if (unlikely(in_migration)) {
- if (cpu_physical_memory_is_clean(addr1)) {
- /* invalidate code */
- tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
- /* set dirty bit */
- cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
- }
- }
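+ /* Report the write for migration/VGA, but deliberately leave
+ * DIRTY_MEMORY_CODE alone so the translated code is not invalidated.
+ */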
+ dirty_log_mask = memory_region_get_dirty_log_mask(mr);
+ dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
+ cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
+ r = MEMTX_OK;
+ }
+ if (result) {
+ *result = r;
}
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
+ }
+ rcu_read_unlock();
+}
+
+void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
+{
+ address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
-static inline void stl_phys_internal(AddressSpace *as,
- hwaddr addr, uint32_t val,
- enum device_endian endian)
+static inline void address_space_stl_internal(AddressSpace *as,
+ hwaddr addr, uint32_t val,
+ MemTxAttrs attrs,
+ MemTxResult *result,
+ enum device_endian endian)
{
uint8_t *ptr;
MemoryRegion *mr;
hwaddr l = 4;
hwaddr addr1;
+ MemTxResult r;
+ bool release_lock = false;
+ rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
true);
if (l < 4 || !memory_access_is_direct(mr, true)) {
+ release_lock |= prepare_mmio_access(mr);
+
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap32(val);
val = bswap32(val);
}
#endif
- memory_region_dispatch_write(mr, addr1, val, 4,
- MEMTXATTRS_UNSPECIFIED);
+ r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
} else {
/* RAM case */
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
stl_p(ptr, val);
break;
}
- invalidate_and_set_dirty(addr1, 4);
+ invalidate_and_set_dirty(mr, addr1, 4);
+ r = MEMTX_OK;
+ }
+ if (result) {
+ *result = r;
+ }
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
}
+ rcu_read_unlock();
+}
+
+void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
+ MemTxAttrs attrs, MemTxResult *result)
+{
+ address_space_stl_internal(as, addr, val, attrs, result,
+ DEVICE_NATIVE_ENDIAN);
+}
+
+void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
+ MemTxAttrs attrs, MemTxResult *result)
+{
+ address_space_stl_internal(as, addr, val, attrs, result,
+ DEVICE_LITTLE_ENDIAN);
+}
+
+void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
+ MemTxAttrs attrs, MemTxResult *result)
+{
+ address_space_stl_internal(as, addr, val, attrs, result,
+ DEVICE_BIG_ENDIAN);
}
void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
- stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
+ address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
- stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
+ address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
- stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
+ address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* XXX: optimize */
-void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
+void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
+ MemTxAttrs attrs, MemTxResult *result)
{
uint8_t v = val;
- address_space_rw(as, addr, &v, 1, 1);
+ MemTxResult r;
+
+ r = address_space_rw(as, addr, attrs, &v, 1, 1);
+ if (result) {
+ *result = r;
+ }
+}
+
+void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
+{
+ address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
-static inline void stw_phys_internal(AddressSpace *as,
- hwaddr addr, uint32_t val,
- enum device_endian endian)
+static inline void address_space_stw_internal(AddressSpace *as,
+ hwaddr addr, uint32_t val,
+ MemTxAttrs attrs,
+ MemTxResult *result,
+ enum device_endian endian)
{
uint8_t *ptr;
MemoryRegion *mr;
hwaddr l = 2;
hwaddr addr1;
+ MemTxResult r;
+ bool release_lock = false;
+ rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l, true);
if (l < 2 || !memory_access_is_direct(mr, true)) {
+ release_lock |= prepare_mmio_access(mr);
+
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap16(val);
val = bswap16(val);
}
#endif
- memory_region_dispatch_write(mr, addr1, val, 2,
- MEMTXATTRS_UNSPECIFIED);
+ r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
} else {
/* RAM case */
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
stw_p(ptr, val);
break;
}
- invalidate_and_set_dirty(addr1, 2);
+ invalidate_and_set_dirty(mr, addr1, 2);
+ r = MEMTX_OK;
+ }
+ if (result) {
+ *result = r;
+ }
+ if (release_lock) {
+ qemu_mutex_unlock_iothread();
}
+ rcu_read_unlock();
+}
+
+void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
+ MemTxAttrs attrs, MemTxResult *result)
+{
+ address_space_stw_internal(as, addr, val, attrs, result,
+ DEVICE_NATIVE_ENDIAN);
+}
+
+void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
+ MemTxAttrs attrs, MemTxResult *result)
+{
+ address_space_stw_internal(as, addr, val, attrs, result,
+ DEVICE_LITTLE_ENDIAN);
+}
+
+void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
+ MemTxAttrs attrs, MemTxResult *result)
+{
+ address_space_stw_internal(as, addr, val, attrs, result,
+ DEVICE_BIG_ENDIAN);
}
void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
- stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
+ address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
- stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
+ address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
- stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
+ address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* XXX: optimize */
-void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
+void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
+ MemTxAttrs attrs, MemTxResult *result)
{
+ MemTxResult r;
val = tswap64(val);
- address_space_rw(as, addr, (void *) &val, 8, 1);
+ r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
+ if (result) {
+ *result = r;
+ }
}
-void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
+void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
+ MemTxAttrs attrs, MemTxResult *result)
{
+ MemTxResult r;
val = cpu_to_le64(val);
- address_space_rw(as, addr, (void *) &val, 8, 1);
+ r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
+ if (result) {
+ *result = r;
+ }
+}
+void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
+ MemTxAttrs attrs, MemTxResult *result)
+{
+ MemTxResult r;
+ val = cpu_to_be64(val);
+ r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
+ if (result) {
+ *result = r;
+ }
+}
+
+void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
+{
+ address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
+{
+ address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
- val = cpu_to_be64(val);
- address_space_rw(as, addr, (void *) &val, 8, 1);
+ address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* virtual memory access for debug (includes writing to ROM) */
if (is_write) {
cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
} else {
- address_space_rw(cpu->as, phys_addr, buf, l, 0);
+ address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
+ buf, l, 0);
}
len -= l;
buf += l;
{
MemoryRegion *mr;
hwaddr l = 1;
+ bool res;
+ rcu_read_lock();
mr = address_space_translate(&address_space_memory,
phys_addr, &phys_addr, &l, false);
- return !(memory_region_is_ram(mr) ||
- memory_region_is_romd(mr));
+ res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
+ rcu_read_unlock();
+ return res;
}
-void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
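+/* Walk all RAMBlocks under RCU, stopping at the first callback that
+ * returns nonzero and passing that value back to the caller.
+ */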
+int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
RAMBlock *block;
+ int ret = 0;
rcu_read_lock();
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
- func(block->host, block->offset, block->used_length, opaque);
+ ret = func(block->idstr, block->host, block->offset,
+ block->used_length, opaque);
+ if (ret) {
+ break;
+ }
}
rcu_read_unlock();
+ return ret;
}
#endif