#include "sysemu/hw_accel.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
-#include "trace-root.h"
+#include "trace/trace-root.h"
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
#include <linux/falloc.h>
#include "monitor/monitor.h"
+#ifdef CONFIG_LIBDAXCTL
+#include <daxctl/libdaxctl.h>
+#endif
+
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static MemoryRegion io_mem_unassigned;
#endif
-CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
-
-/* current CPU in the current thread. It is only valid inside
- cpu_exec() */
-__thread CPUState *current_cpu;
-
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;
#if !defined(CONFIG_USER_ONLY)
-/* 0 = Do not count executed instructions.
- 1 = Precise instruction counting.
- 2 = Adaptive rate instruction counting. */
-int use_icount;
typedef struct PhysPageEntry PhysPageEntry;
hwaddr addr,
bool resolve_subpage)
{
- MemoryRegionSection *section = atomic_read(&d->mru_section);
+ MemoryRegionSection *section = qatomic_read(&d->mru_section);
subpage_t *subpage;
if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
!section_covers_addr(section, addr)) {
section = phys_page_find(d, addr);
- atomic_set(&d->mru_section, section);
+ qatomic_set(&d->mru_section, section);
}
if (resolve_subpage && section->mr->subpage) {
subpage = container_of(section->mr, subpage_t, iomem);
*/
MemoryRegion *mr = MEMORY_REGION(iommu_mr);
TCGIOMMUNotifier *notifier;
- Error *err = NULL;
- int i, ret;
+ int i;
for (i = 0; i < cpu->iommu_notifiers->len; i++) {
notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
0,
HWADDR_MAX,
iommu_idx);
- ret = memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
- &err);
- if (ret) {
- error_report_err(err);
- exit(1);
- }
+ memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
+ &error_fatal);
}
if (!notifier->active) {
IOMMUMemoryRegionClass *imrc;
IOMMUTLBEntry iotlb;
int iommu_idx;
- AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
+ AddressSpaceDispatch *d =
+ qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
for (;;) {
section = address_space_translate_internal(d, addr, &addr, plen, false);
}
};
-#endif
-
-CPUState *qemu_get_cpu(int index)
-{
- CPUState *cpu;
-
- CPU_FOREACH(cpu) {
- if (cpu->cpu_index == index) {
- return cpu;
- }
- }
-
- return NULL;
-}
-
-#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, int asidx,
const char *prefix, MemoryRegion *mr)
{
{
CPUClass *cc = CPU_GET_CLASS(cpu);
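+ /* Free the TLB tables dynamically allocated for this vCPU. */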
+ tlb_destroy(cpu);
cpu_list_remove(cpu);
if (cc->vmsd != NULL) {
DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
MemoryRegion *),
#endif
+ DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
DEFINE_PROP_END_OF_LIST(),
};
{
RAMBlock *block;
- block = atomic_rcu_read(&ram_list.mru_block);
+ block = qatomic_rcu_read(&ram_list.mru_block);
if (block && addr - block->offset < block->max_length) {
return block;
}
* call_rcu(reclaim_ramblock, xxx);
* rcu_read_unlock()
*
- * atomic_rcu_set is not needed here. The block was already published
+ * qatomic_rcu_set is not needed here. The block was already published
* when it was placed into the list. Here we're just making an extra
* copy of the pointer.
*/
page = start_page;
WITH_RCU_READ_LOCK_GUARD() {
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
ramblock = qemu_get_ram_block(start);
/* Range sanity check on the ramblock */
assert(start >= ramblock->offset &&
dest = 0;
WITH_RCU_READ_LOCK_GUARD() {
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
while (page < end) {
unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
return size;
}
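+/*
+ * Return the alignment required by the backing store behind @fd: the
+ * daxctl region alignment for Linux devdax character devices, -1 when
+ * no alignment constraint is known, or -errno if fstat() fails.
+ */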
+static int64_t get_file_align(int fd)
+{
+ int64_t align = -1;
+#if defined(__linux__) && defined(CONFIG_LIBDAXCTL)
+ struct stat st;
+
+ if (fstat(fd, &st) < 0) {
+ return -errno;
+ }
+
+ /* Special handling for devdax character devices */
+ if (S_ISCHR(st.st_mode)) {
+ g_autofree char *path = NULL;
+ g_autofree char *rpath = NULL;
+ struct daxctl_ctx *ctx;
+ struct daxctl_region *region;
+ int rc = 0;
+
+ path = g_strdup_printf("/sys/dev/char/%d:%d",
+ major(st.st_rdev), minor(st.st_rdev));
+ rpath = realpath(path, NULL);
+ if (!rpath) {
+ return -errno;
+ }
+
+ rc = daxctl_new(&ctx);
+ if (rc) {
+ return -1;
+ }
+
+ daxctl_region_foreach(ctx, region) {
+ if (strstr(rpath, daxctl_region_get_path(region))) {
+ align = daxctl_region_get_align(region);
+ break;
+ }
+ }
+ daxctl_unref(ctx);
+ }
+#endif /* defined(__linux__) && defined(CONFIG_LIBDAXCTL) */
+
+ return align;
+}
+
static int file_ram_open(const char *path,
const char *region_name,
bool *created,
DirtyMemoryBlocks *new_blocks;
int j;
- old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
+ old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]);
new_blocks = g_malloc(sizeof(*new_blocks) +
sizeof(new_blocks->blocks[0]) * new_num_blocks);
new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
}
- atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
+ qatomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
if (old_blocks) {
g_free_rcu(old_blocks, rcu);
{
RAMBlock *new_block;
Error *local_err = NULL;
- int64_t file_size;
+ int64_t file_size, file_align;
/* Just support these ram flags by now. */
assert((ram_flags & ~(RAM_SHARED | RAM_PMEM)) == 0);
return NULL;
}
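+ /*
+ * A devdax backing store can require a larger alignment than the
+ * memory region was configured with; such a file typically cannot be
+ * mapped below its alignment, so reject the configuration up front.
+ */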
+ file_align = get_file_align(fd);
+ if (file_align > 0 && mr && file_align > mr->align) {
+ error_setg(errp, "backing store align 0x%" PRIx64
+ " is larger than 'align' option 0x%" PRIx64,
+ file_align, mr->align);
+ return NULL;
+ }
+
new_block = g_malloc0(sizeof(*new_block));
new_block->mr = mr;
new_block->used_length = size;
}
RCU_READ_LOCK_GUARD();
- block = atomic_rcu_read(&ram_list.mru_block);
+ block = qatomic_rcu_read(&ram_list.mru_block);
if (block && block->host && host - block->host < block->max_length) {
goto found;
}
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
if (watchpoint_address_matches(wp, addr, len)
&& (wp->flags & flags)) {
+ if (replay_running_debug()) {
+ /*
+ * Don't process the watchpoints when we are
+ * in a reverse debugging operation.
+ */
+ replay_breakpoint();
+ return;
+ }
if (flags == BP_MEM_READ) {
wp->flags |= BP_WATCHPOINT_HIT_READ;
} else {
{
int asidx = cpu_asidx_from_attrs(cpu, attrs);
CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
- AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
+ AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch);
MemoryRegionSection *sections = d->map.sections;
return §ions[index & ~TARGET_PAGE_MASK];
* may have split the RCU critical section.
*/
d = address_space_to_dispatch(cpuas->as);
- atomic_rcu_set(&cpuas->memory_dispatch, d);
+ qatomic_rcu_set(&cpuas->memory_dispatch, d);
tlb_flush(cpuas->cpu);
}
bool unlocked = !qemu_mutex_iothread_locked();
bool release_lock = false;
- if (unlocked && mr->global_locking) {
+ if (unlocked) {
qemu_mutex_lock_iothread();
unlocked = false;
release_lock = true;
qemu_mutex_lock(&map_client_list_lock);
client->bh = bh;
QLIST_INSERT_HEAD(&map_client_list, client, link);
- if (!atomic_read(&bounce.in_use)) {
+ if (!qatomic_read(&bounce.in_use)) {
cpu_notify_map_clients_locked();
}
qemu_mutex_unlock(&map_client_list_lock);
mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
if (!memory_access_is_direct(mr, is_write)) {
- if (atomic_xchg(&bounce.in_use, true)) {
+ if (qatomic_xchg(&bounce.in_use, true)) {
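+ /* The bounce buffer is busy: report a zero-length mapping to the caller. */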
+ *plen = 0;
return NULL;
}
/* Avoid unbounded allocations */
qemu_vfree(bounce.buffer);
bounce.buffer = NULL;
memory_region_unref(bounce.mr);
- atomic_mb_set(&bounce.in_use, false);
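+ /* The barrier in qatomic_mb_set() orders the teardown above before clearing in_use. */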
+ qatomic_mb_set(&bounce.in_use, false);
cpu_notify_map_clients();
}
#define TRANSLATE(...) address_space_translate(as, __VA_ARGS__)
#define RCU_READ_LOCK(...) rcu_read_lock()
#define RCU_READ_UNLOCK(...) rcu_read_unlock()
-#include "memory_ldst.inc.c"
+#include "memory_ldst.c.inc"
int64_t address_space_cache_init(MemoryRegionCache *cache,
AddressSpace *as,
/* Called from RCU critical section. address_space_read_cached uses this
* out of line function when the target is an MMIO or IOMMU region.
*/
-void
+MemTxResult
address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
void *buf, hwaddr len)
{
l = len;
mr = address_space_translate_cached(cache, addr, &addr1, &l, false,
MEMTXATTRS_UNSPECIFIED);
- flatview_read_continue(cache->fv,
- addr, MEMTXATTRS_UNSPECIFIED, buf, len,
- addr1, l, mr);
+ return flatview_read_continue(cache->fv,
+ addr, MEMTXATTRS_UNSPECIFIED, buf, len,
+ addr1, l, mr);
}
/* Called from RCU critical section. address_space_write_cached uses this
* out of line function when the target is an MMIO or IOMMU region.
*/
-void
+MemTxResult
address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
const void *buf, hwaddr len)
{
l = len;
mr = address_space_translate_cached(cache, addr, &addr1, &l, true,
MEMTXATTRS_UNSPECIFIED);
- flatview_write_continue(cache->fv,
- addr, MEMTXATTRS_UNSPECIFIED, buf, len,
- addr1, l, mr);
+ return flatview_write_continue(cache->fv,
+ addr, MEMTXATTRS_UNSPECIFIED, buf, len,
+ addr1, l, mr);
}
#define ARG1_DECL MemoryRegionCache *cache
#define TRANSLATE(...) address_space_translate_cached(cache, __VA_ARGS__)
#define RCU_READ_LOCK() ((void)0)
#define RCU_READ_UNLOCK() ((void)0)
-#include "memory_ldst.inc.c"
+#include "memory_ldst.c.inc"
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
while (len > 0) {
int asidx;
MemTxAttrs attrs;
+ MemTxResult res;
page = addr & TARGET_PAGE_MASK;
phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
l = len;
phys_addr += (addr & ~TARGET_PAGE_MASK);
if (is_write) {
- address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
- attrs, buf, l);
+ res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
+ attrs, buf, l);
} else {
- address_space_read(cpu->cpu_ases[asidx].as, phys_addr, attrs, buf,
- l);
+ res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr,
+ attrs, buf, l);
+ }
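+ /* A failed transaction means the debug access cannot complete. */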
+ if (res != MEMTX_OK) {
+ return -1;
}
len -= l;
buf += l;
}
}
+/*
+ * If positive, discarding RAM is disabled. If negative, discarding RAM is
+ * required to work and cannot be disabled.
+ */
+static int ram_block_discard_disabled;
+
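+/*
+ * Disable (state == true) or re-enable (state == false) discarding of RAM.
+ * Fails with -EBUSY if discarding is already required by some user, i.e.
+ * the counter is negative.
+ */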
+int ram_block_discard_disable(bool state)
+{
+ int old;
+
+ if (!state) {
+ qatomic_dec(&ram_block_discard_disabled);
+ return 0;
+ }
+
+ do {
+ old = qatomic_read(&ram_block_discard_disabled);
+ if (old < 0) {
+ return -EBUSY;
+ }
+ } while (qatomic_cmpxchg(&ram_block_discard_disabled,
+ old, old + 1) != old);
+ return 0;
+}
+
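+/*
+ * Require (state == true) or stop requiring (state == false) that
+ * discarding RAM works. Fails with -EBUSY if discarding has been
+ * disabled by another user, i.e. the counter is positive.
+ */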
+int ram_block_discard_require(bool state)
+{
+ int old;
+
+ if (!state) {
+ qatomic_inc(&ram_block_discard_disabled);
+ return 0;
+ }
+
+ do {
+ old = qatomic_read(&ram_block_discard_disabled);
+ if (old > 0) {
+ return -EBUSY;
+ }
+ } while (qatomic_cmpxchg(&ram_block_discard_disabled,
+ old, old - 1) != old);
+ return 0;
+}
+
+bool ram_block_discard_is_disabled(void)
+{
+ return qatomic_read(&ram_block_discard_disabled) > 0;
+}
+
+bool ram_block_discard_is_required(void)
+{
+ return qatomic_read(&ram_block_discard_disabled) < 0;
+}
+
#endif