X-Git-Url: https://repo.jachan.dev/qemu.git/blobdiff_plain/9f1f264edbdf5516d6f208497310b3eedbc7b74c..2acf4f8fdd1fb1d7d76fa26e67b39af898df0aed:/exec.c

diff --git a/exec.c b/exec.c
index a0bf9d61c8..bca441f7fd 100644
--- a/exec.c
+++ b/exec.c
@@ -50,7 +50,7 @@
 #include "sysemu/hw_accel.h"
 #include "exec/address-spaces.h"
 #include "sysemu/xen-mapcache.h"
-#include "trace-root.h"
+#include "trace/trace-root.h"
 
 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
 #include <linux/falloc.h>
@@ -77,6 +77,10 @@
 #include "monitor/monitor.h"
 
+#ifdef CONFIG_LIBDAXCTL
+#include <daxctl/libdaxctl.h>
+#endif
+
 //#define DEBUG_SUBPAGE
 
 #if !defined(CONFIG_USER_ONLY)
@@ -94,20 +98,10 @@ AddressSpace address_space_memory;
 static MemoryRegion io_mem_unassigned;
 #endif
 
-CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
-
-/* current CPU in the current thread. It is only valid inside
-   cpu_exec() */
-__thread CPUState *current_cpu;
-
 uintptr_t qemu_host_page_size;
 intptr_t qemu_host_page_mask;
 
 #if !defined(CONFIG_USER_ONLY)
-/* 0 = Do not count executed instructions.
-   1 = Precise instruction counting.
-   2 = Adaptive rate instruction counting.  */
-int use_icount;
 
 typedef struct PhysPageEntry PhysPageEntry;
 
@@ -355,13 +349,13 @@ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                         hwaddr addr,
                                                         bool resolve_subpage)
 {
-    MemoryRegionSection *section = atomic_read(&d->mru_section);
+    MemoryRegionSection *section = qatomic_read(&d->mru_section);
     subpage_t *subpage;
 
     if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
         !section_covers_addr(section, addr)) {
         section = phys_page_find(d, addr);
-        atomic_set(&d->mru_section, section);
+        qatomic_set(&d->mru_section, section);
     }
     if (resolve_subpage && section->mr->subpage) {
         subpage = container_of(section->mr, subpage_t, iomem);
@@ -629,8 +623,7 @@ static void tcg_register_iommu_notifier(CPUState *cpu,
      */
     MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    TCGIOMMUNotifier *notifier;
-    Error *err = NULL;
-    int i, ret;
+    int i;
 
     for (i = 0; i < cpu->iommu_notifiers->len; i++) {
         notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
@@ -659,12 +652,8 @@ static void tcg_register_iommu_notifier(CPUState *cpu,
                             0,
                             HWADDR_MAX,
                             iommu_idx);
-        ret = memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
-                                                    &err);
-        if (ret) {
-            error_report_err(err);
-            exit(1);
-        }
+        memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
+                                              &error_fatal);
     }
 
     if (!notifier->active) {
@@ -697,7 +686,8 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
     IOMMUMemoryRegionClass *imrc;
     IOMMUTLBEntry iotlb;
     int iommu_idx;
-    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
+    AddressSpaceDispatch *d =
+        qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
 
     for (;;) {
         section = address_space_translate_internal(d, addr, &addr, plen, false);
@@ -828,22 +818,6 @@ const VMStateDescription vmstate_cpu_common = {
     }
 };
 
-#endif
-
-CPUState *qemu_get_cpu(int index)
-{
-    CPUState *cpu;
-
-    CPU_FOREACH(cpu) {
-        if (cpu->cpu_index == index) {
-            return cpu;
-        }
-    }
-
-    return NULL;
-}
-
-#if !defined(CONFIG_USER_ONLY)
 void cpu_address_space_init(CPUState *cpu, int asidx,
                             const char *prefix, MemoryRegion *mr)
 {
@@ -892,6 +866,7 @@ void cpu_exec_unrealizefn(CPUState *cpu)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
 
+    tlb_destroy(cpu);
     cpu_list_remove(cpu);
 
     if (cc->vmsd != NULL) {
@@ -916,6 +891,7 @@ Property cpu_common_props[] = {
     DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
                      MemoryRegion *),
 #endif
+    DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
     DEFINE_PROP_END_OF_LIST(),
 };
 
@@ -1263,7 +1239,7 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
 {
     RAMBlock *block;
 
-    block = atomic_rcu_read(&ram_list.mru_block);
+    block = qatomic_rcu_read(&ram_list.mru_block);
     if (block && addr - block->offset < block->max_length) {
         return block;
     }
@@ -1289,7 +1265,7 @@ found:
      *     call_rcu(reclaim_ramblock, xxx);
      *     rcu_read_unlock()
      *
-     * atomic_rcu_set is not needed here.  The block was already published
+     * qatomic_rcu_set is not needed here.  The block was already published
      * when it was placed into the list.  Here we're just making an extra
      * copy of the pointer.
      */
@@ -1337,7 +1313,7 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
     page = start_page;
 
     WITH_RCU_READ_LOCK_GUARD() {
-        blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
         ramblock = qemu_get_ram_block(start);
         /* Range sanity check on the ramblock */
         assert(start >= ramblock->offset &&
@@ -1387,7 +1363,7 @@ DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
     dest = 0;
 
     WITH_RCU_READ_LOCK_GUARD() {
-        blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
 
         while (page < end) {
             unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
@@ -1744,6 +1720,46 @@ static int64_t get_file_size(int fd)
     return size;
 }
 
+static int64_t get_file_align(int fd)
+{
+    int64_t align = -1;
+#if defined(__linux__) && defined(CONFIG_LIBDAXCTL)
+    struct stat st;
+
+    if (fstat(fd, &st) < 0) {
+        return -errno;
+    }
+
+    /* Special handling for devdax character devices */
+    if (S_ISCHR(st.st_mode)) {
+        g_autofree char *path = NULL;
+        g_autofree char *rpath = NULL;
+        struct daxctl_ctx *ctx;
+        struct daxctl_region *region;
+        int rc = 0;
+
+        path = g_strdup_printf("/sys/dev/char/%d:%d",
+                               major(st.st_rdev), minor(st.st_rdev));
+        rpath = realpath(path, NULL);
+
+        rc = daxctl_new(&ctx);
+        if (rc) {
+            return -1;
+        }
+
+        daxctl_region_foreach(ctx, region) {
+            if (strstr(rpath, daxctl_region_get_path(region))) {
+                align = daxctl_region_get_align(region);
+                break;
+            }
+        }
+        daxctl_unref(ctx);
+    }
+#endif /* defined(__linux__) && defined(CONFIG_LIBDAXCTL) */
+
+    return align;
+}
+
 static int file_ram_open(const char *path,
                          const char *region_name,
                          bool *created,
@@ -2183,7 +2199,7 @@ static void dirty_memory_extend(ram_addr_t old_ram_size,
         DirtyMemoryBlocks *new_blocks;
         int j;
 
-        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
+        old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]);
         new_blocks = g_malloc(sizeof(*new_blocks) +
                               sizeof(new_blocks->blocks[0]) * new_num_blocks);
 
@@ -2196,7 +2212,7 @@ static void dirty_memory_extend(ram_addr_t old_ram_size,
             new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
         }
 
-        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
+        qatomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
 
         if (old_blocks) {
             g_free_rcu(old_blocks, rcu);
@@ -2295,7 +2311,7 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
 {
     RAMBlock *new_block;
     Error *local_err = NULL;
-    int64_t file_size;
+    int64_t file_size, file_align;
 
     /* Just support these ram flags by now. */
     assert((ram_flags & ~(RAM_SHARED | RAM_PMEM)) == 0);
@@ -2331,6 +2347,14 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
         return NULL;
     }
 
+    file_align = get_file_align(fd);
+    if (file_align > 0 && mr && file_align > mr->align) {
+        error_setg(errp, "backing store align 0x%" PRIx64
+                   " is larger than 'align' option 0x%" PRIx64,
+                   file_align, mr->align);
+        return NULL;
+    }
+
     new_block = g_malloc0(sizeof(*new_block));
     new_block->mr = mr;
     new_block->used_length = size;
@@ -2635,7 +2659,7 @@ RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
     }
 
     RCU_READ_LOCK_GUARD();
-    block = atomic_rcu_read(&ram_list.mru_block);
+    block = qatomic_rcu_read(&ram_list.mru_block);
     if (block && block->host && host - block->host < block->max_length) {
         goto found;
     }
@@ -2719,6 +2743,14 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
         if (watchpoint_address_matches(wp, addr, len)
             && (wp->flags & flags)) {
+            if (replay_running_debug()) {
+                /*
+                 * Don't process the watchpoints when we are
+                 * in a reverse debugging operation.
+                 */
+                replay_breakpoint();
+                return;
+            }
             if (flags == BP_MEM_READ) {
                 wp->flags |= BP_WATCHPOINT_HIT_READ;
             } else {
@@ -2880,7 +2912,7 @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
 {
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
-    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
+    AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch);
     MemoryRegionSection *sections = d->map.sections;
 
     return &sections[index & ~TARGET_PAGE_MASK];
@@ -2964,7 +2996,7 @@ static void tcg_commit(MemoryListener *listener)
      * may have split the RCU critical section.
      */
     d = address_space_to_dispatch(cpuas->as);
-    atomic_rcu_set(&cpuas->memory_dispatch, d);
+    qatomic_rcu_set(&cpuas->memory_dispatch, d);
     tlb_flush(cpuas->cpu);
 }
 
@@ -3104,7 +3136,7 @@ static bool prepare_mmio_access(MemoryRegion *mr)
     bool unlocked = !qemu_mutex_iothread_locked();
     bool release_lock = false;
 
-    if (unlocked && mr->global_locking) {
+    if (unlocked) {
         qemu_mutex_lock_iothread();
         unlocked = false;
         release_lock = true;
@@ -3411,7 +3443,7 @@ void cpu_register_map_client(QEMUBH *bh)
     qemu_mutex_lock(&map_client_list_lock);
     client->bh = bh;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
-    if (!atomic_read(&bounce.in_use)) {
+    if (!qatomic_read(&bounce.in_use)) {
         cpu_notify_map_clients_locked();
     }
     qemu_mutex_unlock(&map_client_list_lock);
@@ -3545,7 +3577,8 @@ void *address_space_map(AddressSpace *as,
     mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
 
     if (!memory_access_is_direct(mr, is_write)) {
-        if (atomic_xchg(&bounce.in_use, true)) {
+        if (qatomic_xchg(&bounce.in_use, true)) {
+            *plen = 0;
             return NULL;
         }
         /* Avoid unbounded allocations */
@@ -3603,7 +3636,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
         qemu_vfree(bounce.buffer);
         bounce.buffer = NULL;
         memory_region_unref(bounce.mr);
-        atomic_mb_set(&bounce.in_use, false);
+        qatomic_mb_set(&bounce.in_use, false);
         cpu_notify_map_clients();
     }
 
@@ -3627,7 +3660,7 @@ void cpu_physical_memory_unmap(void *buffer, hwaddr len,
 #define TRANSLATE(...)   address_space_translate(as, __VA_ARGS__)
 #define RCU_READ_LOCK(...)       rcu_read_lock()
 #define RCU_READ_UNLOCK(...)     rcu_read_unlock()
-#include "memory_ldst.inc.c"
+#include "memory_ldst.c.inc"
 
 int64_t address_space_cache_init(MemoryRegionCache *cache,
                                  AddressSpace *as,
@@ -3724,7 +3757,7 @@ static inline MemoryRegion *address_space_translate_cached(
 /* Called from RCU critical section. address_space_read_cached uses this
  * out of line function when the target is an MMIO or IOMMU region.
  */
-void
+MemTxResult
 address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
                                void *buf, hwaddr len)
 {
@@ -3734,15 +3767,15 @@ address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
     l = len;
     mr = address_space_translate_cached(cache, addr, &addr1, &l, false,
                                         MEMTXATTRS_UNSPECIFIED);
-    flatview_read_continue(cache->fv,
-                           addr, MEMTXATTRS_UNSPECIFIED, buf, len,
-                           addr1, l, mr);
+    return flatview_read_continue(cache->fv,
+                                  addr, MEMTXATTRS_UNSPECIFIED, buf, len,
+                                  addr1, l, mr);
 }
 
 /* Called from RCU critical section. address_space_write_cached uses this
  * out of line function when the target is an MMIO or IOMMU region.
  */
-void
+MemTxResult
 address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
                                 const void *buf, hwaddr len)
 {
@@ -3752,9 +3785,9 @@ address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
     l = len;
     mr = address_space_translate_cached(cache, addr, &addr1, &l, true,
                                         MEMTXATTRS_UNSPECIFIED);
-    flatview_write_continue(cache->fv,
-                            addr, MEMTXATTRS_UNSPECIFIED, buf, len,
-                            addr1, l, mr);
+    return flatview_write_continue(cache->fv,
+                                   addr, MEMTXATTRS_UNSPECIFIED, buf, len,
+                                   addr1, l, mr);
 }
 
 #define ARG1_DECL                MemoryRegionCache *cache
@@ -3763,7 +3796,7 @@ address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
 #define TRANSLATE(...)           address_space_translate_cached(cache, __VA_ARGS__)
 #define RCU_READ_LOCK()          ((void)0)
 #define RCU_READ_UNLOCK()        ((void)0)
-#include "memory_ldst.inc.c"
+#include "memory_ldst.c.inc"
 
 /* virtual memory access for debug (includes writing to ROM) */
 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
@@ -3777,6 +3810,7 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
     while (len > 0) {
         int asidx;
         MemTxAttrs attrs;
+        MemTxResult res;
 
         page = addr & TARGET_PAGE_MASK;
         phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
@@ -3789,11 +3823,14 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
             l = len;
         phys_addr += (addr & ~TARGET_PAGE_MASK);
         if (is_write) {
-            address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
-                                    attrs, buf, l);
+            res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
+                                          attrs, buf, l);
         } else {
-            address_space_read(cpu->cpu_ases[asidx].as, phys_addr, attrs, buf,
-                               l);
+            res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr,
+                                     attrs, buf, l);
+        }
+        if (res != MEMTX_OK) {
+            return -1;
         }
         len -= l;
         buf += l;
@@ -4057,4 +4094,58 @@ void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root)
     }
 }
 
+/*
+ * If positive, discarding RAM is disabled. If negative, discarding RAM is
+ * required to work and cannot be disabled.
+ */
+static int ram_block_discard_disabled;
+
+int ram_block_discard_disable(bool state)
+{
+    int old;
+
+    if (!state) {
+        qatomic_dec(&ram_block_discard_disabled);
+        return 0;
+    }
+
+    do {
+        old = qatomic_read(&ram_block_discard_disabled);
+        if (old < 0) {
+            return -EBUSY;
+        }
+    } while (qatomic_cmpxchg(&ram_block_discard_disabled,
+                             old, old + 1) != old);
+    return 0;
+}
+
+int ram_block_discard_require(bool state)
+{
+    int old;
+
+    if (!state) {
+        qatomic_inc(&ram_block_discard_disabled);
+        return 0;
+    }
+
+    do {
+        old = qatomic_read(&ram_block_discard_disabled);
+        if (old > 0) {
+            return -EBUSY;
+        }
+    } while (qatomic_cmpxchg(&ram_block_discard_disabled,
+                             old, old - 1) != old);
+    return 0;
+}
+
+bool ram_block_discard_is_disabled(void)
+{
+    return qatomic_read(&ram_block_discard_disabled) > 0;
+}
+
+bool ram_block_discard_is_required(void)
+{
+    return qatomic_read(&ram_block_discard_disabled) < 0;
+}
+
 #endif
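
Note on the final hunk: ram_block_discard_disable() and ram_block_discard_require() implement a small lock-free protocol around a single counter. A positive count means some user (for example, a device that pins guest RAM) has vetoed discarding; a negative count means some user depends on discarding working; the cmpxchg loop rejects any request whose sign conflicts with the current state. The sketch below is not QEMU code: it is a standalone re-implementation of the same counter protocol using C11 atomics, under the hypothetical names discard_disable()/discard_require(), so it can be compiled and run on its own.

/*
 * Standalone sketch of the discard-disable/require protocol from the
 * patch above, using C11 atomics instead of QEMU's qatomic_* wrappers.
 * Positive counter = discarding disabled, negative = discarding required.
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int discard_disabled;

static int discard_disable(bool state)
{
    int old;

    if (!state) {
        atomic_fetch_sub(&discard_disabled, 1);
        return 0;
    }
    do {
        old = atomic_load(&discard_disabled);
        if (old < 0) {
            return -EBUSY; /* a "require" user is active: refuse */
        }
        /* re-read and retry if another thread raced with us */
    } while (!atomic_compare_exchange_weak(&discard_disabled, &old, old + 1));
    return 0;
}

static int discard_require(bool state)
{
    int old;

    if (!state) {
        atomic_fetch_add(&discard_disabled, 1);
        return 0;
    }
    do {
        old = atomic_load(&discard_disabled);
        if (old > 0) {
            return -EBUSY; /* a "disable" user is active: refuse */
        }
    } while (!atomic_compare_exchange_weak(&discard_disabled, &old, old - 1));
    return 0;
}

int main(void)
{
    printf("pin RAM (disable discards): %d\n", discard_disable(true));  /* 0 */
    printf("need discards (conflicts):  %d\n", discard_require(true));  /* -EBUSY */
    printf("unpin RAM (re-allow):       %d\n", discard_disable(false)); /* 0 */
    printf("need discards (now fine):   %d\n", discard_require(true));  /* 0 */
    return 0;
}

The read/check/compare-exchange retry loop mirrors the qatomic_read()/qatomic_cmpxchg() loop in the patch; the weak exchange may fail spuriously, which the loop absorbs by retrying.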
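
Similarly, the get_file_align() hunk resolves a device-dax character node to its owning daxctl region via /sys/dev/char/<major>:<minor> and asks libdaxctl for the region alignment, which qemu_ram_alloc_from_fd() then checks against the memory region's 'align' option. Below is a minimal standalone sketch of the same lookup, assuming Linux with libdaxctl installed (compile with -ldaxctl); the program structure and error handling are illustrative, but the daxctl_* calls are the ones the patch itself uses.

/*
 * Sketch: print the region alignment of a device-dax node, mirroring
 * the get_file_align() lookup added above (assumes Linux + libdaxctl).
 */
#include <daxctl/libdaxctl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

int main(int argc, char **argv)
{
    struct daxctl_ctx *ctx;
    struct daxctl_region *region;
    struct stat st;
    char path[PATH_MAX];
    char *rpath;

    if (argc != 2 || stat(argv[1], &st) < 0 || !S_ISCHR(st.st_mode)) {
        fprintf(stderr, "usage: %s /dev/daxX.Y\n", argv[0]);
        return 1;
    }
    /* /sys/dev/char/<major>:<minor> resolves into the owning dax region */
    snprintf(path, sizeof(path), "/sys/dev/char/%d:%d",
             major(st.st_rdev), minor(st.st_rdev));
    rpath = realpath(path, NULL);
    if (!rpath || daxctl_new(&ctx)) {
        free(rpath);
        return 1;
    }
    daxctl_region_foreach(ctx, region) {
        if (strstr(rpath, daxctl_region_get_path(region))) {
            /* same accessor the patch uses for its alignment check */
            unsigned long align = daxctl_region_get_align(region);
            printf("region align: %lu bytes\n", align);
            break;
        }
    }
    daxctl_unref(ctx);
    free(rpath);
    return 0;
}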