#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/target_page.h"
-#include "tcg.h"
+#include "tcg/tcg.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
+#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/ram_addr.h"
#include "exec/log.h"
+#include "qemu/pmem.h"
+
#include "migration/vmstate.h"
#include "qemu/range.h"
/* current CPU in the current thread. It is only valid inside
cpu_exec() */
__thread CPUState *current_cpu;
-/* 0 = Do not count executed instructions.
- 1 = Precise instruction counting.
- 2 = Adaptive rate instruction counting. */
-int use_icount;
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;
#if !defined(CONFIG_USER_ONLY)
+/* 0 = Do not count executed instructions.
+ 1 = Precise instruction counting.
+ 2 = Adaptive rate instruction counting. */
+int use_icount;
typedef struct PhysPageEntry PhysPageEntry;
{
CPUClass *cc = CPU_GET_CLASS(cpu);
+ tlb_destroy(cpu);
cpu_list_remove(cpu);
if (cc->vmsd != NULL) {
}
tlb_init(cpu);
-#ifndef CONFIG_USER_ONLY
+ qemu_plugin_vcpu_init_hook(cpu);
+
+#ifdef CONFIG_USER_ONLY
+ assert(cc->vmsd == NULL);
+#else /* !CONFIG_USER_ONLY */
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
}
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
- MemTxAttrs attrs;
- hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
- int asidx = cpu_asidx_from_attrs(cpu, attrs);
- if (phys != -1) {
- /* Locks grabbed by tb_invalidate_phys_addr */
- tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
- phys | (pc & ~TARGET_PAGE_MASK), attrs);
- }
+ /*
+ * There may not be a virtual to physical translation for the pc
+ * right now, but cached TBs may still exist for this pc.
+ * Flush the whole TB cache to force re-translation of such TBs.
+ * This is heavyweight, but we're debugging anyway.
+ */
+ tb_flush(cpu);
}
#endif
int flags, CPUWatchpoint **watchpoint)
{
CPUWatchpoint *wp;
+ vaddr in_page;
/* forbid ranges which are empty or run off the end of the address space */
if (len == 0 || (addr + len - 1) < addr) {
QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
}
- tlb_flush_page(cpu, addr);
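+ /* -(addr | TARGET_PAGE_MASK) is the number of bytes from addr to the end of its page */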
+ in_page = -(addr | TARGET_PAGE_MASK);
+ if (len <= in_page) {
+ tlb_flush_page(cpu, addr);
+ } else {
+ tlb_flush(cpu);
+ }
if (watchpoint)
*watchpoint = wp;
int ret = 0;
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
- if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) {
+ if (watchpoint_address_matches(wp, addr, len)) {
ret |= wp->flags;
}
}
fprintf(stderr, "\n");
cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP);
if (qemu_log_separate()) {
- qemu_log_lock();
+ FILE *logfile = qemu_log_lock();
qemu_log("qemu: fatal: ");
qemu_log_vprintf(fmt, ap2);
qemu_log("\n");
log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
qemu_log_flush();
- qemu_log_unlock();
+ qemu_log_unlock(logfile);
qemu_log_close();
}
va_end(ap2);
unsigned client)
{
DirtyMemoryBlocks *blocks;
- unsigned long end, page;
+ unsigned long end, page, start_page;
bool dirty = false;
RAMBlock *ramblock;
uint64_t mr_offset, mr_size;
}
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
- page = start >> TARGET_PAGE_BITS;
+ start_page = start >> TARGET_PAGE_BITS;
+ page = start_page;
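+ /*
+ * 'page' is advanced while scanning the dirty bitmap below; keep
+ * start_page so the final bitmap clear covers the whole original range.
+ */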
WITH_RCU_READ_LOCK_GUARD() {
blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
page += num;
}
- mr_offset = (ram_addr_t)(page << TARGET_PAGE_BITS) - ramblock->offset;
- mr_size = (end - page) << TARGET_PAGE_BITS;
+ mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset;
+ mr_size = (end - start_page) << TARGET_PAGE_BITS;
memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
}
long qemu_minrampagesize(void)
{
long hpsize = LONG_MAX;
- long mainrampagesize;
- Object *memdev_root;
- MachineState *ms = MACHINE(qdev_get_machine());
-
- mainrampagesize = qemu_mempath_getpagesize(mem_path);
-
- /* it's possible we have memory-backend objects with
- * hugepage-backed RAM. these may get mapped into system
- * address space via -numa parameters or memory hotplug
- * hooks. we want to take these into account, but we
- * also want to make sure these supported hugepage
- * sizes are applicable across the entire range of memory
- * we may boot from, so we take the min across all
- * backends, and assume normal pages in cases where a
- * backend isn't backed by hugepages.
- */
- memdev_root = object_resolve_path("/objects", NULL);
- if (memdev_root) {
- object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize);
- }
- if (hpsize == LONG_MAX) {
- /* No additional memory regions found ==> Report main RAM page size */
- return mainrampagesize;
- }
-
- /* If NUMA is disabled or the NUMA nodes are not backed with a
- * memory-backend, then there is at least one node using "normal" RAM,
- * so if its page size is smaller we have got to report that size instead.
- */
- if (hpsize > mainrampagesize &&
- (ms->numa_state == NULL ||
- ms->numa_state->num_nodes == 0 ||
- ms->numa_state->nodes[0].node_memdev == NULL)) {
- static bool warned;
- if (!warned) {
- error_report("Huge page support disabled (n/a for main memory).");
- warned = true;
- }
- return mainrampagesize;
- }
+ Object *memdev_root = object_resolve_path("/objects", NULL);
+ object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize);
return hpsize;
}
long qemu_maxrampagesize(void)
{
- long pagesize = qemu_mempath_getpagesize(mem_path);
+ long pagesize = 0;
Object *memdev_root = object_resolve_path("/objects", NULL);
- if (memdev_root) {
- object_child_foreach(memdev_root, find_max_backend_pagesize,
- &pagesize);
- }
+ object_child_foreach(memdev_root, find_max_backend_pagesize, &pagesize);
return pagesize;
}
#else
bool truncate,
Error **errp)
{
- MachineState *ms = MACHINE(qdev_get_machine());
void *area;
block->page_size = qemu_fd_getpagesize(fd);
return NULL;
}
- if (mem_prealloc) {
- os_mem_prealloc(fd, area, memory, ms->smp.cpus, errp);
- if (errp && *errp) {
- qemu_ram_munmap(fd, area, memory);
- return NULL;
- }
- }
-
block->fd = fd;
return area;
}
*/
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
+ const ram_addr_t unaligned_size = newsize;
+
assert(block);
newsize = HOST_PAGE_ALIGN(newsize);
if (block->used_length == newsize) {
+ /*
+ * We don't have to resize the ram block (which only knows aligned
+ * sizes), however, we have to notify if the unaligned size changed.
+ */
+ if (unaligned_size != memory_region_size(block->mr)) {
+ memory_region_set_size(block->mr, unaligned_size);
+ if (block->resized) {
+ block->resized(block->idstr, unaligned_size, block->host);
+ }
+ }
return 0;
}
block->used_length = newsize;
cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
DIRTY_CLIENTS_ALL);
- memory_region_set_size(block->mr, newsize);
+ memory_region_set_size(block->mr, unaligned_size);
if (block->resized) {
- block->resized(block->idstr, newsize, block->host);
+ block->resized(block->idstr, unaligned_size, block->host);
}
return 0;
}
+/*
+ * Trigger a sync of the given ram block for the range [start, start + length)
+ * with the backing store, if one is available.
+ * Otherwise this is a no-op.
+ * Note: this is supposed to be a synchronous operation.
+ */
+void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length)
+{
+ /* The requested range should fit within the block range */
+ g_assert((start + length) <= block->used_length);
+
+#ifdef CONFIG_LIBPMEM
+ /* The lack of support for pmem should not block the sync */
+ if (ramblock_is_pmem(block)) {
+ void *addr = ramblock_ptr(block, start);
+ pmem_persist(addr, length);
+ return;
+ }
+#endif
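+ /* A file descriptor >= 0 means the block is backed by a file we can msync. */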
+ if (block->fd >= 0) {
+ /*
+ * If there is no support for PMEM, or the memory has not been
+ * specified as persistent (or is not pmem), fall back to msync.
+ * Less optimal, but it still achieves the same goal.
+ */
+ void *addr = ramblock_ptr(block, start);
+ if (qemu_msync(addr, length, block->fd)) {
+ warn_report("%s: failed to sync memory range: start: "
+ RAM_ADDR_FMT " length: " RAM_ADDR_FMT,
+ __func__, start, length);
+ }
+ }
+}
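+/*
+ * Example (hypothetical caller): syncing an entire file-backed block could
+ * be done with qemu_ram_msync(block, 0, block->used_length).
+ */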
+
/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
ram_addr_t new_ram_size)
if (new_block->host) {
qemu_ram_setup_dump(new_block->host, new_block->max_length);
qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
- /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
- qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
+ /*
+ * MADV_DONTFORK is also needed by KVM in the absence of a synchronous
+ * MMU. Configure it unless the machine is a qtest server, in which case
+ * KVM is not used and the process may be forked (e.g. for fuzzing purposes).
+ */
+ if (!qtest_enabled()) {
+ qemu_madvise(new_block->host, new_block->max_length,
+ QEMU_MADV_DONTFORK);
+ }
ram_block_notify_add(new_block->host, new_block->max_length);
}
}
size = HOST_PAGE_ALIGN(size);
file_size = get_file_size(fd);
if (file_size > 0 && file_size < size) {
- error_setg(errp, "backing store %s size 0x%" PRIx64
+ error_setg(errp, "backing store size 0x%" PRIx64
" does not match 'size' option 0x" RAM_ADDR_FMT,
- mem_path, file_size, size);
+ file_size, size);
return NULL;
}
}
static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf, hwaddr len);
+ MemTxAttrs attrs, void *buf, hwaddr len);
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
- const uint8_t *buf, hwaddr len);
+ const void *buf, hwaddr len);
static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
bool is_write, MemTxAttrs attrs);
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
- uint8_t *buf, target_ulong len, int is_write)
+ void *ptr, target_ulong len, bool is_write)
{
int flags;
target_ulong l, page;
void * p;
+ uint8_t *buf = ptr;
while (len > 0) {
page = addr & TARGET_PAGE_MASK;
/* Called within RCU critical section. */
static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
MemTxAttrs attrs,
- const uint8_t *buf,
+ const void *ptr,
hwaddr len, hwaddr addr1,
hwaddr l, MemoryRegion *mr)
{
- uint8_t *ptr;
+ uint8_t *ram_ptr;
uint64_t val;
MemTxResult result = MEMTX_OK;
bool release_lock = false;
+ const uint8_t *buf = ptr;
for (;;) {
if (!memory_access_is_direct(mr, true)) {
size_memop(l), attrs);
} else {
/* RAM case */
- ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
- memcpy(ptr, buf, l);
+ ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
+ memcpy(ram_ptr, buf, l);
invalidate_and_set_dirty(mr, addr1, l);
}
/* Called from RCU critical section. */
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
- const uint8_t *buf, hwaddr len)
+ const void *buf, hwaddr len)
{
hwaddr l;
hwaddr addr1;
/* Called within RCU critical section. */
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf,
+ MemTxAttrs attrs, void *ptr,
hwaddr len, hwaddr addr1, hwaddr l,
MemoryRegion *mr)
{
- uint8_t *ptr;
+ uint8_t *ram_ptr;
uint64_t val;
MemTxResult result = MEMTX_OK;
bool release_lock = false;
+ uint8_t *buf = ptr;
for (;;) {
if (!memory_access_is_direct(mr, false)) {
stn_he_p(buf, l, val);
} else {
/* RAM case */
- ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
- memcpy(buf, ptr, l);
+ ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
+ memcpy(buf, ram_ptr, l);
}
if (release_lock) {
/* Called from RCU critical section. */
static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf, hwaddr len)
+ MemTxAttrs attrs, void *buf, hwaddr len)
{
hwaddr l;
hwaddr addr1;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf, hwaddr len)
+ MemTxAttrs attrs, void *buf, hwaddr len)
{
MemTxResult result = MEMTX_OK;
FlatView *fv;
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
MemTxAttrs attrs,
- const uint8_t *buf, hwaddr len)
+ const void *buf, hwaddr len)
{
MemTxResult result = MEMTX_OK;
FlatView *fv;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
- uint8_t *buf, hwaddr len, bool is_write)
+ void *buf, hwaddr len, bool is_write)
{
if (is_write) {
return address_space_write(as, addr, attrs, buf, len);
}
}
-void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
- hwaddr len, int is_write)
+void cpu_physical_memory_rw(hwaddr addr, void *buf,
+ hwaddr len, bool is_write)
{
address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
buf, len, is_write);
static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
hwaddr addr,
MemTxAttrs attrs,
- const uint8_t *buf,
+ const void *ptr,
hwaddr len,
enum write_rom_type type)
{
hwaddr l;
- uint8_t *ptr;
+ uint8_t *ram_ptr;
hwaddr addr1;
MemoryRegion *mr;
+ const uint8_t *buf = ptr;
RCU_READ_LOCK_GUARD();
while (len > 0) {
l = memory_access_size(mr, l, addr1);
} else {
/* ROM/RAM case */
- ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
+ ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
switch (type) {
case WRITE_DATA:
- memcpy(ptr, buf, l);
+ memcpy(ram_ptr, buf, l);
invalidate_and_set_dirty(mr, addr1, l);
break;
case FLUSH_CACHE:
- flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
+ flush_icache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr + l);
break;
}
}
/* used for ROM loading : can write in RAM and ROM */
MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
MemTxAttrs attrs,
- const uint8_t *buf, hwaddr len)
+ const void *buf, hwaddr len)
{
return address_space_write_rom_internal(as, addr, attrs,
buf, len, WRITE_DATA);
if (!memory_access_is_direct(mr, is_write)) {
if (atomic_xchg(&bounce.in_use, true)) {
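+ /* The bounce buffer is busy: report a zero-length mapping to the caller. */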
+ *plen = 0;
return NULL;
}
/* Avoid unbounded allocations */
}
/* Unmaps a memory region previously mapped by address_space_map().
- * Will also mark the memory as dirty if is_write == 1. access_len gives
+ * Will also mark the memory as dirty if is_write is true. access_len gives
* the amount of memory that was actually read or written by the caller.
*/
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
- int is_write, hwaddr access_len)
+ bool is_write, hwaddr access_len)
{
if (buffer != bounce.buffer) {
MemoryRegion *mr;
void *cpu_physical_memory_map(hwaddr addr,
hwaddr *plen,
- int is_write)
+ bool is_write)
{
return address_space_map(&address_space_memory, addr, plen, is_write,
MEMTXATTRS_UNSPECIFIED);
}
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
- int is_write, hwaddr access_len)
+ bool is_write, hwaddr access_len)
{
return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
/* Called from RCU critical section. address_space_read_cached uses this
* out of line function when the target is an MMIO or IOMMU region.
*/
-void
+MemTxResult
address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
void *buf, hwaddr len)
{
l = len;
mr = address_space_translate_cached(cache, addr, &addr1, &l, false,
MEMTXATTRS_UNSPECIFIED);
- flatview_read_continue(cache->fv,
- addr, MEMTXATTRS_UNSPECIFIED, buf, len,
- addr1, l, mr);
+ return flatview_read_continue(cache->fv,
+ addr, MEMTXATTRS_UNSPECIFIED, buf, len,
+ addr1, l, mr);
}
/* Called from RCU critical section. address_space_write_cached uses this
* out of line function when the target is an MMIO or IOMMU region.
*/
-void
+MemTxResult
address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
const void *buf, hwaddr len)
{
l = len;
mr = address_space_translate_cached(cache, addr, &addr1, &l, true,
MEMTXATTRS_UNSPECIFIED);
- flatview_write_continue(cache->fv,
- addr, MEMTXATTRS_UNSPECIFIED, buf, len,
- addr1, l, mr);
+ return flatview_write_continue(cache->fv,
+ addr, MEMTXATTRS_UNSPECIFIED, buf, len,
+ addr1, l, mr);
}
#define ARG1_DECL MemoryRegionCache *cache
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
- uint8_t *buf, target_ulong len, int is_write)
+ void *ptr, target_ulong len, bool is_write)
{
hwaddr phys_addr;
target_ulong l, page;
+ uint8_t *buf = ptr;
cpu_synchronize_state(cpu);
while (len > 0) {
int asidx;
MemTxAttrs attrs;
+ MemTxResult res;
page = addr & TARGET_PAGE_MASK;
phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
l = len;
phys_addr += (addr & ~TARGET_PAGE_MASK);
if (is_write) {
- address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
- attrs, buf, l);
+ res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
+ attrs, buf, l);
} else {
- address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
- attrs, buf, l, 0);
+ res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr,
+ attrs, buf, l);
+ }
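+ /* Stop and report failure on the first access that did not complete. */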
+ if (res != MEMTX_OK) {
+ return -1;
}
len -= l;
buf += l;
uint8_t *host_startaddr = rb->host + start;
- if ((uintptr_t)host_startaddr & (rb->page_size - 1)) {
+ if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) {
error_report("ram_block_discard_range: Unaligned start address: %p",
host_startaddr);
goto err;
if ((start + length) <= rb->used_length) {
bool need_madvise, need_fallocate;
- uint8_t *host_endaddr = host_startaddr + length;
- if ((uintptr_t)host_endaddr & (rb->page_size - 1)) {
- error_report("ram_block_discard_range: Unaligned end address: %p",
- host_endaddr);
+ if (!QEMU_IS_ALIGNED(length, rb->page_size)) {
+ error_report("ram_block_discard_range: Unaligned length: %zx",
+ length);
goto err;
}