#include "exec/ram_addr.h"
#include "exec/log.h"
+#include "qemu/pmem.h"
+
#include "migration/vmstate.h"
#include "qemu/range.h"
AddressSpace address_space_io;
AddressSpace address_space_memory;
-MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;
#endif
-#ifdef TARGET_PAGE_BITS_VARY
-int target_page_bits;
-bool target_page_bits_decided;
-#endif
-
CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;
-bool set_preferred_target_page_bits(int bits)
-{
- /* The target page size is the lowest common denominator for all
- * the CPUs in the system, so we can only make it smaller, never
- * larger. And we can't make it smaller once we've committed to
- * a particular size.
- */
-#ifdef TARGET_PAGE_BITS_VARY
- assert(bits >= TARGET_PAGE_BITS_MIN);
- if (target_page_bits == 0 || target_page_bits > bits) {
- if (target_page_bits_decided) {
- return false;
- }
- target_page_bits = bits;
- }
-#endif
- return true;
-}
-
#if !defined(CONFIG_USER_ONLY)
-static void finalize_target_page_bits(void)
-{
-#ifdef TARGET_PAGE_BITS_VARY
- if (target_page_bits == 0) {
- target_page_bits = TARGET_PAGE_BITS_MIN;
- }
- target_page_bits_decided = true;
-#endif
-}
-
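The two helpers deleted above implemented the page-size negotiation spelled out in their comment: each CPU proposes a page size, the smallest proposal wins, and the value can no longer shrink once committed. A standalone toy version of that rule, with illustrative names (the real code keys off TARGET_PAGE_BITS_VARY and lives on outside exec.c):

#include <assert.h>
#include <stdbool.h>

static int page_bits;      /* 0 = no proposal yet */
static bool decided;       /* set once the size is committed */

static bool propose(int bits)
{
    if (page_bits == 0 || page_bits > bits) {
        if (decided) {
            return false;  /* too late to shrink further */
        }
        page_bits = bits;
    }
    return true;
}

int main(void)
{
    assert(propose(12));   /* first proposal: 4 KiB pages */
    assert(propose(10));   /* smaller wins: 1 KiB pages */
    decided = true;        /* the finalize step */
    assert(!propose(9));   /* committed: rejected */
    assert(page_bits == 10);
    return 0;
}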
typedef struct PhysPageEntry PhysPageEntry;
struct PhysPageEntry {
} subpage_t;
#define PHYS_SECTION_UNASSIGNED 0
-#define PHYS_SECTION_NOTDIRTY 1
-#define PHYS_SECTION_ROM 2
static void io_mem_init(void);
static void memory_map_init(void);
{
static unsigned alloc_hint = 16;
if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
- map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
- map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
+ map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes);
map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
alloc_hint = map->nodes_nb_alloc;
}
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
- hwaddr *index, hwaddr *nb, uint16_t leaf,
+ hwaddr *index, uint64_t *nb, uint16_t leaf,
int level)
{
PhysPageEntry *p;
}
static void phys_page_set(AddressSpaceDispatch *d,
- hwaddr index, hwaddr nb,
+ hwaddr index, uint64_t nb,
uint16_t leaf)
{
/* Wildly overreserve - it doesn't matter much. */
assert(valid_ptr < P_L2_SIZE);
/* Don't compress if it won't fit in the # of bits we have. */
- if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
+ if (P_L2_LEVELS >= (1 << 6) &&
+ lp->skip + p[valid_ptr].skip >= (1 << 6)) {
return;
}
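The widened bound matches the 6-bit skip bitfield of PhysPageEntry, and the compile-time P_L2_LEVELS test lets the compiler drop the check entirely when the tree is too shallow for the combined skip to ever reach 64. A minimal standalone illustration of the overflow the guard prevents (stand-in struct; the field widths mirror PhysPageEntry):

#include <assert.h>
#include <stdint.h>

/* skip is 6 bits wide: only values 0..63 are representable */
struct entry {
    uint32_t skip : 6;
    uint32_t ptr  : 26;
};

int main(void)
{
    struct entry parent = { .skip = 40 };
    struct entry child  = { .skip = 30 };

    /* 40 + 30 = 70 >= (1 << 6): merging these two levels would
     * truncate the skip count, so compaction must be skipped. */
    assert(parent.skip + child.skip >= (1 << 6));
    return 0;
}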
*/
MemoryRegion *mr = MEMORY_REGION(iommu_mr);
TCGIOMMUNotifier *notifier;
- int i;
+ Error *err = NULL;
+ int i, ret;
for (i = 0; i < cpu->iommu_notifiers->len; i++) {
notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
0,
HWADDR_MAX,
iommu_idx);
- memory_region_register_iommu_notifier(notifier->mr, &notifier->n);
+ ret = memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
+ &err);
+ if (ret) {
+ error_report_err(err);
+ exit(1);
+ }
}
if (!notifier->active) {
}
tlb_init(cpu);
+ qemu_plugin_vcpu_init_hook(cpu);
+
#ifndef CONFIG_USER_ONLY
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
void tb_invalidate_phys_addr(target_ulong addr)
{
mmap_lock();
- tb_invalidate_phys_page_range(addr, addr + 1, 0);
+ tb_invalidate_phys_page_range(addr, addr + 1);
mmap_unlock();
}
return;
}
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
mr = address_space_translate(as, addr, &addr, &l, false, attrs);
if (!(memory_region_is_ram(mr)
|| memory_region_is_romd(mr))) {
- rcu_read_unlock();
return;
}
ram_addr = memory_region_get_ram_addr(mr) + addr;
- tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
- rcu_read_unlock();
+ tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
}
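From here on the patch converts explicit rcu_read_lock()/rcu_read_unlock() pairs to RCU_READ_LOCK_GUARD() and WITH_RCU_READ_LOCK_GUARD(), which release the lock automatically when the enclosing scope ends, so early-return paths no longer need their own unlock calls. The real macros are defined in include/qemu/rcu.h; the standalone sketch below rebuilds the same idea from the GCC/clang cleanup attribute, with stub lock functions:

#include <stdio.h>

static void rcu_read_lock(void)   { puts("lock"); }
static void rcu_read_unlock(void) { puts("unlock"); }

/* cleanup() runs the unlock when the guard variable leaves scope */
static void guard_cleanup(int *unused) { (void)unused; rcu_read_unlock(); }
#define RCU_GUARD() \
    __attribute__((cleanup(guard_cleanup), unused)) \
    int rcu_guard_ = (rcu_read_lock(), 0)

static int lookup(int key)
{
    RCU_GUARD();
    if (key < 0) {
        return -1;         /* the unlock fires on this early return... */
    }
    return key * 2;        /* ...and on the normal one */
}

int main(void)
{
    lookup(-1);
    lookup(21);
    return 0;
}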
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
fprintf(stderr, "\n");
cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP);
if (qemu_log_separate()) {
- qemu_log_lock();
+ FILE *logfile = qemu_log_lock();
qemu_log("qemu: fatal: ");
qemu_log_vprintf(fmt, ap2);
qemu_log("\n");
log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
qemu_log_flush();
- qemu_log_unlock();
+ qemu_log_unlock(logfile);
qemu_log_close();
}
va_end(ap2);
end = TARGET_PAGE_ALIGN(start + length);
start &= TARGET_PAGE_MASK;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
block = qemu_get_ram_block(start);
assert(block == qemu_get_ram_block(end - 1));
start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
CPU_FOREACH(cpu) {
tlb_reset_dirty(cpu, start1, length);
}
- rcu_read_unlock();
}
/* Note: start and end must be within the same ram block. */
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- rcu_read_lock();
+ WITH_RCU_READ_LOCK_GUARD() {
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ ramblock = qemu_get_ram_block(start);
+ /* Range sanity check on the ramblock */
+ assert(start >= ramblock->offset &&
+ start + length <= ramblock->offset + ramblock->used_length);
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
- ramblock = qemu_get_ram_block(start);
- /* Range sanity check on the ramblock */
- assert(start >= ramblock->offset &&
- start + length <= ramblock->offset + ramblock->used_length);
+ while (page < end) {
+ unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long num = MIN(end - page,
+ DIRTY_MEMORY_BLOCK_SIZE - offset);
- while (page < end) {
- unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
- unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
- unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
+ dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
+ offset, num);
+ page += num;
+ }
- dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
- offset, num);
- page += num;
+ mr_offset = (ram_addr_t)(page << TARGET_PAGE_BITS) - ramblock->offset;
+ mr_size = (end - page) << TARGET_PAGE_BITS;
+ memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
}
- mr_offset = (ram_addr_t)(page << TARGET_PAGE_BITS) - ramblock->offset;
- mr_size = (end - page) << TARGET_PAGE_BITS;
- memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
-
- rcu_read_unlock();
-
if (dirty && tcg_enabled()) {
tlb_reset_dirty_range_all(start, length);
}
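The loop moved inside the guard splits [page, end) at DIRTY_MEMORY_BLOCK_SIZE boundaries so that each iteration touches exactly one bitmap chunk, blocks->blocks[idx]. A standalone rehearsal of that chunking arithmetic for a range straddling one boundary (the block-size constant mirrors ramlist.h; the range values are made up):

#include <stdio.h>

#define DIRTY_MEMORY_BLOCK_SIZE (256UL * 1024 * 8)
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    unsigned long page = DIRTY_MEMORY_BLOCK_SIZE - 3;  /* 3 pages before */
    unsigned long end  = DIRTY_MEMORY_BLOCK_SIZE + 5;  /* 5 pages after  */

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        /* prints: chunk 0 bits [2097149, 2097152), chunk 1 bits [0, 5) */
        printf("chunk %lu: bits [%lu, %lu)\n", idx, offset, offset + num);
        page += num;
    }
    return 0;
}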
end = last >> TARGET_PAGE_BITS;
dest = 0;
- rcu_read_lock();
+ WITH_RCU_READ_LOCK_GUARD() {
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ while (page < end) {
+ unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long num = MIN(end - page,
+ DIRTY_MEMORY_BLOCK_SIZE - offset);
- while (page < end) {
- unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
- unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
- unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
-
- assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
- assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
- offset >>= BITS_PER_LEVEL;
+ assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
+ assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
+ offset >>= BITS_PER_LEVEL;
- bitmap_copy_and_clear_atomic(snap->dirty + dest,
- blocks->blocks[idx] + offset,
- num);
- page += num;
- dest += num >> BITS_PER_LEVEL;
+ bitmap_copy_and_clear_atomic(snap->dirty + dest,
+ blocks->blocks[idx] + offset,
+ num);
+ page += num;
+ dest += num >> BITS_PER_LEVEL;
+ }
}
- rcu_read_unlock();
-
if (tcg_enabled()) {
tlb_reset_dirty_range_all(start, length);
}
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
- MemoryRegionSection *section,
- target_ulong vaddr,
- hwaddr paddr, hwaddr xlat,
- int prot,
- target_ulong *address)
-{
- hwaddr iotlb;
-
- if (memory_region_is_ram(section->mr)) {
- /* Normal RAM. */
- iotlb = memory_region_get_ram_addr(section->mr) + xlat;
- if (!section->readonly) {
- iotlb |= PHYS_SECTION_NOTDIRTY;
- } else {
- iotlb |= PHYS_SECTION_ROM;
- }
- } else {
- AddressSpaceDispatch *d;
-
- d = flatview_to_dispatch(section->fv);
- iotlb = section - d->map.sections;
- iotlb += xlat;
- }
-
- return iotlb;
+ MemoryRegionSection *section)
+{
+ AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
+ return section - d->map.sections;
}
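With the NOTDIRTY and ROM encodings gone, the IOTLB value is simply the section's index within the dispatch's sections array, and iotlb_to_section() recovers the entry by plain indexing. The pointer arithmetic, reduced to a trivial standalone round trip (illustrative types):

#include <assert.h>
#include <stddef.h>

typedef struct { int id; } Section;

int main(void)
{
    Section sections[8];
    Section *s = &sections[5];

    size_t iotlb = s - sections;      /* what get_iotlb() now returns */
    assert(&sections[iotlb] == s);    /* what iotlb_to_section() does */
    return 0;
}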
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)
-static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
- uint16_t section);
+static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
+ uint16_t section);
static subpage_t *subpage_init(FlatView *fv, hwaddr base);
static void *(*phys_mem_alloc)(size_t size, uint64_t *align, bool shared) =
RAMBlock *block;
char *psize;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
monitor_printf(mon, "%24s %8s %18s %18s %18s\n",
"Block Name", "PSize", "Offset", "Used", "Total");
RAMBLOCK_FOREACH(block) {
(uint64_t)block->max_length);
g_free(psize);
}
- rcu_read_unlock();
}
#ifdef __linux__
#else
long qemu_minrampagesize(void)
{
- return getpagesize();
+ return qemu_real_host_page_size;
}
long qemu_maxrampagesize(void)
{
- return getpagesize();
+ return qemu_real_host_page_size;
}
#endif
#ifdef CONFIG_POSIX
static int64_t get_file_size(int fd)
{
- int64_t size = lseek(fd, 0, SEEK_END);
+ int64_t size;
+#if defined(__linux__)
+ struct stat st;
+
+ if (fstat(fd, &st) < 0) {
+ return -errno;
+ }
+
+ /* Special handling for devdax character devices */
+ if (S_ISCHR(st.st_mode)) {
+ g_autofree char *subsystem_path = NULL;
+ g_autofree char *subsystem = NULL;
+
+ subsystem_path = g_strdup_printf("/sys/dev/char/%d:%d/subsystem",
+ major(st.st_rdev), minor(st.st_rdev));
+ subsystem = g_file_read_link(subsystem_path, NULL);
+
+ if (subsystem && g_str_has_suffix(subsystem, "/dax")) {
+ g_autofree char *size_path = NULL;
+ g_autofree char *size_str = NULL;
+
+ size_path = g_strdup_printf("/sys/dev/char/%d:%d/size",
+ major(st.st_rdev), minor(st.st_rdev));
+
+ if (g_file_get_contents(size_path, &size_str, NULL, NULL)) {
+ return g_ascii_strtoll(size_str, NULL, 0);
+ }
+ }
+ }
+#endif /* defined(__linux__) */
+
+ /* st.st_size may be zero for special files, yet lseek(2) still works */
+ size = lseek(fd, 0, SEEK_END);
if (size < 0) {
return -errno;
}
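For a devdax character device st_size is not meaningful, hence the sysfs detour; for other special files lseek(2) can still report a size even when st_size is zero, as the comment above notes. A throwaway test program for the fallback path (not part of the patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    if (argc < 2) {
        fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 1;
    }
    int fd = open(argv[1], O_RDONLY);
    if (fd < 0) {
        perror("open");
        return 1;
    }
    off_t size = lseek(fd, 0, SEEK_END);  /* offset of EOF == file size */
    printf("size: %lld bytes\n", (long long)size);
    close(fd);
    return 0;
}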
bool truncate,
Error **errp)
{
+ Error *err = NULL;
MachineState *ms = MACHINE(qdev_get_machine());
void *area;
}
if (mem_prealloc) {
- os_mem_prealloc(fd, area, memory, ms->smp.cpus, errp);
- if (errp && *errp) {
+ os_mem_prealloc(fd, area, memory, ms->smp.cpus, &err);
+ if (err) {
+ error_propagate(errp, err);
qemu_ram_munmap(fd, area, memory);
return NULL;
}
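The switch to a local err plus error_propagate() follows QEMU's Error conventions: the caller's errp may legitimately be NULL, so dereferencing *errp to detect failure is not reliable, and a function that must react to the failure itself (here, to unmap the area) has to capture the error locally first. A toy standalone rendition of the shape, with simplified stand-ins for the helpers in util/error.c:

#include <stdio.h>
#include <stdlib.h>

typedef struct Error { const char *msg; } Error;

static void error_setg(Error **errp, const char *msg)
{
    if (errp) {
        Error *err = malloc(sizeof(*err));
        err->msg = msg;
        *errp = err;
    }
}

static void error_propagate(Error **dst, Error *src)
{
    if (dst) {
        *dst = src;     /* hand the error to the caller */
    } else {
        free(src);      /* caller does not care: drop it */
    }
}

static void do_prealloc(Error **errp)
{
    error_setg(errp, "cannot preallocate");  /* always fails in this toy */
}

static void *setup(Error **errp)
{
    Error *err = NULL;

    do_prealloc(&err);
    if (err) {
        /* local cleanup (the real code unmaps the area) ... */
        error_propagate(errp, err);
        return NULL;
    }
    return "area";
}

int main(void)
{
    Error *err = NULL;

    if (!setup(&err)) {
        fprintf(stderr, "setup failed: %s\n", err->msg);
        free(err);
    }
    return 0;
}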
RAMBlock *block;
ram_addr_t last = 0;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
RAMBLOCK_FOREACH(block) {
last = MAX(last, block->offset + block->max_length);
}
- rcu_read_unlock();
return last >> TARGET_PAGE_BITS;
}
}
pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
RAMBLOCK_FOREACH(block) {
if (block != new_block &&
!strcmp(block->idstr, new_block->idstr)) {
abort();
}
}
- rcu_read_unlock();
}
/* Called with iothread lock held. */
return 0;
}
+/*
+ * Trigger a sync of the given ram block for the range [start, start + length)
+ * with the backing store, if one is available; otherwise this is a no-op.
+ * Note: this is a synchronous operation.
+ */
+void qemu_ram_writeback(RAMBlock *block, ram_addr_t start, ram_addr_t length)
+{
+ void *addr = ramblock_ptr(block, start);
+
+ /* The requested range should fit within the block range */
+ g_assert((start + length) <= block->used_length);
+
+#ifdef CONFIG_LIBPMEM
+ /* The lack of support for pmem should not block the sync */
+ if (ramblock_is_pmem(block)) {
+ pmem_persist(addr, length);
+ return;
+ }
+#endif
+ if (block->fd >= 0) {
+ /*
+ * If there is no support for PMEM, or the memory has not been
+ * specified as persistent (or is not pmem at all), fall back to
+ * msync. Less optimal, but it still achieves the same goal.
+ */
+ if (qemu_msync(addr, length, block->fd)) {
+ warn_report("%s: failed to sync memory range: start: "
+ RAM_ADDR_FMT " length: " RAM_ADDR_FMT,
+ __func__, start, length);
+ }
+ }
+}
+
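A reduced standalone sketch of the fallback ladder in qemu_ram_writeback(): flush CPU caches directly when the mapping is genuine persistent memory, otherwise fall back to msync(2). pmem_persist() comes from libpmem and only exists under CONFIG_LIBPMEM; the mmap()/main() scaffolding is purely illustrative:

#define _DEFAULT_SOURCE
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

static int sync_range(void *addr, size_t len, bool is_pmem)
{
#ifdef CONFIG_LIBPMEM
    if (is_pmem) {
        pmem_persist(addr, len);   /* user-space cache flush, no syscall */
        return 0;
    }
#endif
    (void)is_pmem;
    /* the real qemu_msync() also takes care of page alignment */
    return msync(addr, len, MS_SYNC);
}

int main(void)
{
    size_t len = 4096;
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (p == MAP_FAILED) {
        return 1;
    }
    printf("sync_range: %d\n", sync_range(p, len, false));
    munmap(p, len);
    return 0;
}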
/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
ram_addr_t new_ram_size)
new_block->max_length = max_size;
assert(max_size >= size);
new_block->fd = -1;
- new_block->page_size = getpagesize();
+ new_block->page_size = qemu_real_host_page_size;
new_block->host = host;
if (host) {
new_block->flags |= RAM_PREALLOC;
if (xen_enabled()) {
ram_addr_t ram_addr;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
ram_addr = xen_ram_addr_from_mapcache(ptr);
block = qemu_get_ram_block(ram_addr);
if (block) {
*offset = ram_addr - block->offset;
}
- rcu_read_unlock();
return block;
}
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
block = atomic_rcu_read(&ram_list.mru_block);
if (block && block->host && host - block->host < block->max_length) {
goto found;
}
}
- rcu_read_unlock();
return NULL;
found:
if (round_offset) {
*offset &= TARGET_PAGE_MASK;
}
- rcu_read_unlock();
return block;
}
return block->offset + offset;
}
-/* Called within RCU critical section. */
-void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
- CPUState *cpu,
- vaddr mem_vaddr,
- ram_addr_t ram_addr,
- unsigned size)
-{
- ndi->cpu = cpu;
- ndi->ram_addr = ram_addr;
- ndi->mem_vaddr = mem_vaddr;
- ndi->size = size;
- ndi->pages = NULL;
-
- assert(tcg_enabled());
- if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
- ndi->pages = page_collection_lock(ram_addr, ram_addr + size);
- tb_invalidate_phys_page_fast(ndi->pages, ram_addr, size);
- }
-}
-
-/* Called within RCU critical section. */
-void memory_notdirty_write_complete(NotDirtyInfo *ndi)
-{
- if (ndi->pages) {
- assert(tcg_enabled());
- page_collection_unlock(ndi->pages);
- ndi->pages = NULL;
- }
-
- /* Set both VGA and migration bits for simplicity and to remove
- * the notdirty callback faster.
- */
- cpu_physical_memory_set_dirty_range(ndi->ram_addr, ndi->size,
- DIRTY_CLIENTS_NOCODE);
- /* we remove the notdirty callback only if the code has been
- flushed */
- if (!cpu_physical_memory_is_clean(ndi->ram_addr)) {
- tlb_set_dirty(ndi->cpu, ndi->mem_vaddr);
- }
-}
-
-/* Called within RCU critical section. */
-static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
- uint64_t val, unsigned size)
-{
- NotDirtyInfo ndi;
-
- memory_notdirty_write_prepare(&ndi, current_cpu, current_cpu->mem_io_vaddr,
- ram_addr, size);
-
- stn_p(qemu_map_ram_ptr(NULL, ram_addr), size, val);
- memory_notdirty_write_complete(&ndi);
-}
-
-static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
- unsigned size, bool is_write,
- MemTxAttrs attrs)
-{
- return is_write;
-}
-
-static const MemoryRegionOps notdirty_mem_ops = {
- .write = notdirty_mem_write,
- .valid.accepts = notdirty_mem_accepts,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .valid = {
- .min_access_size = 1,
- .max_access_size = 8,
- .unaligned = false,
- },
- .impl = {
- .min_access_size = 1,
- .max_access_size = 8,
- .unaligned = false,
- },
-};
-
/* Generate a debug exception if a watchpoint has been hit. */
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs attrs, int flags, uintptr_t ra)
cpu->watchpoint_hit = wp;
mmap_lock();
- tb_check_watchpoint(cpu);
+ tb_check_watchpoint(cpu, ra);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
cpu->exception_index = EXCP_DEBUG;
mmap_unlock();
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
- uint16_t section)
+static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
+ uint16_t section)
{
int idx, eidx;
{
subpage_t *mmio;
+ /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */
mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
mmio->fv = fv;
mmio->base = base;
printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
mmio, base, TARGET_PAGE_SIZE);
#endif
- subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
return mmio;
}
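The explicit subpage_register() call for the whole page could be dropped because g_malloc0() zero-fills mmio->sub_section and PHYS_SECTION_UNASSIGNED is literally 0 (see the #define earlier in the file), so every entry already starts out unassigned, as the new comment notes. A tiny standalone check of that invariant:

#include <assert.h>
#include <glib.h>

#define PHYS_SECTION_UNASSIGNED 0

int main(void)
{
    guint16 *sub_section = g_malloc0(512 * sizeof(guint16));

    assert(sub_section[123] == PHYS_SECTION_UNASSIGNED);
    g_free(sub_section);
    return 0;
}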
return phys_section_add(map, &section);
}
-static void readonly_mem_write(void *opaque, hwaddr addr,
- uint64_t val, unsigned size)
-{
- /* Ignore any write to ROM. */
-}
-
-static bool readonly_mem_accepts(void *opaque, hwaddr addr,
- unsigned size, bool is_write,
- MemTxAttrs attrs)
-{
- return is_write;
-}
-
-/* This will only be used for writes, because reads are special cased
- * to directly access the underlying host ram.
- */
-static const MemoryRegionOps readonly_mem_ops = {
- .write = readonly_mem_write,
- .valid.accepts = readonly_mem_accepts,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .valid = {
- .min_access_size = 1,
- .max_access_size = 8,
- .unaligned = false,
- },
- .impl = {
- .min_access_size = 1,
- .max_access_size = 8,
- .unaligned = false,
- },
-};
-
MemoryRegionSection *iotlb_to_section(CPUState *cpu,
hwaddr index, MemTxAttrs attrs)
{
static void io_mem_init(void)
{
- memory_region_init_io(&io_mem_rom, NULL, &readonly_mem_ops,
- NULL, NULL, UINT64_MAX);
memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
NULL, UINT64_MAX);
-
- /* io_mem_notdirty calls tb_invalidate_phys_page_fast,
- * which can be called without the iothread mutex.
- */
- memory_region_init_io(&io_mem_notdirty, NULL, ¬dirty_mem_ops, NULL,
- NULL, UINT64_MAX);
- memory_region_clear_global_locking(&io_mem_notdirty);
}
AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
n = dummy_section(&d->map, fv, &io_mem_unassigned);
assert(n == PHYS_SECTION_UNASSIGNED);
- n = dummy_section(&d->map, fv, &io_mem_notdirty);
- assert(n == PHYS_SECTION_NOTDIRTY);
- n = dummy_section(&d->map, fv, &io_mem_rom);
- assert(n == PHYS_SECTION_ROM);
d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
* by pushing the migration thread's memory read after the vCPU thread has
* written the memory.
*/
- cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
- run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL);
+ if (replay_mode == REPLAY_MODE_NONE) {
+ /*
+ * VGA can call this function while it is updating the screen.
+ * In record/replay mode that call would deadlock, because
+ * run_on_cpu waits for the rr mutex. The races this run_on_cpu
+ * guards against cannot occur under record/replay anyway, so it
+ * is only needed when record/replay is disabled.
+ */
+ cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
+ run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL);
+ }
}
static void tcg_commit(MemoryListener *listener)
FlatView *fv;
if (len > 0) {
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
fv = address_space_to_flatview(as);
result = flatview_read(fv, addr, attrs, buf, len);
- rcu_read_unlock();
}
return result;
FlatView *fv;
if (len > 0) {
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
fv = address_space_to_flatview(as);
result = flatview_write(fv, addr, attrs, buf, len);
- rcu_read_unlock();
}
return result;
hwaddr addr1;
MemoryRegion *mr;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
while (len > 0) {
l = len;
mr = address_space_translate(as, addr, &addr1, &l, true, attrs);
buf += l;
addr += l;
}
- rcu_read_unlock();
return MEMTX_OK;
}
FlatView *fv;
bool result;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
fv = address_space_to_flatview(as);
result = flatview_access_valid(fv, addr, len, is_write, attrs);
- rcu_read_unlock();
return result;
}
}
l = len;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
fv = address_space_to_flatview(as);
mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
if (!memory_access_is_direct(mr, is_write)) {
if (atomic_xchg(&bounce.in_use, true)) {
- rcu_read_unlock();
return NULL;
}
/* Avoid unbounded allocations */
bounce.buffer, l);
}
- rcu_read_unlock();
*plen = l;
return bounce.buffer;
}
*plen = flatview_extend_translation(fv, addr, len, mr, xlat,
l, is_write, attrs);
ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
- rcu_read_unlock();
return ptr;
}
hwaddr l = 1;
bool res;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
mr = address_space_translate(&address_space_memory,
phys_addr, &phys_addr, &l, false,
MEMTXATTRS_UNSPECIFIED);
res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
- rcu_read_unlock();
return res;
}
RAMBlock *block;
int ret = 0;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
RAMBLOCK_FOREACH(block) {
ret = func(block, opaque);
if (ret) {
break;
}
}
- rcu_read_unlock();
return ret;
}
if ((start + length) <= rb->used_length) {
bool need_madvise, need_fallocate;
- uint8_t *host_endaddr = host_startaddr + length;
- if ((uintptr_t)host_endaddr & (rb->page_size - 1)) {
- error_report("ram_block_discard_range: Unaligned end address: %p",
- host_endaddr);
+ if (length & (rb->page_size - 1)) {
+ error_report("ram_block_discard_range: Unaligned length: %zx",
+ length);
goto err;
}
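The new check is the usual power-of-two alignment test: x & (align - 1) is zero exactly when x is a multiple of align, provided align is a power of two (which host page sizes are). A two-line standalone demonstration:

#include <assert.h>

int main(void)
{
    unsigned long page_size = 4096;              /* power of two */

    assert((8192UL & (page_size - 1)) == 0);     /* aligned */
    assert((8193UL & (page_size - 1)) != 0);     /* unaligned */
    return 0;
}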