MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;
-
-/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
-#define RAM_PREALLOC (1 << 0)
-
-/* RAM is mmap-ed with MAP_SHARED */
-#define RAM_SHARED (1 << 1)
-
-/* Only a portion of RAM (used_length) is actually used, and migrated.
- * This used_length size can change across reboots.
- */
-#define RAM_RESIZEABLE (1 << 2)
-
-/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
- * zero the page and wake waiting processes.
- * (Set during postcopy)
- */
-#define RAM_UF_ZEROPAGE (1 << 3)
-
-/* RAM can be migrated */
-#define RAM_MIGRATABLE (1 << 4)
#endif
#ifdef TARGET_PAGE_BITS_VARY
}
}
-bool memory_region_is_unassigned(MemoryRegion *mr)
-{
- return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
- && mr != &io_mem_watch;
-}
-
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
hwaddr addr,
}
#if defined(CONFIG_USER_ONLY)
-static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
+void tb_invalidate_phys_addr(target_ulong addr)
{
mmap_lock();
- tb_lock();
- tb_invalidate_phys_page_range(pc, pc + 1, 0);
- tb_unlock();
+ tb_invalidate_phys_page_range(addr, addr + 1, 0);
mmap_unlock();
}
+
+static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
+{
+ tb_invalidate_phys_addr(pc);
+}
#else
+void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
+{
+ ram_addr_t ram_addr;
+ MemoryRegion *mr;
+ hwaddr l = 1;
+
+ if (!tcg_enabled()) {
+ return;
+ }
+
+ rcu_read_lock();
+ mr = address_space_translate(as, addr, &addr, &l, false, attrs);
+ if (!(memory_region_is_ram(mr)
+ || memory_region_is_romd(mr))) {
+ rcu_read_unlock();
+ return;
+ }
+ ram_addr = memory_region_get_ram_addr(mr) + addr;
+ tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
+ rcu_read_unlock();
+}
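For reference, a softmmu caller of the new helper would look roughly like this (a sketch only, not part of the patch; address_space_memory and MEMTXATTRS_UNSPECIFIED are existing QEMU symbols, and gpa is a hypothetical guest physical address):

    /* Invalidate any TB covering a single guest physical address,
     * e.g. after guest code has been patched. */
    hwaddr gpa = 0x1000; /* hypothetical */
    tb_invalidate_phys_addr(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED);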
+
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
MemTxAttrs attrs;
RAMBlock *block;
ram_addr_t end;
+ assert(tcg_enabled());
end = TARGET_PAGE_ALIGN(start + length);
start &= TARGET_PAGE_MASK;
}
#endif
-#ifdef __linux__
+#ifdef CONFIG_POSIX
static int64_t get_file_size(int fd)
{
int64_t size = lseek(fd, 0, SEEK_END);
" must be multiples of page size 0x%zx",
block->mr->align, block->page_size);
return NULL;
+ } else if (block->mr->align && !is_power_of_2(block->mr->align)) {
+ error_setg(errp, "alignment 0x%" PRIx64
+ " must be a power of two", block->mr->align);
+ return NULL;
}
block->mr->align = MAX(block->page_size, block->mr->align);
#if defined(__s390x__)
return offset;
}
-unsigned long last_ram_page(void)
+static unsigned long last_ram_page(void)
{
RAMBlock *block;
ram_addr_t last = 0;
}
}
-#ifdef __linux__
+#ifdef CONFIG_POSIX
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
- bool share, int fd,
+ uint32_t ram_flags, int fd,
Error **errp)
{
RAMBlock *new_block;
Error *local_err = NULL;
int64_t file_size;
+ /* Only these ram flags are supported for now. */
+ assert((ram_flags & ~(RAM_SHARED | RAM_PMEM)) == 0);
+
if (xen_enabled()) {
error_setg(errp, "-mem-path not supported with Xen");
return NULL;
new_block->mr = mr;
new_block->used_length = size;
new_block->max_length = size;
- new_block->flags = share ? RAM_SHARED : 0;
+ new_block->flags = ram_flags;
new_block->host = file_ram_alloc(new_block, size, fd, !file_size, errp);
if (!new_block->host) {
g_free(new_block);
return NULL;
}
- ram_block_add(new_block, &local_err, share);
+ ram_block_add(new_block, &local_err, ram_flags & RAM_SHARED);
if (local_err) {
g_free(new_block);
error_propagate(errp, local_err);
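With the argument widened from a bool to ram_flags, one call can now request both sharing and persistent-memory semantics. A sketch of an assumed caller, using only the flags the assertion above accepts:

    /* Map a pmem/DAX-backed file descriptor as shared guest RAM. */
    RAMBlock *rb = qemu_ram_alloc_from_fd(size, mr, RAM_SHARED | RAM_PMEM,
                                          fd, errp);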
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
- bool share, const char *mem_path,
+ uint32_t ram_flags, const char *mem_path,
Error **errp)
{
int fd;
return NULL;
}
- block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
+ block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, errp);
if (!block) {
if (created) {
unlink(mem_path);
ndi->ram_addr = ram_addr;
ndi->mem_vaddr = mem_vaddr;
ndi->size = size;
- ndi->locked = false;
+ ndi->pages = NULL;
assert(tcg_enabled());
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
- ndi->locked = true;
- tb_lock();
- tb_invalidate_phys_page_fast(ram_addr, size);
+ ndi->pages = page_collection_lock(ram_addr, ram_addr + size);
+ tb_invalidate_phys_page_fast(ndi->pages, ram_addr, size);
}
}
/* Called within RCU critical section. */
void memory_notdirty_write_complete(NotDirtyInfo *ndi)
{
- if (ndi->locked) {
- tb_unlock();
+ if (ndi->pages) {
+ assert(tcg_enabled());
+ page_collection_unlock(ndi->pages);
+ ndi->pages = NULL;
}
/* Set both VGA and migration bits for simplicity and to remove
}
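The prepare/complete pair is meant to bracket the store itself; a minimal sketch of an assumed caller (parameter names follow the fields assigned in the prepare function above):

    NotDirtyInfo ndi;
    /* Takes the per-page TB locks if the page still contains code. */
    memory_notdirty_write_prepare(&ndi, cpu, mem_vaddr, ram_addr, size);
    /* ... perform the guest store into the not-dirty page ... */
    memory_notdirty_write_complete(&ndi); /* drops the page locks */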
cpu->watchpoint_hit = wp;
- /* Both tb_lock and iothread_mutex will be reset when
- * cpu_loop_exit or cpu_loop_exit_noexc longjmp
- * back into the cpu_exec main loop.
- */
- tb_lock();
+ mmap_lock();
tb_check_watchpoint(cpu);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
cpu->exception_index = EXCP_DEBUG;
+ mmap_unlock();
cpu_loop_exit(cpu);
} else {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | curr_cflags();
+ mmap_unlock();
cpu_loop_exit_noexc(cpu);
}
}
CPUAddressSpace *cpuas;
AddressSpaceDispatch *d;
+ assert(tcg_enabled());
/* since each CPU stores ram addresses in its TLB cache, we must
reset the modified entries */
cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
}
if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
assert(tcg_enabled());
- tb_lock();
tb_invalidate_phys_range(addr, addr + length);
- tb_unlock();
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
}
cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
#define ARG1 as
#define SUFFIX
#define TRANSLATE(...) address_space_translate(as, __VA_ARGS__)
-#define IS_DIRECT(mr, is_write) memory_access_is_direct(mr, is_write)
-#define MAP_RAM(mr, ofs) qemu_map_ram_ptr((mr)->ram_block, ofs)
-#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define RCU_READ_LOCK(...) rcu_read_lock()
#define RCU_READ_UNLOCK(...) rcu_read_unlock()
#include "memory_ldst.inc.c"
#define ARG1 cache
#define SUFFIX _cached_slow
#define TRANSLATE(...) address_space_translate_cached(cache, __VA_ARGS__)
-#define IS_DIRECT(mr, is_write) memory_access_is_direct(mr, is_write)
-#define MAP_RAM(mr, ofs) (cache->ptr + (ofs - cache->xlat))
-#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define RCU_READ_LOCK() ((void)0)
#define RCU_READ_UNLOCK() ((void)0)
#include "memory_ldst.inc.c"
return ret;
}
+bool ramblock_is_pmem(RAMBlock *rb)
+{
+ return rb->flags & RAM_PMEM;
+}
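A consumer such as the migration code can branch on the new predicate; a sketch (hypothetical caller, not part of this patch):

    if (ramblock_is_pmem(rb)) {
        /* take a pmem-aware path, e.g. flush stores to persistence */
    }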
+
#endif
void page_size_init(void)