* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "config.h"
+#include "qemu/osdep.h"
+#include "qapi/error.h"
#ifndef _WIN32
-#include <sys/types.h>
#include <sys/mman.h>
#endif
-#include "qemu-common.h"
+#include "qemu/cutils.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
-#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
+#include "exec/log.h"
#include "qemu/range.h"
#ifndef _WIN32
struct AddressSpaceDispatch {
struct rcu_head rcu;
+ MemoryRegionSection *mru_section;
/* This is a multi-level map on the physical address space.
* The bottom level has pointers to MemoryRegionSections.
*/
}
}
+static inline bool section_covers_addr(const MemoryRegionSection *section,
+ hwaddr addr)
+{
+ /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
+ * the section must cover the entire address space.
+ */
+ return section->size.hi ||
+ range_covers_byte(section->offset_within_address_space,
+ section->size.lo, addr);
+}
+
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
Node *nodes, MemoryRegionSection *sections)
{
lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
}
- if (sections[lp.ptr].size.hi ||
- range_covers_byte(sections[lp.ptr].offset_within_address_space,
- sections[lp.ptr].size.lo, addr)) {
+ if (section_covers_addr(&sections[lp.ptr], addr)) {
return &sections[lp.ptr];
} else {
return &sections[PHYS_SECTION_UNASSIGNED];
hwaddr addr,
bool resolve_subpage)
{
- MemoryRegionSection *section;
+ MemoryRegionSection *section = atomic_read(&d->mru_section);
subpage_t *subpage;
+ bool update;
- section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
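+ /* Fast path: reuse the most recently used section if it still covers
+ * addr; otherwise fall back to a full phys map lookup and remember
+ * the result below.
+ */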
+ if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
+ section_covers_addr(section, addr)) {
+ update = false;
+ } else {
+ section = phys_page_find(d->phys_map, addr, d->map.nodes,
+ d->map.sections);
+ update = true;
+ }
if (resolve_subpage && section->mr->subpage) {
subpage = container_of(section->mr, subpage_t, iomem);
section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
}
+ if (update) {
+ atomic_set(&d->mru_section, section);
+ }
return section;
}
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
}
-#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
- register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
- cpu_save, cpu_load, cpu->env_ptr);
- assert(cc->vmsd == NULL);
- assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
-#endif
if (cc->vmsd != NULL) {
vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
}
ram_addr_t length,
unsigned client)
{
+ DirtyMemoryBlocks *blocks;
unsigned long end, page;
- bool dirty;
+ bool dirty = false;
if (length == 0) {
return false;
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
- page, end - page);
+
+ rcu_read_lock();
+
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
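+ /* Clear the range one DirtyMemoryBlocks chunk at a time and
+ * accumulate whether any page in it was dirty.
+ */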
+ while (page < end) {
+ unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
+
+ dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
+ offset, num);
+ page += num;
+ }
+
+ rcu_read_unlock();
if (dirty && tcg_enabled()) {
tlb_reset_dirty_range_all(start, length);
}
#ifdef __linux__
-
-#include <sys/vfs.h>
-
-#define HUGETLBFS_MAGIC 0x958458f6
-
-static long gethugepagesize(const char *path, Error **errp)
-{
- struct statfs fs;
- int ret;
-
- do {
- ret = statfs(path, &fs);
- } while (ret != 0 && errno == EINTR);
-
- if (ret != 0) {
- error_setg_errno(errp, errno, "failed to get page size of file %s",
- path);
- return 0;
- }
-
- return fs.f_bsize;
-}
-
static void *file_ram_alloc(RAMBlock *block,
ram_addr_t memory,
const char *path,
Error **errp)
{
- struct stat st;
+ bool unlink_on_error = false;
char *filename;
char *sanitized_name;
char *c;
void *area;
int fd;
- uint64_t hpagesize;
- Error *local_err = NULL;
-
- hpagesize = gethugepagesize(path, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- goto error;
- }
- block->mr->align = hpagesize;
-
- if (memory < hpagesize) {
- error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
- "or larger than huge page size 0x%" PRIx64,
- memory, hpagesize);
- goto error;
- }
+ int64_t page_size;
if (kvm_enabled() && !kvm_has_sync_mmu()) {
error_setg(errp,
"host lacks kvm mmu notifiers, -mem-path unsupported");
- goto error;
+ return NULL;
}
- if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
- /* Make name safe to use with mkstemp by replacing '/' with '_'. */
- sanitized_name = g_strdup(memory_region_name(block->mr));
- for (c = sanitized_name; *c != '\0'; c++) {
- if (*c == '/') {
- *c = '_';
- }
+ for (;;) {
+ fd = open(path, O_RDWR);
+ if (fd >= 0) {
+ /* @path names an existing file, use it */
+ break;
}
+ if (errno == ENOENT) {
+ /* @path names a file that doesn't exist, create it */
+ fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
+ if (fd >= 0) {
+ unlink_on_error = true;
+ break;
+ }
+ } else if (errno == EISDIR) {
+ /* @path names a directory, create a file there */
+ /* Make name safe to use with mkstemp by replacing '/' with '_'. */
+ sanitized_name = g_strdup(memory_region_name(block->mr));
+ for (c = sanitized_name; *c != '\0'; c++) {
+ if (*c == '/') {
+ *c = '_';
+ }
+ }
- filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
- sanitized_name);
- g_free(sanitized_name);
+ filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
+ sanitized_name);
+ g_free(sanitized_name);
- fd = mkstemp(filename);
- if (fd >= 0) {
- unlink(filename);
+ fd = mkstemp(filename);
+ if (fd >= 0) {
+ unlink(filename);
+ g_free(filename);
+ break;
+ }
+ g_free(filename);
}
- g_free(filename);
- } else {
- fd = open(path, O_RDWR | O_CREAT, 0644);
+ if (errno != EEXIST && errno != EINTR) {
+ error_setg_errno(errp, errno,
+ "can't open backing store %s for guest RAM",
+ path);
+ goto error;
+ }
+ /*
+ * Try again on EINTR and EEXIST. The latter happens when
+ * something else creates the file between our two open().
+ */
}
- if (fd < 0) {
- error_setg_errno(errp, errno,
- "unable to create backing store for hugepages");
+ page_size = qemu_fd_getpagesize(fd);
+ block->mr->align = page_size;
+
+ if (memory < page_size) {
+ error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
+ "or larger than page size 0x%" PRIx64,
+ memory, page_size);
goto error;
}
- memory = ROUND_UP(memory, hpagesize);
+ memory = ROUND_UP(memory, page_size);
/*
* ftruncate is not supported by hugetlbfs in older
perror("ftruncate");
}
- area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
+ area = qemu_ram_mmap(fd, memory, page_size, block->flags & RAM_SHARED);
if (area == MAP_FAILED) {
error_setg_errno(errp, errno,
- "unable to map backing store for hugepages");
+ "unable to map backing store for guest RAM");
close(fd);
goto error;
}
return area;
error:
+ if (unlink_on_error) {
+ unlink(path);
+ }
+ close(fd);
return NULL;
}
#endif
return 0;
}
-static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
+/* Called with ram_list.mutex held */
+static void dirty_memory_extend(ram_addr_t old_ram_size,
+ ram_addr_t new_ram_size)
+{
+ ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
+ DIRTY_MEMORY_BLOCK_SIZE);
+ ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
+ DIRTY_MEMORY_BLOCK_SIZE);
+ int i;
+
+ /* Only need to extend if block count increased */
+ if (new_num_blocks <= old_num_blocks) {
+ return;
+ }
+
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ DirtyMemoryBlocks *old_blocks;
+ DirtyMemoryBlocks *new_blocks;
+ int j;
+
+ old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
+ new_blocks = g_malloc(sizeof(*new_blocks) +
+ sizeof(new_blocks->blocks[0]) * new_num_blocks);
+
+ if (old_num_blocks) {
+ memcpy(new_blocks->blocks, old_blocks->blocks,
+ old_num_blocks * sizeof(old_blocks->blocks[0]));
+ }
+
+ for (j = old_num_blocks; j < new_num_blocks; j++) {
+ new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
+ }
+
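+ /* Publish the new array; concurrent readers may still hold the old
+ * pointer, so defer freeing it until after an RCU grace period.
+ */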
+ atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
+
+ if (old_blocks) {
+ g_free_rcu(old_blocks, rcu);
+ }
+ }
+}
+
+static void ram_block_add(RAMBlock *new_block, Error **errp)
{
RAMBlock *block;
RAMBlock *last_block = NULL;
ram_addr_t old_ram_size, new_ram_size;
+ Error *err = NULL;
old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
if (!new_block->host) {
if (xen_enabled()) {
xen_ram_alloc(new_block->offset, new_block->max_length,
- new_block->mr);
+ new_block->mr, &err);
+ if (err) {
+ error_propagate(errp, err);
+ qemu_mutex_unlock_ramlist();
+ return;
+ }
} else {
new_block->host = phys_mem_alloc(new_block->max_length,
&new_block->mr->align);
"cannot set up guest memory '%s'",
memory_region_name(new_block->mr));
qemu_mutex_unlock_ramlist();
- return -1;
+ return;
}
memory_try_enable_merging(new_block->host, new_block->max_length);
}
(new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
if (new_ram_size > old_ram_size) {
migration_bitmap_extend(old_ram_size, new_ram_size);
+ dirty_memory_extend(old_ram_size, new_ram_size);
}
/* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
* QLIST (which has an RCU-friendly variant) does not have insertion at
ram_list.version++;
qemu_mutex_unlock_ramlist();
- new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
-
- if (new_ram_size > old_ram_size) {
- int i;
-
- /* ram_list.dirty_memory[] is protected by the iothread lock. */
- for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
- ram_list.dirty_memory[i] =
- bitmap_zero_extend(ram_list.dirty_memory[i],
- old_ram_size, new_ram_size);
- }
- }
cpu_physical_memory_set_dirty_range(new_block->offset,
new_block->used_length,
DIRTY_CLIENTS_ALL);
kvm_setup_guest_memory(new_block->host, new_block->max_length);
}
}
-
- return new_block->offset;
}
#ifdef __linux__
-ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
- bool share, const char *mem_path,
- Error **errp)
+RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
+ bool share, const char *mem_path,
+ Error **errp)
{
RAMBlock *new_block;
- ram_addr_t addr;
Error *local_err = NULL;
if (xen_enabled()) {
error_setg(errp, "-mem-path not supported with Xen");
- return -1;
+ return NULL;
}
if (phys_mem_alloc != qemu_anon_ram_alloc) {
*/
error_setg(errp,
"-mem-path not supported with this accelerator");
- return -1;
+ return NULL;
}
size = HOST_PAGE_ALIGN(size);
mem_path, errp);
if (!new_block->host) {
g_free(new_block);
- return -1;
+ return NULL;
}
- addr = ram_block_add(new_block, &local_err);
+ ram_block_add(new_block, &local_err);
if (local_err) {
g_free(new_block);
error_propagate(errp, local_err);
- return -1;
+ return NULL;
}
- return addr;
+ return new_block;
}
#endif
static
-ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
- void (*resized)(const char*,
- uint64_t length,
- void *host),
- void *host, bool resizeable,
- MemoryRegion *mr, Error **errp)
+RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
+ void (*resized)(const char*,
+ uint64_t length,
+ void *host),
+ void *host, bool resizeable,
+ MemoryRegion *mr, Error **errp)
{
RAMBlock *new_block;
- ram_addr_t addr;
Error *local_err = NULL;
size = HOST_PAGE_ALIGN(size);
if (resizeable) {
new_block->flags |= RAM_RESIZEABLE;
}
- addr = ram_block_add(new_block, &local_err);
+ ram_block_add(new_block, &local_err);
if (local_err) {
g_free(new_block);
error_propagate(errp, local_err);
- return -1;
+ return NULL;
}
- return addr;
+ return new_block;
}
-ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
+RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
MemoryRegion *mr, Error **errp)
{
return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}
-ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
+RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}
-ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
+RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
void (*resized)(const char*,
uint64_t length,
void *host),
g_free(block);
}
-void qemu_ram_free(ram_addr_t addr)
+void qemu_ram_free(RAMBlock *block)
{
- RAMBlock *block;
-
qemu_mutex_lock_ramlist();
- QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
- if (addr == block->offset) {
- QLIST_REMOVE_RCU(block, next);
- ram_list.mru_block = NULL;
- /* Write list before version */
- smp_wmb();
- ram_list.version++;
- call_rcu(block, reclaim_ramblock, rcu);
- break;
- }
- }
+ QLIST_REMOVE_RCU(block, next);
+ ram_list.mru_block = NULL;
+ /* Write list before version */
+ smp_wmb();
+ ram_list.version++;
+ call_rcu(block, reclaim_ramblock, rcu);
qemu_mutex_unlock_ramlist();
}
*
* Called within RCU critical section.
*/
-void *qemu_get_ram_ptr(ram_addr_t addr)
+void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
- RAMBlock *block = qemu_get_ram_block(addr);
+ RAMBlock *block = ram_block;
+
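+ /* Callers that already know the RAMBlock can pass it in and skip
+ * the address-based lookup.
+ */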
+ if (block == NULL) {
+ block = qemu_get_ram_block(addr);
+ }
if (xen_enabled() && block->host == NULL) {
/* We need to check if the requested address is in the RAM
*
* Called within RCU critical section.
*/
-static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
+static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
+ hwaddr *size)
{
- RAMBlock *block;
+ RAMBlock *block = ram_block;
ram_addr_t offset_inside_block;
if (*size == 0) {
return NULL;
}
- block = qemu_get_ram_block(addr);
+ if (block == NULL) {
+ block = qemu_get_ram_block(addr);
+ }
offset_inside_block = addr - block->offset;
*size = MIN(*size, block->max_length - offset_inside_block);
}
switch (size) {
case 1:
- stb_p(qemu_get_ram_ptr(ram_addr), val);
+ stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
break;
case 2:
- stw_p(qemu_get_ram_ptr(ram_addr), val);
+ stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
break;
case 4:
- stl_p(qemu_get_ram_ptr(ram_addr), val);
+ stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
break;
default:
abort();
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
CPUState *cpu = current_cpu;
+ CPUClass *cc = CPU_GET_CLASS(cpu);
CPUArchState *env = cpu->env_ptr;
target_ulong pc, cs_base;
target_ulong vaddr;
wp->hitaddr = vaddr;
wp->hitattrs = attrs;
if (!cpu->watchpoint_hit) {
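+ /* Let the target CPU veto architectural (BP_CPU) watchpoints
+ * that should not actually fire for this access.
+ */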
+ if (wp->flags & BP_CPU &&
+ !cc->debug_check_watchpoint(cpu, wp)) {
+ wp->flags &= ~BP_WATCHPOINT_HIT;
+ continue;
+ }
cpu->watchpoint_hit = wp;
tb_check_watchpoint(cpu);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
} else {
addr1 += memory_region_get_ram_addr(mr);
/* RAM case */
- ptr = qemu_get_ram_ptr(addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
memcpy(ptr, buf, l);
invalidate_and_set_dirty(mr, addr1, l);
}
}
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block,
+ memory_region_get_ram_addr(mr) + addr1);
memcpy(buf, ptr, l);
}
} else {
addr1 += memory_region_get_ram_addr(mr);
/* ROM/RAM case */
- ptr = qemu_get_ram_ptr(addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
switch (type) {
case WRITE_DATA:
memcpy(ptr, buf, l);
memory_region_ref(mr);
*plen = done;
- ptr = qemu_ram_ptr_length(raddr + base, plen);
+ ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
rcu_read_unlock();
return ptr;
#endif
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
+ ptr = qemu_get_ram_ptr(mr->ram_block,
+ (memory_region_get_ram_addr(mr)
& TARGET_PAGE_MASK)
+ addr1);
switch (endian) {
#endif
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
+ ptr = qemu_get_ram_ptr(mr->ram_block,
+ (memory_region_get_ram_addr(mr)
& TARGET_PAGE_MASK)
+ addr1);
switch (endian) {
#endif
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
+ ptr = qemu_get_ram_ptr(mr->ram_block,
+ (memory_region_get_ram_addr(mr)
& TARGET_PAGE_MASK)
+ addr1);
switch (endian) {
r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
} else {
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
- ptr = qemu_get_ram_ptr(addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
stl_p(ptr, val);
dirty_log_mask = memory_region_get_dirty_log_mask(mr);
} else {
/* RAM case */
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
- ptr = qemu_get_ram_ptr(addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
stl_le_p(ptr, val);
} else {
/* RAM case */
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
- ptr = qemu_get_ram_ptr(addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
stw_le_p(ptr, val);