#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
-#include "qemu/cache-utils.h"
#include "qemu/range.h"
break;
}
- iotlb = mr->iommu_ops->translate(mr, addr);
+ iotlb = mr->iommu_ops->translate(mr, addr, is_write);
addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
| (addr & iotlb.addr_mask));
len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
return 0;
}
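+/* Reset the exception index to its default before incoming migration state
+ * is applied; the subsection below restores a saved value when one was
+ * pending.
+ */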
+static int cpu_common_pre_load(void *opaque)
+{
+ CPUState *cpu = opaque;
+
+    cpu->exception_index = -1;
+
+ return 0;
+}
+
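+/* Send the subsection only when an exception is pending, so the migration
+ * stream stays compatible with older receivers otherwise.
+ */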
+static bool cpu_common_exception_index_needed(void *opaque)
+{
+ CPUState *cpu = opaque;
+
+    return cpu->exception_index != -1;
+}
+
+static const VMStateDescription vmstate_cpu_common_exception_index = {
+ .name = "cpu_common/exception_index",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32(exception_index, CPUState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
const VMStateDescription vmstate_cpu_common = {
.name = "cpu_common",
.version_id = 1,
.minimum_version_id = 1,
+ .pre_load = cpu_common_pre_load,
.post_load = cpu_common_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINT32(halted, CPUState),
VMSTATE_UINT32(interrupt_request, CPUState),
VMSTATE_END_OF_LIST()
+ },
+ .subsections = (VMStateSubsection[]) {
+ {
+ .vmsd = &vmstate_cpu_common_exception_index,
+ .needed = cpu_common_exception_index_needed,
+        }, {
+ /* empty */
+ }
}
};
{
}
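+/* Stubs for configurations without watchpoint support: removal reports
+ * -ENOSYS and removal by reference is a no-op.
+ */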
+int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
+ int flags)
+{
+ return -ENOSYS;
+}
+
+void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
+{
+}
+
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint)
{
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint)
{
- vaddr len_mask = ~(len - 1);
CPUWatchpoint *wp;
- /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
- if ((len & (len - 1)) || (addr & ~len_mask) ||
- len == 0 || len > TARGET_PAGE_SIZE) {
+ /* forbid ranges which are empty or run off the end of the address space */
+ if (len == 0 || (addr + len - 1) < addr) {
error_report("tried to set invalid watchpoint at %"
VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
return -EINVAL;
wp = g_malloc(sizeof(*wp));
wp->vaddr = addr;
- wp->len_mask = len_mask;
+ wp->len = len;
wp->flags = flags;
/* keep all GDB-injected watchpoints in front */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
int flags)
{
- vaddr len_mask = ~(len - 1);
CPUWatchpoint *wp;
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
- if (addr == wp->vaddr && len_mask == wp->len_mask
+ if (addr == wp->vaddr && len == wp->len
&& flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
cpu_watchpoint_remove_by_ref(cpu, wp);
return 0;
}
}
}
+
+/* Return true if this watchpoint address matches the specified
+ * access (ie the address range covered by the watchpoint overlaps
+ * partially or completely with the address range covered by the
+ * access).
+ */
+static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
+ vaddr addr,
+ vaddr len)
+{
+ /* We know the lengths are non-zero, but a little caution is
+ * required to avoid errors in the case where the range ends
+ * exactly at the top of the address space and so addr + len
+ * wraps round to zero.
+ */
+ vaddr wpend = wp->vaddr + wp->len - 1;
+ vaddr addrend = addr + len - 1;
+
+ return !(addr > wpend || wp->vaddr > addrend);
+}
+
#endif
/* Add a breakpoint. */
/* Make accesses to pages with watchpoints go via the
watchpoint trap routines. */
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
- if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
+ if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
/* Avoid trapping reads of pages with a write breakpoint. */
if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
iotlb = PHYS_SECTION_WATCH + paddr;
if (mr->subpage) {
subpage_t *subpage = container_of(mr, subpage_t, iomem);
- memory_region_destroy(&subpage->iomem);
+ object_unref(OBJECT(&subpage->iomem));
g_free(subpage);
}
}
#define HUGETLBFS_MAGIC 0x958458f6
-static long gethugepagesize(const char *path)
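+/* Return the page size of the filesystem backing @path; on failure, set
+ * *errp and return 0.
+ */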
+static long gethugepagesize(const char *path, Error **errp)
{
struct statfs fs;
int ret;
} while (ret != 0 && errno == EINTR);
if (ret != 0) {
- perror(path);
+ error_setg_errno(errp, errno, "failed to get page size of file %s",
+ path);
return 0;
}
char *filename;
char *sanitized_name;
char *c;
- void *area;
+ void *area = NULL;
int fd;
- unsigned long hpagesize;
+ uint64_t hpagesize;
+ Error *local_err = NULL;
- hpagesize = gethugepagesize(path);
- if (!hpagesize) {
+ hpagesize = gethugepagesize(path, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
goto error;
}
if (memory < hpagesize) {
- return NULL;
+ error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
+ "or larger than huge page size 0x%" PRIx64,
+ memory, hpagesize);
+ goto error;
}
if (kvm_enabled() && !kvm_has_sync_mmu()) {
}
/* Make name safe to use with mkstemp by replacing '/' with '_'. */
- sanitized_name = g_strdup(block->mr->name);
+ sanitized_name = g_strdup(memory_region_name(block->mr));
for (c = sanitized_name; *c != '\0'; c++) {
if (*c == '/')
*c = '_';
error:
if (mem_prealloc) {
+        error_report("%s", error_get_pretty(*errp));
exit(1);
}
return NULL;
return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
-static ram_addr_t ram_block_add(RAMBlock *new_block)
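+/* Add @new_block to the RAM list, allocating its host memory if needed.
+ * On failure, set *errp, drop the ramlist lock and return -1 instead of
+ * exiting.
+ */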
+static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
RAMBlock *block;
ram_addr_t old_ram_size, new_ram_size;
} else {
new_block->host = phys_mem_alloc(new_block->length);
if (!new_block->host) {
- fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
- new_block->mr->name, strerror(errno));
- exit(1);
+ error_setg_errno(errp, errno,
+ "cannot set up guest memory '%s'",
+ memory_region_name(new_block->mr));
+ qemu_mutex_unlock_ramlist();
+ return -1;
}
memory_try_enable_merging(new_block->host, new_block->length);
}
Error **errp)
{
RAMBlock *new_block;
+ ram_addr_t addr;
+ Error *local_err = NULL;
if (xen_enabled()) {
error_setg(errp, "-mem-path not supported with Xen");
return -1;
}
- return ram_block_add(new_block);
+ addr = ram_block_add(new_block, &local_err);
+ if (local_err) {
+ g_free(new_block);
+ error_propagate(errp, local_err);
+ return -1;
+ }
+ return addr;
}
#endif
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
- MemoryRegion *mr)
+ MemoryRegion *mr, Error **errp)
{
RAMBlock *new_block;
+ ram_addr_t addr;
+ Error *local_err = NULL;
size = TARGET_PAGE_ALIGN(size);
new_block = g_malloc0(sizeof(*new_block));
if (host) {
new_block->flags |= RAM_PREALLOC;
}
- return ram_block_add(new_block);
+ addr = ram_block_add(new_block, &local_err);
+ if (local_err) {
+ g_free(new_block);
+ error_propagate(errp, local_err);
+ return -1;
+ }
+ return addr;
}
-ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
+ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
- return qemu_ram_alloc_from_ptr(size, NULL, mr);
+ return qemu_ram_alloc_from_ptr(size, NULL, mr, errp);
}
void qemu_ram_free_from_ptr(ram_addr_t addr)
return block->fd;
}
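+/* Return the host base address of the RAMBlock that contains @addr. */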
+void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
+{
+ RAMBlock *block = qemu_get_ram_block(addr);
+
+ return block->host;
+}
+
/* Return a host pointer to ram allocated with qemu_ram_alloc.
With the exception of the softmmu code in this file, this should
only be used for local memory (e.g. video ram) that the device owns,
default:
abort();
}
- cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
- cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
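+    /* Mark the range dirty for migration and VGA, but not in the code bitmap. */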
+ cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
/* we remove the notdirty callback only if the code has been
flushed */
if (!cpu_physical_memory_is_clean(ram_addr)) {
};
/* Generate a debug exception if a watchpoint has been hit. */
-static void check_watchpoint(int offset, int len_mask, int flags)
+static void check_watchpoint(int offset, int len, int flags)
{
CPUState *cpu = current_cpu;
CPUArchState *env = cpu->env_ptr;
}
vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
- if ((vaddr == (wp->vaddr & len_mask) ||
- (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
- wp->flags |= BP_WATCHPOINT_HIT;
+ if (cpu_watchpoint_address_matches(wp, vaddr, len)
+ && (wp->flags & flags)) {
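+            /* Record the access type and the faulting address on the
+             * watchpoint for later reporting.
+             */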
+ if (flags == BP_MEM_READ) {
+ wp->flags |= BP_WATCHPOINT_HIT_READ;
+ } else {
+ wp->flags |= BP_WATCHPOINT_HIT_WRITE;
+ }
+ wp->hitaddr = vaddr;
if (!cpu->watchpoint_hit) {
cpu->watchpoint_hit = wp;
tb_check_watchpoint(cpu);
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
unsigned size)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
switch (size) {
case 1: return ldub_phys(&address_space_memory, addr);
case 2: return lduw_phys(&address_space_memory, addr);
static void watch_mem_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
switch (size) {
case 1:
stb_phys(&address_space_memory, addr, val);
mmio->as = as;
mmio->base = base;
memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
- "subpage", TARGET_PAGE_SIZE);
+ NULL, TARGET_PAGE_SIZE);
mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
static void io_mem_init(void)
{
- memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
+ memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
- "unassigned", UINT64_MAX);
+ NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
- "notdirty", UINT64_MAX);
+ NULL, UINT64_MAX);
memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
- "watch", UINT64_MAX);
+ NULL, UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
/* invalidate code */
tb_invalidate_phys_page_range(addr, addr + length, 0);
/* set dirty bit */
- cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
- cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
+ cpu_physical_memory_set_dirty_range_nocode(addr, length);
}
xen_modified_memory(addr, length);
}
mr = qemu_ram_addr_from_host(buffer, &addr1);
assert(mr != NULL);
if (is_write) {
- while (access_len) {
- unsigned l;
- l = TARGET_PAGE_SIZE;
- if (l > access_len)
- l = access_len;
- invalidate_and_set_dirty(addr1, l);
- addr1 += l;
- access_len -= l;
- }
+ invalidate_and_set_dirty(addr1, access_len);
}
if (xen_enabled()) {
xen_invalidate_map_cache_entry(buffer);
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
/* set dirty bit */
- cpu_physical_memory_set_dirty_flag(addr1,
- DIRTY_MEMORY_MIGRATION);
- cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
+ cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
}
}
}
}
#endif
-#if !defined(CONFIG_USER_ONLY)
-
/*
* A helper function for the _utterly broken_ virtio device model to find out if
* it's running on a big endian machine. Don't do this at home kids!
*/
-bool virtio_is_big_endian(void);
-bool virtio_is_big_endian(void)
+bool target_words_bigendian(void);
+bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
return true;
#endif
}
-#endif
-
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{