qemu_plugin_vcpu_init_hook(cpu);
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
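+ /* There is no migration in user-mode emulation, so vmsd must be unset. */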
+ assert(cc->vmsd == NULL);
+#else /* !CONFIG_USER_ONLY */
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
}
int flags, CPUWatchpoint **watchpoint)
{
CPUWatchpoint *wp;
+ vaddr in_page;
/* forbid ranges which are empty or run off the end of the address space */
if (len == 0 || (addr + len - 1) < addr) {
QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
}
- tlb_flush_page(cpu, addr);
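+ /*
+ * -(addr | TARGET_PAGE_MASK) is the number of bytes from addr to the
+ * end of its page. If the watched range fits in what is left of the
+ * page, flushing that single page is enough; a range that crosses a
+ * page boundary needs a full TLB flush.
+ */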
+ in_page = -(addr | TARGET_PAGE_MASK);
+ if (len <= in_page) {
+ tlb_flush_page(cpu, addr);
+ } else {
+ tlb_flush(cpu);
+ }
if (watchpoint)
*watchpoint = wp;
int ret = 0;
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
- if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) {
+ if (watchpoint_address_matches(wp, addr, len)) {
ret |= wp->flags;
}
}
*/
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
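+ /*
+ * Remember the requested size: used_length is host-page aligned, but
+ * the memory region and the resize notifier see the unaligned size.
+ */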
+ const ram_addr_t unaligned_size = newsize;
+
assert(block);
newsize = HOST_PAGE_ALIGN(newsize);
if (block->used_length == newsize) {
+ /*
+ * We don't have to resize the ram block (which only knows aligned
+ * sizes); however, we have to notify if the unaligned size changed.
+ */
+ if (unaligned_size != memory_region_size(block->mr)) {
+ memory_region_set_size(block->mr, unaligned_size);
+ if (block->resized) {
+ block->resized(block->idstr, unaligned_size, block->host);
+ }
+ }
return 0;
}
block->used_length = newsize;
cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
DIRTY_CLIENTS_ALL);
- memory_region_set_size(block->mr, newsize);
+ memory_region_set_size(block->mr, unaligned_size);
if (block->resized) {
- block->resized(block->idstr, newsize, block->host);
+ block->resized(block->idstr, unaligned_size, block->host);
}
return 0;
}
* Otherwise no-op.
* @Note: this is supposed to be a synchronous op.
*/
-void qemu_ram_writeback(RAMBlock *block, ram_addr_t start, ram_addr_t length)
+void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length)
{
/* The requested range should fit within the block range */
g_assert((start + length) <= block->used_length);
if (!memory_access_is_direct(mr, is_write)) {
if (atomic_xchg(&bounce.in_use, true)) {
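+ /* The bounce buffer is busy: tell the caller nothing was mapped. */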
+ *plen = 0;
return NULL;
}
/* Avoid unbounded allocations */
/* Called from RCU critical section. address_space_read_cached uses this
* out-of-line function when the target is an MMIO or IOMMU region.
*/
-void
+MemTxResult
address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
void *buf, hwaddr len)
{
l = len;
mr = address_space_translate_cached(cache, addr, &addr1, &l, false,
MEMTXATTRS_UNSPECIFIED);
- flatview_read_continue(cache->fv,
- addr, MEMTXATTRS_UNSPECIFIED, buf, len,
- addr1, l, mr);
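+ /* Propagate the result so cached reads can report MMIO access errors. */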
+ return flatview_read_continue(cache->fv,
+ addr, MEMTXATTRS_UNSPECIFIED, buf, len,
+ addr1, l, mr);
}
/* Called from RCU critical section. address_space_write_cached uses this
* out-of-line function when the target is an MMIO or IOMMU region.
*/
-void
+MemTxResult
address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
const void *buf, hwaddr len)
{
l = len;
mr = address_space_translate_cached(cache, addr, &addr1, &l, true,
MEMTXATTRS_UNSPECIFIED);
- flatview_write_continue(cache->fv,
- addr, MEMTXATTRS_UNSPECIFIED, buf, len,
- addr1, l, mr);
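+ /* Propagate the result so cached writes can report MMIO access errors. */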
+ return flatview_write_continue(cache->fv,
+ addr, MEMTXATTRS_UNSPECIFIED, buf, len,
+ addr1, l, mr);
}
#define ARG1_DECL MemoryRegionCache *cache
while (len > 0) {
int asidx;
MemTxAttrs attrs;
+ MemTxResult res;
page = addr & TARGET_PAGE_MASK;
phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
l = len;
phys_addr += (addr & ~TARGET_PAGE_MASK);
if (is_write) {
- address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
- attrs, buf, l);
+ res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
+ attrs, buf, l);
} else {
- address_space_read(cpu->cpu_ases[asidx].as, phys_addr, attrs, buf,
- l);
+ res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr,
+ attrs, buf, l);
+ }
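+ /* Fail the debug access as soon as a transaction reports an error. */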
+ if (res != MEMTX_OK) {
+ return -1;
}
len -= l;
buf += l;