* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "config.h"
+#include "qemu/osdep.h"
#ifndef _WIN32
-#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
-#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
+#include "exec/log.h"
#include "qemu/range.h"
#ifndef _WIN32
memory_listener_register(&newas->tcg_as_listener, as);
}
}
+
+AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
+{
+ /* Return the AddressSpace corresponding to the specified index */
+ return cpu->cpu_ases[asidx].as;
+}
#endif
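
For a target with more than one address space, a caller can map transaction
attributes to an index and then go through the new accessor. A minimal sketch
(the helper name debug_read_word is illustrative, not part of this patch;
cpu_asidx_from_attrs and address_space_ldl are existing APIs):

    static uint32_t debug_read_word(CPUState *cpu, hwaddr addr,
                                    MemTxAttrs attrs)
    {
        /* Pick the address-space index from the transaction attributes,
         * then fetch the AddressSpace through the new accessor.
         */
        int asidx = cpu_asidx_from_attrs(cpu, attrs);
        AddressSpace *as = cpu_get_address_space(cpu, asidx);
        MemTxResult res;

        return address_space_ldl(as, addr, attrs, &res);
    }
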
#ifndef CONFIG_USER_ONLY
cpu->thread_id = qemu_get_thread_id();
+
+ /* This is a softmmu CPU object, so create a property for it
+ * so users can wire up its memory. (This can't go in qom/cpu.c
+ * because that file is compiled only once for both user-mode
+ * and system builds.) The default if no link is set up is to use
+ * the system address space.
+ */
+ object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
+ (Object **)&cpu->memory,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE,
+ &error_abort);
+ cpu->memory = system_memory;
+ object_ref(OBJECT(cpu->memory));
#endif
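
Because the link must be set before the CPU is realized, a board model can
wire a CPU to its own view of memory like this. A sketch only: cpu_view and
the region name "cpu-view" are illustrative, and if no link is set the
default above (the system address space) applies:

    MemoryRegion *cpu_view = g_new(MemoryRegion, 1);

    memory_region_init(cpu_view, NULL, "cpu-view", UINT64_MAX);
    /* ... map RAM and devices into cpu_view ... */
    object_property_set_link(OBJECT(cpu), OBJECT(cpu_view), "memory",
                             &error_abort);
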
#if defined(CONFIG_USER_ONLY)
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
}
-#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
- register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
- cpu_save, cpu_load, cpu->env_ptr);
- assert(cc->vmsd == NULL);
- assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
-#endif
if (cc->vmsd != NULL) {
vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
- hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
+ MemTxAttrs attrs;
+ hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
+ int asidx = cpu_asidx_from_attrs(cpu, attrs);
if (phys != -1) {
- tb_invalidate_phys_addr(cpu->as,
+ tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
phys | (pc & ~TARGET_PAGE_MASK));
}
}
ram_addr_t length,
unsigned client)
{
+ DirtyMemoryBlocks *blocks;
unsigned long end, page;
- bool dirty;
+ bool dirty = false;
if (length == 0) {
return false;
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
- page, end - page);
+
+ rcu_read_lock();
+
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ while (page < end) {
+ unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
+
+ dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
+ offset, num);
+ page += num;
+ }
+
+ rcu_read_unlock();
if (dirty && tcg_enabled()) {
tlb_reset_dirty_range_all(start, length);
return 0;
}
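
The same RCU pattern serves for a pure query of one page's dirty bit; a
minimal sketch (sketch_page_is_dirty is a hypothetical helper; the real
accessors live in include/exec/ram_addr.h):

    static bool sketch_page_is_dirty(ram_addr_t addr, unsigned client)
    {
        unsigned long page = addr >> TARGET_PAGE_BITS;
        DirtyMemoryBlocks *blocks;
        bool dirty;

        rcu_read_lock();
        /* Readers see either the old or the new block array */
        blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
        dirty = test_bit(page % DIRTY_MEMORY_BLOCK_SIZE,
                         blocks->blocks[page / DIRTY_MEMORY_BLOCK_SIZE]);
        rcu_read_unlock();

        return dirty;
    }
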
+/* Called with ram_list.mutex held */
+static void dirty_memory_extend(ram_addr_t old_ram_size,
+ ram_addr_t new_ram_size)
+{
+ ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
+ DIRTY_MEMORY_BLOCK_SIZE);
+ ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
+ DIRTY_MEMORY_BLOCK_SIZE);
+ int i;
+
+ /* Only need to extend if block count increased */
+ if (new_num_blocks <= old_num_blocks) {
+ return;
+ }
+
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ DirtyMemoryBlocks *old_blocks;
+ DirtyMemoryBlocks *new_blocks;
+ int j;
+
+ old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
+ new_blocks = g_malloc(sizeof(*new_blocks) +
+ sizeof(new_blocks->blocks[0]) * new_num_blocks);
+
+ if (old_num_blocks) {
+ memcpy(new_blocks->blocks, old_blocks->blocks,
+ old_num_blocks * sizeof(old_blocks->blocks[0]));
+ }
+
+ for (j = old_num_blocks; j < new_num_blocks; j++) {
+ new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
+ }
+
+ atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
+
+ if (old_blocks) {
+ g_free_rcu(old_blocks, rcu);
+ }
+ }
+}
+
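
For reference, the structure being swapped here pairs an RCU head with a
flexible array of per-block bitmap pointers (declared in
include/exec/ram_addr.h by the same series). Since the old per-block bitmaps
are reused rather than copied, bits set by concurrent writers through the old
array are not lost:

    typedef struct {
        struct rcu_head rcu;
        unsigned long *blocks[];
    } DirtyMemoryBlocks;
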
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
RAMBlock *block;
RAMBlock *last_block = NULL;
ram_addr_t old_ram_size, new_ram_size;
+ Error *err = NULL;
old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
if (!new_block->host) {
if (xen_enabled()) {
xen_ram_alloc(new_block->offset, new_block->max_length,
- new_block->mr);
+ new_block->mr, &err);
+ if (err) {
+ error_propagate(errp, err);
+ qemu_mutex_unlock_ramlist();
+ return -1;
+ }
} else {
new_block->host = phys_mem_alloc(new_block->max_length,
&new_block->mr->align);
(new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
if (new_ram_size > old_ram_size) {
migration_bitmap_extend(old_ram_size, new_ram_size);
+ dirty_memory_extend(old_ram_size, new_ram_size);
}
/* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
* QLIST (which has an RCU-friendly variant) does not have insertion at
ram_list.version++;
qemu_mutex_unlock_ramlist();
- new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
-
- if (new_ram_size > old_ram_size) {
- int i;
-
- /* ram_list.dirty_memory[] is protected by the iothread lock. */
- for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
- ram_list.dirty_memory[i] =
- bitmap_zero_extend(ram_list.dirty_memory[i],
- old_ram_size, new_ram_size);
- }
- }
cpu_physical_memory_set_dirty_range(new_block->offset,
new_block->used_length,
DIRTY_CLIENTS_ALL);
error_propagate(errp, local_err);
return -1;
}
+
+ mr->ram_block = new_block;
return addr;
}
*
* Called within RCU critical section.
*/
-void *qemu_get_ram_ptr(ram_addr_t addr)
+void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
- RAMBlock *block = qemu_get_ram_block(addr);
+ RAMBlock *block = ram_block;
+
+ if (block == NULL) {
+ block = qemu_get_ram_block(addr);
+ }
if (xen_enabled() && block->host == NULL) {
/* We need to check if the requested address is in the RAM
*
* Called within RCU critical section.
*/
-static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
+static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
+ hwaddr *size)
{
- RAMBlock *block;
+ RAMBlock *block = ram_block;
ram_addr_t offset_inside_block;
if (*size == 0) {
return NULL;
}
- block = qemu_get_ram_block(addr);
+ if (block == NULL) {
+ block = qemu_get_ram_block(addr);
+ }
offset_inside_block = addr - block->offset;
*size = MIN(*size, block->max_length - offset_inside_block);
}
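
Both helpers rely on the caller holding the RCU read lock, so neither the
block nor its host mapping can go away underneath them. A minimal usage
sketch (buf, addr and len assumed in scope):

    rcu_read_lock();
    /* NULL block: fall back to the address-based lookup */
    ptr = qemu_get_ram_ptr(NULL, addr);
    memcpy(buf, ptr, len);
    rcu_read_unlock();
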
switch (size) {
case 1:
- stb_p(qemu_get_ram_ptr(ram_addr), val);
+ stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
break;
case 2:
- stw_p(qemu_get_ram_ptr(ram_addr), val);
+ stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
break;
case 4:
- stl_p(qemu_get_ram_ptr(ram_addr), val);
+ stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
break;
default:
abort();
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
CPUState *cpu = current_cpu;
+ CPUClass *cc = CPU_GET_CLASS(cpu);
CPUArchState *env = cpu->env_ptr;
target_ulong pc, cs_base;
target_ulong vaddr;
wp->hitaddr = vaddr;
wp->hitattrs = attrs;
if (!cpu->watchpoint_hit) {
+ if (wp->flags & BP_CPU &&
+ !cc->debug_check_watchpoint(cpu, wp)) {
+ wp->flags &= ~BP_WATCHPOINT_HIT;
+ continue;
+ }
cpu->watchpoint_hit = wp;
tb_check_watchpoint(cpu);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
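
The new cc->debug_check_watchpoint hook lets a target veto a BP_CPU
(guest-visible) watchpoint hit before it is reported. A hypothetical target
implementation might look like this (MyCPU, MYCPU and the debug-mask field
are illustrative, not from any real target):

    static bool mycpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
    {
        MyCPU *cpu = MYCPU(cs);

        /* e.g. suppress hits while the guest masks debug exceptions */
        return !cpu->env.debug_exceptions_masked;
    }

    /* in the CPU class init function: */
    cc->debug_check_watchpoint = mycpu_debug_check_watchpoint;
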
{
MemTxResult res;
uint64_t data;
+ int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
+ AddressSpace *as = current_cpu->cpu_ases[asidx].as;
check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
switch (size) {
case 1:
- data = address_space_ldub(&address_space_memory, addr, attrs, &res);
+ data = address_space_ldub(as, addr, attrs, &res);
break;
case 2:
- data = address_space_lduw(&address_space_memory, addr, attrs, &res);
+ data = address_space_lduw(as, addr, attrs, &res);
break;
case 4:
- data = address_space_ldl(&address_space_memory, addr, attrs, &res);
+ data = address_space_ldl(as, addr, attrs, &res);
break;
default: abort();
}
MemTxAttrs attrs)
{
MemTxResult res;
+ int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
+ AddressSpace *as = current_cpu->cpu_ases[asidx].as;
check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
switch (size) {
case 1:
- address_space_stb(&address_space_memory, addr, val, attrs, &res);
+ address_space_stb(as, addr, val, attrs, &res);
break;
case 2:
- address_space_stw(&address_space_memory, addr, val, attrs, &res);
+ address_space_stw(as, addr, val, attrs, &res);
break;
case 4:
- address_space_stl(&address_space_memory, addr, val, attrs, &res);
+ address_space_stl(as, addr, val, attrs, &res);
break;
default: abort();
}
    return phys_section_add(map, &section);
}
-MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
+MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
- CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
+ int asidx = cpu_asidx_from_attrs(cpu, attrs);
+ CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
MemoryRegionSection *sections = d->map.sections;
} else {
addr1 += memory_region_get_ram_addr(mr);
/* RAM case */
- ptr = qemu_get_ram_ptr(addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
memcpy(ptr, buf, l);
invalidate_and_set_dirty(mr, addr1, l);
}
}
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block, mr->ram_addr + addr1);
memcpy(buf, ptr, l);
}
} else {
addr1 += memory_region_get_ram_addr(mr);
/* ROM/RAM case */
- ptr = qemu_get_ram_ptr(addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
switch (type) {
case WRITE_DATA:
memcpy(ptr, buf, l);
memory_region_ref(mr);
*plen = done;
- ptr = qemu_ram_ptr_length(raddr + base, plen);
+ ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
rcu_read_unlock();
return ptr;
#endif
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
+ ptr = qemu_get_ram_ptr(mr->ram_block,
+ (memory_region_get_ram_addr(mr)
& TARGET_PAGE_MASK)
+ addr1);
switch (endian) {
#endif
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
+ ptr = qemu_get_ram_ptr(mr->ram_block,
+ (memory_region_get_ram_addr(mr)
& TARGET_PAGE_MASK)
+ addr1);
switch (endian) {
#endif
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
+ ptr = qemu_get_ram_ptr(mr->ram_block,
+ (memory_region_get_ram_addr(mr)
& TARGET_PAGE_MASK)
+ addr1);
switch (endian) {
r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
} else {
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
- ptr = qemu_get_ram_ptr(addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
stl_p(ptr, val);
dirty_log_mask = memory_region_get_dirty_log_mask(mr);
} else {
/* RAM case */
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
- ptr = qemu_get_ram_ptr(addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
stl_le_p(ptr, val);
} else {
/* RAM case */
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
- ptr = qemu_get_ram_ptr(addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
stw_le_p(ptr, val);
target_ulong page;
while (len > 0) {
+ int asidx;
+ MemTxAttrs attrs;
+
page = addr & TARGET_PAGE_MASK;
- phys_addr = cpu_get_phys_page_debug(cpu, page);
+ phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
+ asidx = cpu_asidx_from_attrs(cpu, attrs);
/* if no physical page mapped, return an error */
if (phys_addr == -1)
return -1;
l = len;
phys_addr += (addr & ~TARGET_PAGE_MASK);
if (is_write) {
- cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
+ cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
+ phys_addr, buf, l);
} else {
- address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
+ address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
+ MEMTXATTRS_UNSPECIFIED,
buf, l, 0);
}
len -= l;
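
Callers such as the gdbstub need no change: the per-page attribute lookup and
address-space selection now happen inside the helper. A minimal read,
assuming cpu and pc are in scope:

    uint8_t insn[4];

    if (cpu_memory_rw_debug(cpu, pc, insn, sizeof(insn), 0) < 0) {
        /* no physical page mapped at this virtual address */
    }
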