if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
}
-#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
- register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
- cpu_save, cpu_load, cpu->env_ptr);
- assert(cc->vmsd == NULL);
- assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
-#endif
if (cc->vmsd != NULL) {
vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
}
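
With the CPU_SAVE_VERSION/register_savevm() path removed, a target's CPU state reaches migration only through a VMStateDescription, either on the device itself (qdev_get_vmsd()) or on the CPU class. As a rough sketch of what the cc->vmsd branch above consumes (MyCPU and its fields are hypothetical stand-ins, not part of this patch):

/* Hypothetical target code: describe migratable CPU state so the
 * cc->vmsd registration above picks it up. */
static const VMStateDescription vmstate_mycpu = {
    .name = "mycpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.regs, MyCPU, 16),
        VMSTATE_UINT32(env.pc, MyCPU),
        VMSTATE_END_OF_LIST()
    }
};

static void mycpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    cc->vmsd = &vmstate_mycpu;
}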
ram_addr_t length,
unsigned client)
{
+ DirtyMemoryBlocks *blocks;
unsigned long end, page;
- bool dirty;
+ bool dirty = false;
if (length == 0) {
        return false;
    }
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
- page, end - page);
+
+ rcu_read_lock();
+
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ while (page < end) {
+ unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
+
+ dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
+ offset, num);
+ page += num;
+ }
+
+ rcu_read_unlock();
    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
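
The flat per-client dirty bitmap is now an array of fixed-size blocks, so a [page, end) query is split into one span per block it touches. The index arithmetic can be checked in isolation; below is a standalone sketch in plain C (the block size here is illustrative, chosen to match what QEMU appears to use, and is an assumption rather than part of this patch):

#include <stdio.h>

/* Stand-in for QEMU's per-block bit count; illustrative value. */
#define DIRTY_MEMORY_BLOCK_SIZE (256 * 1024 * 8)
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void walk_range(unsigned long page, unsigned long end)
{
    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        printf("block %lu: bits [%lu, %lu)\n", idx, offset, offset + num);
        page += num;
    }
}

int main(void)
{
    /* A range straddling one block boundary produces two spans. */
    walk_range(DIRTY_MEMORY_BLOCK_SIZE - 16, DIRTY_MEMORY_BLOCK_SIZE + 16);
    return 0;
}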
+/* Called with ram_list.mutex held */
+static void dirty_memory_extend(ram_addr_t old_ram_size,
+ ram_addr_t new_ram_size)
+{
+ ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
+ DIRTY_MEMORY_BLOCK_SIZE);
+ ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
+ DIRTY_MEMORY_BLOCK_SIZE);
+ int i;
+
+ /* Only need to extend if block count increased */
+ if (new_num_blocks <= old_num_blocks) {
+ return;
+ }
+
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ DirtyMemoryBlocks *old_blocks;
+ DirtyMemoryBlocks *new_blocks;
+ int j;
+
+ old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
+ new_blocks = g_malloc(sizeof(*new_blocks) +
+ sizeof(new_blocks->blocks[0]) * new_num_blocks);
+
+ if (old_num_blocks) {
+ memcpy(new_blocks->blocks, old_blocks->blocks,
+ old_num_blocks * sizeof(old_blocks->blocks[0]));
+ }
+
+ for (j = old_num_blocks; j < new_num_blocks; j++) {
+ new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
+ }
+
+ atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
+
+ if (old_blocks) {
+ g_free_rcu(old_blocks, rcu);
+ }
+ }
+}
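
dirty_memory_extend() is the writer side of an RCU scheme: readers load ram_list.dirty_memory[i] once inside rcu_read_lock()/rcu_read_unlock() and keep using that snapshot, so the writer never resizes the array in place. Instead it builds a larger array, copies the old block pointers (the bitmaps themselves are shared, not copied), publishes the new array with a single atomic store, and defers freeing the old array until readers drain. A minimal sketch of that pattern with C11 atomics, assuming a single writer serialized by a mutex (all names here are stand-ins):

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

typedef struct Blocks {
    size_t num;
    unsigned long *blocks[];       /* flexible array of bitmap pointers */
} Blocks;

static _Atomic(Blocks *) current_blocks;

static void blocks_extend(size_t new_num)
{
    Blocks *old = atomic_load_explicit(&current_blocks, memory_order_acquire);
    size_t old_num = old ? old->num : 0;
    Blocks *new_b;
    size_t i;

    if (new_num <= old_num) {
        return;
    }

    new_b = malloc(sizeof(*new_b) + new_num * sizeof(new_b->blocks[0]));
    new_b->num = new_num;
    if (old_num) {
        /* Share the existing bitmaps; only the pointer array is new. */
        memcpy(new_b->blocks, old->blocks, old_num * sizeof(new_b->blocks[0]));
    }
    for (i = old_num; i < new_num; i++) {
        new_b->blocks[i] = calloc(1, 4096);   /* stand-in for bitmap_new() */
    }

    /* Publish: readers that re-load the pointer now see the new array. */
    atomic_store_explicit(&current_blocks, new_b, memory_order_release);

    /* QEMU reclaims the old array with g_free_rcu() only after all
     * pre-existing readers finish; calling free(old) here would be a
     * use-after-free for them. */
}

int main(void)
{
    blocks_extend(4);
    blocks_extend(8);   /* grows: copies 4 pointers, allocates 4 new */
    return 0;
}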
+
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
RAMBlock *block;
(new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
if (new_ram_size > old_ram_size) {
migration_bitmap_extend(old_ram_size, new_ram_size);
+ dirty_memory_extend(old_ram_size, new_ram_size);
}
/* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
* QLIST (which has an RCU-friendly variant) does not have insertion at
ram_list.version++;
qemu_mutex_unlock_ramlist();
- new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
-
- if (new_ram_size > old_ram_size) {
- int i;
-
- /* ram_list.dirty_memory[] is protected by the iothread lock. */
- for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
- ram_list.dirty_memory[i] =
- bitmap_zero_extend(ram_list.dirty_memory[i],
- old_ram_size, new_ram_size);
- }
- }
cpu_physical_memory_set_dirty_range(new_block->offset,
new_block->used_length,
DIRTY_CLIENTS_ALL);
error_propagate(errp, local_err);
return -1;
}
+
+ mr->ram_block = new_block;
return addr;
}
*
* Called within RCU critical section.
*/
-void *qemu_get_ram_ptr(ram_addr_t addr)
+void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
- RAMBlock *block = qemu_get_ram_block(addr);
+ RAMBlock *block = ram_block;
+
+ if (block == NULL) {
+ block = qemu_get_ram_block(addr);
+ }
if (xen_enabled() && block->host == NULL) {
/* We need to check if the requested address is in the RAM
*
* Called within RCU critical section.
*/
-static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
+static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
+ hwaddr *size)
{
- RAMBlock *block;
+ RAMBlock *block = ram_block;
ram_addr_t offset_inside_block;
if (*size == 0) {
return NULL;
}
- block = qemu_get_ram_block(addr);
+ if (block == NULL) {
+ block = qemu_get_ram_block(addr);
+ }
offset_inside_block = addr - block->offset;
*size = MIN(*size, block->max_length - offset_inside_block);
}
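
Both helpers now take the RAMBlock as their first argument and fall back to the address-based qemu_get_ram_block() lookup only when it is NULL. Every remaining hunk in this patch is the mechanical conversion of callers to one of two forms:

/* Fast path: the caller has the MemoryRegion, whose ram_block
 * back-pointer was set in ram_block_add() above, so the per-access
 * block lookup is skipped. */
ptr = qemu_get_ram_ptr(mr->ram_block, addr1);

/* Compatibility path: callers with only a ram_addr_t pass NULL and
 * qemu_get_ram_ptr() performs the qemu_get_ram_block() lookup itself. */
ptr = qemu_get_ram_ptr(NULL, ram_addr);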
switch (size) {
case 1:
- stb_p(qemu_get_ram_ptr(ram_addr), val);
+ stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
break;
case 2:
- stw_p(qemu_get_ram_ptr(ram_addr), val);
+ stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
break;
case 4:
- stl_p(qemu_get_ram_ptr(ram_addr), val);
+ stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
break;
default:
abort();
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
CPUState *cpu = current_cpu;
+ CPUClass *cc = CPU_GET_CLASS(cpu);
CPUArchState *env = cpu->env_ptr;
target_ulong pc, cs_base;
target_ulong vaddr;
wp->hitaddr = vaddr;
wp->hitattrs = attrs;
if (!cpu->watchpoint_hit) {
+ if (wp->flags & BP_CPU &&
+ !cc->debug_check_watchpoint(cpu, wp)) {
+ wp->flags &= ~BP_WATCHPOINT_HIT;
+ continue;
+ }
cpu->watchpoint_hit = wp;
tb_check_watchpoint(cpu);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
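
The new cc->debug_check_watchpoint hook lets the target veto an architectural (BP_CPU) watchpoint before it is reported, since guest-visible debug state may mask a hit that the generic code would otherwise deliver. A hedged sketch of a target-side implementation (every "mycpu_" name is hypothetical, not from this patch):

static bool mycpu_debug_is_masked(CPUState *cs);   /* stand-in predicate */

static bool mycpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /* Report the hit only if the guest's debug configuration would
     * accept it, e.g. debug exceptions are not masked at the current
     * privilege level. */
    return !mycpu_debug_is_masked(cs);
}

static void mycpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    cc->debug_check_watchpoint = mycpu_debug_check_watchpoint;
}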
} else {
addr1 += memory_region_get_ram_addr(mr);
/* RAM case */
- ptr = qemu_get_ram_ptr(addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
memcpy(ptr, buf, l);
invalidate_and_set_dirty(mr, addr1, l);
}
}
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block, mr->ram_addr + addr1);
memcpy(buf, ptr, l);
}
} else {
addr1 += memory_region_get_ram_addr(mr);
/* ROM/RAM case */
- ptr = qemu_get_ram_ptr(addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
switch (type) {
case WRITE_DATA:
memcpy(ptr, buf, l);
memory_region_ref(mr);
*plen = done;
- ptr = qemu_ram_ptr_length(raddr + base, plen);
+ ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
rcu_read_unlock();
return ptr;
#endif
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
+ ptr = qemu_get_ram_ptr(mr->ram_block,
+ (memory_region_get_ram_addr(mr)
& TARGET_PAGE_MASK)
+ addr1);
switch (endian) {
#endif
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
+ ptr = qemu_get_ram_ptr(mr->ram_block,
+ (memory_region_get_ram_addr(mr)
& TARGET_PAGE_MASK)
+ addr1);
switch (endian) {
#endif
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
+ ptr = qemu_get_ram_ptr(mr->ram_block,
+ (memory_region_get_ram_addr(mr)
& TARGET_PAGE_MASK)
+ addr1);
switch (endian) {
r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
} else {
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
- ptr = qemu_get_ram_ptr(addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
stl_p(ptr, val);
dirty_log_mask = memory_region_get_dirty_log_mask(mr);
} else {
/* RAM case */
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
- ptr = qemu_get_ram_ptr(addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
stl_le_p(ptr, val);
} else {
/* RAM case */
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
- ptr = qemu_get_ram_ptr(addr1);
+ ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
stw_le_p(ptr, val);