* Authors:
*
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
*
*/
void qemu_ram_free_from_ptr(ram_addr_t addr);
struct MemoryRegion;
-int cpu_register_io_memory(MemoryRegion *mr);
-void cpu_unregister_io_memory(int table_address);
-
struct MemoryRegionSection;
void cpu_register_physical_memory_log(struct MemoryRegionSection *section,
- bool readable, bool readonly);
+ bool readonly);
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
#define CODE_DIRTY_FLAG 0x02
#define MIGRATION_DIRTY_FLAG 0x08
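+/* Return the dirty-flag byte for the page containing addr. */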
+static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
+{
+ return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
+}
+
/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
- return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
+ return cpu_physical_memory_get_dirty_flags(addr) == 0xff;
}
-static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
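+/* Return which of dirty_flags are set on any page overlapping
+ * [start, start + length).
+ */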
+static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
+ ram_addr_t length,
+ int dirty_flags)
{
- return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
+ int ret = 0;
+ ram_addr_t addr, end;
+
+ end = TARGET_PAGE_ALIGN(start + length);
+ start &= TARGET_PAGE_MASK;
+ for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
+ ret |= cpu_physical_memory_get_dirty_flags(addr) & dirty_flags;
+ }
+ return ret;
}
-static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
- int dirty_flags)
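+/* Set dirty_flags on the page containing addr, keeping the
+ * ram_list.dirty_pages count in sync for MIGRATION_DIRTY_FLAG.
+ */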
+static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
+ int dirty_flags)
{
- return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
+ if ((dirty_flags & MIGRATION_DIRTY_FLAG) &&
+ !cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
+ MIGRATION_DIRTY_FLAG)) {
+ ram_list.dirty_pages++;
+ }
+ return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}
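+/* Mark the page containing addr dirty for all clients. */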
static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
- ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
+ cpu_physical_memory_set_dirty_flags(addr, 0xff);
}
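+/* Clear dirty_flags on the page containing addr, keeping the
+ * ram_list.dirty_pages count in sync for MIGRATION_DIRTY_FLAG.
+ */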
-static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
- int dirty_flags)
+static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr,
+                                                        int dirty_flags)
{
- return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
+ int mask = ~dirty_flags;
+
+ if ((dirty_flags & MIGRATION_DIRTY_FLAG) &&
+ cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
+ MIGRATION_DIRTY_FLAG)) {
+ ram_list.dirty_pages--;
+ }
+ return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask;
+}
+
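+/* Set dirty_flags on every page overlapping [start, start + length). */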
+static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
+ ram_addr_t length,
+ int dirty_flags)
+{
+ ram_addr_t addr, end;
+
+ end = TARGET_PAGE_ALIGN(start + length);
+ start &= TARGET_PAGE_MASK;
+ for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
+ cpu_physical_memory_set_dirty_flags(addr, dirty_flags);
+ }
}
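+/* Clear dirty_flags on every page overlapping [start, start + length). */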
static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
- int length,
+ ram_addr_t length,
int dirty_flags)
{
- int i, mask, len;
- uint8_t *p;
-
- len = length >> TARGET_PAGE_BITS;
- mask = ~dirty_flags;
- p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
- for (i = 0; i < len; i++) {
- p[i] &= mask;
+ ram_addr_t addr, end;
+
+ end = TARGET_PAGE_ALIGN(start + length);
+ start &= TARGET_PAGE_MASK;
+ for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
+ cpu_physical_memory_clear_dirty_flags(addr, dirty_flags);
}
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
int dirty_flags);
+
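+/* IORange callbacks for MemoryRegions that back I/O port ranges. */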
+extern const IORangeOps memory_region_iorange_ops;
+
#endif
#endif