diff --git a/migration/ram.c b/migration/ram.c
index 3eb4430ce59e362fe7540e0d9239ace7688bd97e..26e03a5dfa35bccdebc29d8d3c00313f9e91653e 100644
--- a/migration/ram.c
+++ b/migration/ram.c
 #include "qemu/bitmap.h"
 #include "qemu/timer.h"
 #include "qemu/main-loop.h"
+#include "xbzrle.h"
 #include "migration/migration.h"
-#include "migration/postcopy-ram.h"
+#include "migration/qemu-file.h"
+#include "migration/vmstate.h"
+#include "postcopy-ram.h"
 #include "exec/address-spaces.h"
 #include "migration/page_cache.h"
 #include "qemu/error-report.h"
 #include "qemu/rcu_queue.h"
 #include "migration/colo.h"
 
-static int dirty_rate_high_cnt;
-
-static uint64_t bitmap_sync_count;
-
 /***********************************************************/
 /* ram save/restore */
 
+/* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS; it
+ * worked for pages that were filled with the same char.  We switched
+ * it to only search for the zero value, and renamed it to avoid
+ * confusion with RAM_SAVE_FLAG_COMPRESS_PAGE.
+ */
+
 #define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
-#define RAM_SAVE_FLAG_COMPRESS 0x02
+#define RAM_SAVE_FLAG_ZERO     0x02
 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
 #define RAM_SAVE_FLAG_PAGE     0x08
 #define RAM_SAVE_FLAG_EOS      0x10
@@ -142,124 +147,156 @@ out:
     return ret;
 }
 
+/*
+ * An outstanding page request, on the source, having been received
+ * and queued
+ */
+struct RAMSrcPageRequest {
+    RAMBlock *rb;
+    hwaddr    offset;
+    hwaddr    len;
+
+    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
+};
+
 /* State of RAM for migration */
 struct RAMState {
+    /* QEMUFile used for this migration */
+    QEMUFile *f;
     /* Last block that we have visited searching for dirty pages */
     RAMBlock *last_seen_block;
     /* Last block from where we have sent data */
     RAMBlock *last_sent_block;
-    /* Last offset we have sent data from */
-    ram_addr_t last_offset;
+    /* Last dirty target page we have sent */
+    ram_addr_t last_page;
     /* last ram version we have seen */
     uint32_t last_version;
     /* We are in the first round */
     bool ram_bulk_stage;
-};
-typedef struct RAMState RAMState;
-
-static RAMState ram_state;
-
-/* accounting for migration statistics */
-typedef struct AccountingInfo {
-    uint64_t dup_pages;
-    uint64_t skipped_pages;
+    /* How many times we have dirtied too many pages */
+    int dirty_rate_high_cnt;
+    /* How many times we have synchronized the bitmap */
+    uint64_t bitmap_sync_count;
+    /* these variables are used for bitmap sync */
+    /* last time we did a full bitmap_sync */
+    int64_t time_last_bitmap_sync;
+    /* bytes transferred at the start of the current period */
+    uint64_t bytes_xfer_prev;
+    /* number of dirty pages since the start of the current period */
+    uint64_t num_dirty_pages_period;
+    /* xbzrle misses since the beginning of the period */
+    uint64_t xbzrle_cache_miss_prev;
+    /* number of iterations at the beginning of period */
+    uint64_t iterations_prev;
+    /* Accounting fields */
+    /* number of zero pages.  It used to be pages filled by the same char. */
+    uint64_t zero_pages;
+    /* number of normal transferred pages */
     uint64_t norm_pages;
+    /* Iterations since start */
     uint64_t iterations;
+    /* xbzrle transmitted bytes.  Note that these are after compression,
+     * so they can't be derived from the page counts. */
     uint64_t xbzrle_bytes;
+    /* xbzrle transmitted pages */
     uint64_t xbzrle_pages;
+    /* number of xbzrle cache misses */
     uint64_t xbzrle_cache_miss;
+    /* xbzrle miss rate */
     double xbzrle_cache_miss_rate;
+    /* xbzrle number of overflows */
     uint64_t xbzrle_overflows;
-} AccountingInfo;
+    /* number of dirty bits in the bitmap */
+    uint64_t migration_dirty_pages;
+    /* total number of bytes transferred */
+    uint64_t bytes_transferred;
+    /* number of dirtied pages in the last second */
+    uint64_t dirty_pages_rate;
+    /* Count of requests incoming from destination */
+    uint64_t postcopy_requests;
+    /* protects modification of the bitmap */
+    QemuMutex bitmap_mutex;
+    /* The RAMBlock used in the last src_page_requests */
+    RAMBlock *last_req_rb;
+    /* Queue of outstanding page requests from the destination */
+    QemuMutex src_page_req_mutex;
+    QSIMPLEQ_HEAD(src_page_requests, RAMSrcPageRequest) src_page_requests;
+};
+typedef struct RAMState RAMState;
 
-static AccountingInfo acct_info;
+static RAMState ram_state;
 
-static void acct_clear(void)
+uint64_t dup_mig_pages_transferred(void)
 {
-    memset(&acct_info, 0, sizeof(acct_info));
+    return ram_state.zero_pages;
 }
 
-uint64_t dup_mig_bytes_transferred(void)
+uint64_t norm_mig_pages_transferred(void)
 {
-    return acct_info.dup_pages * TARGET_PAGE_SIZE;
+    return ram_state.norm_pages;
 }
 
-uint64_t dup_mig_pages_transferred(void)
+uint64_t xbzrle_mig_bytes_transferred(void)
 {
-    return acct_info.dup_pages;
+    return ram_state.xbzrle_bytes;
 }
 
-uint64_t skipped_mig_bytes_transferred(void)
+uint64_t xbzrle_mig_pages_transferred(void)
 {
-    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
+    return ram_state.xbzrle_pages;
 }
 
-uint64_t skipped_mig_pages_transferred(void)
+uint64_t xbzrle_mig_pages_cache_miss(void)
 {
-    return acct_info.skipped_pages;
+    return ram_state.xbzrle_cache_miss;
 }
 
-uint64_t norm_mig_bytes_transferred(void)
+double xbzrle_mig_cache_miss_rate(void)
 {
-    return acct_info.norm_pages * TARGET_PAGE_SIZE;
+    return ram_state.xbzrle_cache_miss_rate;
 }
 
-uint64_t norm_mig_pages_transferred(void)
+uint64_t xbzrle_mig_pages_overflow(void)
 {
-    return acct_info.norm_pages;
+    return ram_state.xbzrle_overflows;
 }
 
-uint64_t xbzrle_mig_bytes_transferred(void)
+uint64_t ram_bytes_transferred(void)
 {
-    return acct_info.xbzrle_bytes;
+    return ram_state.bytes_transferred;
 }
 
-uint64_t xbzrle_mig_pages_transferred(void)
+uint64_t ram_bytes_remaining(void)
 {
-    return acct_info.xbzrle_pages;
+    return ram_state.migration_dirty_pages * TARGET_PAGE_SIZE;
 }
 
-uint64_t xbzrle_mig_pages_cache_miss(void)
+uint64_t ram_dirty_sync_count(void)
 {
-    return acct_info.xbzrle_cache_miss;
+    return ram_state.bitmap_sync_count;
 }
 
-double xbzrle_mig_cache_miss_rate(void)
+uint64_t ram_dirty_pages_rate(void)
 {
-    return acct_info.xbzrle_cache_miss_rate;
+    return ram_state.dirty_pages_rate;
 }
 
-uint64_t xbzrle_mig_pages_overflow(void)
+uint64_t ram_postcopy_requests(void)
 {
-    return acct_info.xbzrle_overflows;
+    return ram_state.postcopy_requests;
 }
 
-static QemuMutex migration_bitmap_mutex;
-static uint64_t migration_dirty_pages;
-
 /* used by the search for pages to send */
 struct PageSearchStatus {
     /* Current block being searched */
     RAMBlock    *block;
-    /* Current offset to search from */
-    ram_addr_t   offset;
+    /* Current page to search from */
+    unsigned long page;
     /* Set once we wrap around */
     bool         complete_round;
 };
 typedef struct PageSearchStatus PageSearchStatus;
 
-static struct BitmapRcu {
-    struct rcu_head rcu;
-    /* Main migration bitmap */
-    unsigned long *bmap;
-    /* bitmap of pages that haven't been sent even once
-     * only maintained and used in postcopy at the moment
-     * where it's used to send the dirtymap at the start
-     * of the postcopy phase
-     */
-    unsigned long *unsentmap;
-} *migration_bitmap_rcu;
-
 struct CompressParam {
     bool done;
     bool quit;
@@ -293,7 +330,6 @@ static QemuCond comp_done_cond;
 /* The empty QEMUFileOps will be used by file in CompressParam */
 static const QEMUFileOps empty_ops = { };
 
-static bool compression_switch;
 static DecompressParam *decomp_param;
 static QemuThread *decompress_threads;
 static QemuMutex decomp_done_lock;
@@ -377,7 +413,6 @@ void migrate_compress_threads_create(void)
     if (!migrate_use_compression()) {
         return;
     }
-    compression_switch = true;
     thread_count = migrate_compress_threads();
     compress_threads = g_new0(QemuThread, thread_count);
     comp_param = g_new0(CompressParam, thread_count);
@@ -410,10 +445,14 @@ void migrate_compress_threads_create(void)
  * @offset: offset inside the block for the page
  *          in the lower bits, it contains flags
  */
-static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
+static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
+                               ram_addr_t offset)
 {
     size_t size, len;
 
+    if (block == rs->last_sent_block) {
+        offset |= RAM_SAVE_FLAG_CONTINUE;
+    }
     qemu_put_be64(f, offset);
     size = 8;
 
@@ -422,6 +461,7 @@ static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
         qemu_put_byte(f, len);
         qemu_put_buffer(f, (uint8_t *)block->idstr, len);
         size += 1 + len;
+        rs->last_sent_block = block;
     }
     return size;
 }
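
For reference, a minimal sketch of how the receiving side can decode the header written by save_page_header(): a big-endian 64-bit word whose low bits carry the RAM_SAVE_FLAG_* bits and whose upper bits are the page-aligned offset, optionally followed by a length byte and the RAMBlock idstr when the block changes. The helper name below is hypothetical and not part of this patch.

    static void load_page_header(QEMUFile *f, uint64_t *flags, ram_addr_t *addr)
    {
        uint64_t header = qemu_get_be64(f);

        *flags = header & ~TARGET_PAGE_MASK;  /* RAM_SAVE_FLAG_* bits */
        *addr  = header & TARGET_PAGE_MASK;   /* page-aligned block offset */
        /* When RAM_SAVE_FLAG_CONTINUE is clear, a one-byte length and the
         * RAMBlock idstr follow, naming the block for the pages after it. */
    }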
@@ -471,7 +511,7 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
     /* We don't care if this fails to allocate a new cache page
      * as long as it updated an old one */
     cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
-                 bitmap_sync_count);
+                 rs->bitmap_sync_count);
 }
 
 #define ENCODING_FLAG_XBZRLE 0x1
@@ -483,27 +523,25 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
  *          0 means that page is identical to the one already sent
  *          -1 means that xbzrle would be longer than normal
  *
- * @f: QEMUFile where to send the data
+ * @rs: current RAM state
  * @current_data: pointer to the address of the page contents
  * @current_addr: addr of the page
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  * @last_stage: if we are at the completion stage
- * @bytes_transferred: increase it with the number of transferred bytes
  */
-static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
+static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
                             ram_addr_t current_addr, RAMBlock *block,
-                            ram_addr_t offset, bool last_stage,
-                            uint64_t *bytes_transferred)
+                            ram_addr_t offset, bool last_stage)
 {
     int encoded_len = 0, bytes_xbzrle;
     uint8_t *prev_cached_page;
 
-    if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
-        acct_info.xbzrle_cache_miss++;
+    if (!cache_is_cached(XBZRLE.cache, current_addr, rs->bitmap_sync_count)) {
+        rs->xbzrle_cache_miss++;
         if (!last_stage) {
             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
-                             bitmap_sync_count) == -1) {
+                             rs->bitmap_sync_count) == -1) {
                 return -1;
             } else {
                 /* update *current_data when the page has been
@@ -528,7 +566,7 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
         return 0;
     } else if (encoded_len == -1) {
         trace_save_xbzrle_page_overflow();
-        acct_info.xbzrle_overflows++;
+        rs->xbzrle_overflows++;
         /* update data in the cache */
         if (!last_stage) {
             memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
@@ -543,14 +581,15 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
     }
 
     /* Send XBZRLE based compressed page */
-    bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
-    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
-    qemu_put_be16(f, encoded_len);
-    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
+    bytes_xbzrle = save_page_header(rs, rs->f, block,
+                                    offset | RAM_SAVE_FLAG_XBZRLE);
+    qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
+    qemu_put_be16(rs->f, encoded_len);
+    qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
     bytes_xbzrle += encoded_len + 1 + 2;
-    acct_info.xbzrle_pages++;
-    acct_info.xbzrle_bytes += bytes_xbzrle;
-    *bytes_transferred += bytes_xbzrle;
+    rs->xbzrle_pages++;
+    rs->xbzrle_bytes += bytes_xbzrle;
+    rs->bytes_transferred += bytes_xbzrle;
 
     return 1;
 }
@@ -564,70 +603,45 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
  *
  * @rs: current RAM state
  * @rb: RAMBlock where to search for dirty pages
- * @start: starting address (typically so we can continue from previous page)
- * @ram_addr_abs: pointer into which to store the address of the dirty page
- *                within the global ram_addr space
+ * @start: page where we start the search
  */
 static inline
-ram_addr_t migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
-                                       ram_addr_t start,
-                                       ram_addr_t *ram_addr_abs)
+unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
+                                          unsigned long start)
 {
-    unsigned long base = rb->offset >> TARGET_PAGE_BITS;
-    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
-    uint64_t rb_size = rb->used_length;
-    unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
-    unsigned long *bitmap;
-
+    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
+    unsigned long *bitmap = rb->bmap;
     unsigned long next;
 
-    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
-    if (rs->ram_bulk_stage && nr > base) {
-        next = nr + 1;
+    if (rs->ram_bulk_stage && start > 0) {
+        next = start + 1;
     } else {
-        next = find_next_bit(bitmap, size, nr);
+        next = find_next_bit(bitmap, size, start);
     }
 
-    *ram_addr_abs = next << TARGET_PAGE_BITS;
-    return (next - base) << TARGET_PAGE_BITS;
+    return next;
 }
 
-static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
+static inline bool migration_bitmap_clear_dirty(RAMState *rs,
+                                                RAMBlock *rb,
+                                                unsigned long page)
 {
     bool ret;
-    int nr = addr >> TARGET_PAGE_BITS;
-    unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
 
-    ret = test_and_clear_bit(nr, bitmap);
+    ret = test_and_clear_bit(page, rb->bmap);
 
     if (ret) {
-        migration_dirty_pages--;
+        rs->migration_dirty_pages--;
     }
     return ret;
 }
 
-static int64_t num_dirty_pages_period;
-static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
-{
-    unsigned long *bitmap;
-    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
-    migration_dirty_pages += cpu_physical_memory_sync_dirty_bitmap(bitmap,
-                             start, length, &num_dirty_pages_period);
-}
-
-/* Fix me: there are too many global variables used in migration process. */
-static int64_t start_time;
-static int64_t bytes_xfer_prev;
-static uint64_t xbzrle_cache_miss_prev;
-static uint64_t iterations_prev;
-
-static void migration_bitmap_sync_init(void)
+static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
+                                        ram_addr_t start, ram_addr_t length)
 {
-    start_time = 0;
-    bytes_xfer_prev = 0;
-    num_dirty_pages_period = 0;
-    xbzrle_cache_miss_prev = 0;
-    iterations_prev = 0;
+    rs->migration_dirty_pages +=
+        cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
+                                              &rs->num_dirty_pages_period);
 }
 
 /**
@@ -644,85 +658,81 @@ uint64_t ram_pagesize_summary(void)
     RAMBlock *block;
     uint64_t summary = 0;
 
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+    RAMBLOCK_FOREACH(block) {
         summary |= block->page_size;
     }
 
     return summary;
 }
 
-static void migration_bitmap_sync(void)
+static void migration_bitmap_sync(RAMState *rs)
 {
     RAMBlock *block;
-    MigrationState *s = migrate_get_current();
     int64_t end_time;
-    int64_t bytes_xfer_now;
+    uint64_t bytes_xfer_now;
 
-    bitmap_sync_count++;
+    rs->bitmap_sync_count++;
 
-    if (!bytes_xfer_prev) {
-        bytes_xfer_prev = ram_bytes_transferred();
-    }
-
-    if (!start_time) {
-        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+    if (!rs->time_last_bitmap_sync) {
+        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
     }
 
     trace_migration_bitmap_sync_start();
     memory_global_dirty_log_sync();
 
-    qemu_mutex_lock(&migration_bitmap_mutex);
+    qemu_mutex_lock(&rs->bitmap_mutex);
     rcu_read_lock();
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        migration_bitmap_sync_range(block->offset, block->used_length);
+    RAMBLOCK_FOREACH(block) {
+        migration_bitmap_sync_range(rs, block, 0, block->used_length);
     }
     rcu_read_unlock();
-    qemu_mutex_unlock(&migration_bitmap_mutex);
+    qemu_mutex_unlock(&rs->bitmap_mutex);
 
-    trace_migration_bitmap_sync_end(num_dirty_pages_period);
+    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
 
     end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
 
     /* more than 1 second = 1000 milliseconds */
-    if (end_time > start_time + 1000) {
+    if (end_time > rs->time_last_bitmap_sync + 1000) {
+        /* calculate period counters */
+        rs->dirty_pages_rate = rs->num_dirty_pages_period * 1000
+            / (end_time - rs->time_last_bitmap_sync);
+        bytes_xfer_now = ram_bytes_transferred();
+
         if (migrate_auto_converge()) {
             /* The following detection logic can be refined later. For now:
                Check to see if the dirtied bytes exceed 50% of the approx.
                amount of bytes that just got transferred since the last time
                we were in this routine. If that happens twice, start or
                increase throttling. */
-            bytes_xfer_now = ram_bytes_transferred();
 
-            if (s->dirty_pages_rate &&
-               (num_dirty_pages_period * TARGET_PAGE_SIZE >
-                   (bytes_xfer_now - bytes_xfer_prev)/2) &&
-               (dirty_rate_high_cnt++ >= 2)) {
+            if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
+                   (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
+                (++rs->dirty_rate_high_cnt >= 2)) {
                     trace_migration_throttle();
-                    dirty_rate_high_cnt = 0;
+                    rs->dirty_rate_high_cnt = 0;
                     mig_throttle_guest_down();
-             }
-             bytes_xfer_prev = bytes_xfer_now;
+            }
         }
 
         if (migrate_use_xbzrle()) {
-            if (iterations_prev != acct_info.iterations) {
-                acct_info.xbzrle_cache_miss_rate =
-                   (double)(acct_info.xbzrle_cache_miss -
-                            xbzrle_cache_miss_prev) /
-                   (acct_info.iterations - iterations_prev);
+            if (rs->iterations_prev != rs->iterations) {
+                rs->xbzrle_cache_miss_rate =
+                   (double)(rs->xbzrle_cache_miss -
+                            rs->xbzrle_cache_miss_prev) /
+                   (rs->iterations - rs->iterations_prev);
             }
-            iterations_prev = acct_info.iterations;
-            xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
+            rs->iterations_prev = rs->iterations;
+            rs->xbzrle_cache_miss_prev = rs->xbzrle_cache_miss;
         }
-        s->dirty_pages_rate = num_dirty_pages_period * 1000
-            / (end_time - start_time);
-        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
-        start_time = end_time;
-        num_dirty_pages_period = 0;
+
+        /* reset period counters */
+        rs->time_last_bitmap_sync = end_time;
+        rs->num_dirty_pages_period = 0;
+        rs->bytes_xfer_prev = bytes_xfer_now;
     }
-    s->dirty_sync_count = bitmap_sync_count;
     if (migrate_use_events()) {
-        qapi_event_send_migration_pass(bitmap_sync_count, NULL);
+        qapi_event_send_migration_pass(rs->bitmap_sync_count, NULL);
     }
 }
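
The auto-converge check above boils down to the predicate sketched here (an illustrative helper using the names introduced by this patch, not code from the tree): start or increase throttling once the guest has dirtied more than half as many bytes as were transferred since the previous sync, for two syncs in a row.

    static bool should_throttle(RAMState *rs, uint64_t bytes_xfer_now)
    {
        uint64_t dirtied = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
        uint64_t sent = bytes_xfer_now - rs->bytes_xfer_prev;

        /* only count a high-dirty period when dirtying exceeded half of the
         * bytes sent; two such periods in a row trigger throttling */
        return dirtied > sent / 2 && ++rs->dirty_rate_high_cnt >= 2;
    }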
 
@@ -731,37 +741,35 @@ static void migration_bitmap_sync(void)
  *
  * Returns the number of pages written.
  *
- * @f: QEMUFile where to send the data
+ * @rs: current RAM state
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  * @p: pointer to the page
- * @bytes_transferred: increase it with the number of transferred bytes
  */
-static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
-                          uint8_t *p, uint64_t *bytes_transferred)
+static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
+                          uint8_t *p)
 {
     int pages = -1;
 
     if (is_zero_range(p, TARGET_PAGE_SIZE)) {
-        acct_info.dup_pages++;
-        *bytes_transferred += save_page_header(f, block,
-                                               offset | RAM_SAVE_FLAG_COMPRESS);
-        qemu_put_byte(f, 0);
-        *bytes_transferred += 1;
+        rs->zero_pages++;
+        rs->bytes_transferred +=
+            save_page_header(rs, rs->f, block, offset | RAM_SAVE_FLAG_ZERO);
+        qemu_put_byte(rs->f, 0);
+        rs->bytes_transferred += 1;
         pages = 1;
     }
 
     return pages;
 }
 
-static void ram_release_pages(MigrationState *ms, const char *rbname,
-                              uint64_t offset, int pages)
+static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
 {
-    if (!migrate_release_ram() || !migration_in_postcopy(ms)) {
+    if (!migrate_release_ram() || !migration_in_postcopy()) {
         return;
     }
 
-    ram_discard_range(NULL, rbname, offset, pages << TARGET_PAGE_BITS);
+    ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
 }
 
 /**
@@ -773,16 +781,11 @@ static void ram_release_pages(MigrationState *ms, const char *rbname,
  *                if xbzrle noticed the page was the same.
  *
  * @rs: current RAM state
- * @ms: current migration state
- * @f: QEMUFile where to send the data
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  * @last_stage: if we are at the completion stage
- * @bytes_transferred: increase it with the number of transferred bytes
  */
-static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
-                         PageSearchStatus *pss, bool last_stage,
-                         uint64_t *bytes_transferred)
+static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
 {
     int pages = -1;
     uint64_t bytes_xmit;
@@ -791,16 +794,17 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
     int ret;
     bool send_async = true;
     RAMBlock *block = pss->block;
-    ram_addr_t offset = pss->offset;
+    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
 
     p = block->host + offset;
+    trace_ram_save_page(block->idstr, (uint64_t)offset, p);
 
     /* In doubt sent page as normal */
     bytes_xmit = 0;
-    ret = ram_control_save_page(f, block->offset,
+    ret = ram_control_save_page(rs->f, block->offset,
                            offset, TARGET_PAGE_SIZE, &bytes_xmit);
     if (bytes_xmit) {
-        *bytes_transferred += bytes_xmit;
+        rs->bytes_transferred += bytes_xmit;
         pages = 1;
     }
 
@@ -808,29 +812,26 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
 
     current_addr = block->offset + offset;
 
-    if (block == rs->last_sent_block) {
-        offset |= RAM_SAVE_FLAG_CONTINUE;
-    }
     if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
         if (ret != RAM_SAVE_CONTROL_DELAYED) {
             if (bytes_xmit > 0) {
-                acct_info.norm_pages++;
+                rs->norm_pages++;
             } else if (bytes_xmit == 0) {
-                acct_info.dup_pages++;
+                rs->zero_pages++;
             }
         }
     } else {
-        pages = save_zero_page(f, block, offset, p, bytes_transferred);
+        pages = save_zero_page(rs, block, offset, p);
         if (pages > 0) {
             /* Must let xbzrle know, otherwise a previous (now 0'd) cached
              * page would be stale
              */
             xbzrle_cache_zero_page(rs, current_addr);
-            ram_release_pages(ms, block->idstr, pss->offset, pages);
+            ram_release_pages(block->idstr, offset, pages);
         } else if (!rs->ram_bulk_stage &&
-                   !migration_in_postcopy(ms) && migrate_use_xbzrle()) {
-            pages = save_xbzrle_page(f, &p, current_addr, block,
-                                     offset, last_stage, bytes_transferred);
+                   !migration_in_postcopy() && migrate_use_xbzrle()) {
+            pages = save_xbzrle_page(rs, &p, current_addr, block,
+                                     offset, last_stage);
             if (!last_stage) {
                 /* Can't send this cached data async, since the cache page
                  * might get updated before it gets to the wire
@@ -842,18 +843,18 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
 
     /* XBZRLE overflow or normal page */
     if (pages == -1) {
-        *bytes_transferred += save_page_header(f, block,
-                                               offset | RAM_SAVE_FLAG_PAGE);
+        rs->bytes_transferred += save_page_header(rs, rs->f, block,
+                                                  offset | RAM_SAVE_FLAG_PAGE);
         if (send_async) {
-            qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE,
+            qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE,
                                   migrate_release_ram() &
-                                  migration_in_postcopy(ms));
+                                  migration_in_postcopy());
         } else {
-            qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
+            qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE);
         }
-        *bytes_transferred += TARGET_PAGE_SIZE;
+        rs->bytes_transferred += TARGET_PAGE_SIZE;
         pages = 1;
-        acct_info.norm_pages++;
+        rs->norm_pages++;
     }
 
     XBZRLE_cache_unlock();
@@ -864,10 +865,11 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
 static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
                                 ram_addr_t offset)
 {
+    RAMState *rs = &ram_state;
     int bytes_sent, blen;
     uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
 
-    bytes_sent = save_page_header(f, block, offset |
+    bytes_sent = save_page_header(rs, f, block, offset |
                                   RAM_SAVE_FLAG_COMPRESS_PAGE);
     blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
                                      migrate_compress_level());
@@ -877,16 +879,13 @@ static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
         error_report("compressed data failed!");
     } else {
         bytes_sent += blen;
-        ram_release_pages(migrate_get_current(), block->idstr,
-                          offset & TARGET_PAGE_MASK, 1);
+        ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
     }
 
     return bytes_sent;
 }
 
-static uint64_t bytes_transferred;
-
-static void flush_compressed_data(QEMUFile *f)
+static void flush_compressed_data(RAMState *rs)
 {
     int idx, len, thread_count;
 
@@ -906,8 +905,8 @@ static void flush_compressed_data(QEMUFile *f)
     for (idx = 0; idx < thread_count; idx++) {
         qemu_mutex_lock(&comp_param[idx].mutex);
         if (!comp_param[idx].quit) {
-            len = qemu_put_qemu_file(f, comp_param[idx].file);
-            bytes_transferred += len;
+            len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
+            rs->bytes_transferred += len;
         }
         qemu_mutex_unlock(&comp_param[idx].mutex);
     }
@@ -920,9 +919,8 @@ static inline void set_compress_params(CompressParam *param, RAMBlock *block,
     param->offset = offset;
 }
 
-static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
-                                           ram_addr_t offset,
-                                           uint64_t *bytes_transferred)
+static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
+                                           ram_addr_t offset)
 {
     int idx, thread_count, bytes_xmit = -1, pages = -1;
 
@@ -932,14 +930,14 @@ static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
         for (idx = 0; idx < thread_count; idx++) {
             if (comp_param[idx].done) {
                 comp_param[idx].done = false;
-                bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
+                bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
                 qemu_mutex_lock(&comp_param[idx].mutex);
                 set_compress_params(&comp_param[idx], block, offset);
                 qemu_cond_signal(&comp_param[idx].cond);
                 qemu_mutex_unlock(&comp_param[idx].mutex);
                 pages = 1;
-                acct_info.norm_pages++;
-                *bytes_transferred += bytes_xmit;
+                rs->norm_pages++;
+                rs->bytes_transferred += bytes_xmit;
                 break;
             }
         }
@@ -960,39 +958,34 @@ static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
  * Returns the number of pages written.
  *
  * @rs: current RAM state
- * @ms: current migration state
- * @f: QEMUFile where to send the data
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  * @last_stage: if we are at the completion stage
- * @bytes_transferred: increase it with the number of transferred bytes
  */
-static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
-                                    QEMUFile *f,
-                                    PageSearchStatus *pss, bool last_stage,
-                                    uint64_t *bytes_transferred)
+static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
+                                    bool last_stage)
 {
     int pages = -1;
     uint64_t bytes_xmit = 0;
     uint8_t *p;
     int ret, blen;
     RAMBlock *block = pss->block;
-    ram_addr_t offset = pss->offset;
+    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
 
     p = block->host + offset;
 
-    ret = ram_control_save_page(f, block->offset,
+    ret = ram_control_save_page(rs->f, block->offset,
                                 offset, TARGET_PAGE_SIZE, &bytes_xmit);
     if (bytes_xmit) {
-        *bytes_transferred += bytes_xmit;
+        rs->bytes_transferred += bytes_xmit;
         pages = 1;
     }
     if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
         if (ret != RAM_SAVE_CONTROL_DELAYED) {
             if (bytes_xmit > 0) {
-                acct_info.norm_pages++;
+                rs->norm_pages++;
             } else if (bytes_xmit == 0) {
-                acct_info.dup_pages++;
+                rs->zero_pages++;
             }
         }
     } else {
@@ -1003,34 +996,32 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
          * is used to avoid resending the block name.
          */
         if (block != rs->last_sent_block) {
-            flush_compressed_data(f);
-            pages = save_zero_page(f, block, offset, p, bytes_transferred);
+            flush_compressed_data(rs);
+            pages = save_zero_page(rs, block, offset, p);
             if (pages == -1) {
                 /* Make sure the first page is sent out before other pages */
-                bytes_xmit = save_page_header(f, block, offset |
+                bytes_xmit = save_page_header(rs, rs->f, block, offset |
                                               RAM_SAVE_FLAG_COMPRESS_PAGE);
-                blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
+                blen = qemu_put_compression_data(rs->f, p, TARGET_PAGE_SIZE,
                                                  migrate_compress_level());
                 if (blen > 0) {
-                    *bytes_transferred += bytes_xmit + blen;
-                    acct_info.norm_pages++;
+                    rs->bytes_transferred += bytes_xmit + blen;
+                    rs->norm_pages++;
                     pages = 1;
                 } else {
-                    qemu_file_set_error(f, blen);
+                    qemu_file_set_error(rs->f, blen);
                     error_report("compressed data failed!");
                 }
             }
             if (pages > 0) {
-                ram_release_pages(ms, block->idstr, pss->offset, pages);
+                ram_release_pages(block->idstr, offset, pages);
             }
         } else {
-            offset |= RAM_SAVE_FLAG_CONTINUE;
-            pages = save_zero_page(f, block, offset, p, bytes_transferred);
+            pages = save_zero_page(rs, block, offset, p);
             if (pages == -1) {
-                pages = compress_page_with_multi_thread(f, block, offset,
-                                                        bytes_transferred);
+                pages = compress_page_with_multi_thread(rs, block, offset);
             } else {
-                ram_release_pages(ms, block->idstr, pss->offset, pages);
+                ram_release_pages(block->idstr, offset, pages);
             }
         }
     }
@@ -1045,19 +1036,14 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
  * Returns whether a page is found
  *
  * @rs: current RAM state
- * @f: QEMUFile where to send the data
  * @pss: data about the state of the current dirty page scan
  * @again: set to false if the search has scanned the whole of RAM
- * @ram_addr_abs: pointer into which to store the address of the dirty page
- *                within the global ram_addr space
  */
-static bool find_dirty_block(RAMState *rs, QEMUFile *f, PageSearchStatus *pss,
-                             bool *again, ram_addr_t *ram_addr_abs)
+static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
 {
-    pss->offset = migration_bitmap_find_dirty(rs, pss->block, pss->offset,
-                                              ram_addr_abs);
+    pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
     if (pss->complete_round && pss->block == rs->last_seen_block &&
-        pss->offset >= rs->last_offset) {
+        pss->page >= rs->last_page) {
         /*
          * We've been once around the RAM and haven't found anything.
          * Give up.
@@ -1065,9 +1051,9 @@ static bool find_dirty_block(RAMState *rs, QEMUFile *f, PageSearchStatus *pss,
         *again = false;
         return false;
     }
-    if (pss->offset >= pss->block->used_length) {
+    if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
         /* Didn't find anything in this RAM Block */
-        pss->offset = 0;
+        pss->page = 0;
         pss->block = QLIST_NEXT_RCU(pss->block, next);
         if (!pss->block) {
             /* Hit the end of the list */
@@ -1079,8 +1065,7 @@ static bool find_dirty_block(RAMState *rs, QEMUFile *f, PageSearchStatus *pss,
                 /* If xbzrle is on, stop using the data compression at this
                  * point. In theory, xbzrle can do better than compression.
                  */
-                flush_compressed_data(f);
-                compression_switch = false;
+                flush_compressed_data(rs);
             }
         }
         /* Didn't find anything this time, but try again on the new block */
@@ -1101,35 +1086,30 @@ static bool find_dirty_block(RAMState *rs, QEMUFile *f, PageSearchStatus *pss,
  *
  * Returns the block of the page (or NULL if none available)
  *
- * @ms: current migration state
+ * @rs: current RAM state
  * @offset: used to return the offset within the RAMBlock
- * @ram_addr_abs: pointer into which to store the address of the dirty page
- *                within the global ram_addr space
  */
-static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset,
-                              ram_addr_t *ram_addr_abs)
+static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
 {
     RAMBlock *block = NULL;
 
-    qemu_mutex_lock(&ms->src_page_req_mutex);
-    if (!QSIMPLEQ_EMPTY(&ms->src_page_requests)) {
-        struct MigrationSrcPageRequest *entry =
-                                QSIMPLEQ_FIRST(&ms->src_page_requests);
+    qemu_mutex_lock(&rs->src_page_req_mutex);
+    if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
+        struct RAMSrcPageRequest *entry =
+                                QSIMPLEQ_FIRST(&rs->src_page_requests);
         block = entry->rb;
         *offset = entry->offset;
-        *ram_addr_abs = (entry->offset + entry->rb->offset) &
-                        TARGET_PAGE_MASK;
 
         if (entry->len > TARGET_PAGE_SIZE) {
             entry->len -= TARGET_PAGE_SIZE;
             entry->offset += TARGET_PAGE_SIZE;
         } else {
             memory_region_unref(block->mr);
-            QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
+            QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
             g_free(entry);
         }
     }
-    qemu_mutex_unlock(&ms->src_page_req_mutex);
+    qemu_mutex_unlock(&rs->src_page_req_mutex);
 
     return block;
 }
@@ -1142,21 +1122,16 @@ static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset,
  * Returns whether a queued page is found
  *
  * @rs: current RAM state
- * @ms: current migration state
  * @pss: data about the state of the current dirty page scan
- * @ram_addr_abs: pointer into which to store the address of the dirty page
- *                within the global ram_addr space
  */
-static bool get_queued_page(RAMState *rs, MigrationState *ms,
-                            PageSearchStatus *pss,
-                            ram_addr_t *ram_addr_abs)
+static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
 {
     RAMBlock  *block;
     ram_addr_t offset;
     bool dirty;
 
     do {
-        block = unqueue_page(ms, &offset, ram_addr_abs);
+        block = unqueue_page(rs, &offset);
         /*
          * We're sending this page, and since it's postcopy nothing else
          * will dirty it, and we must make sure it doesn't get sent again
@@ -1164,19 +1139,15 @@ static bool get_queued_page(RAMState *rs, MigrationState *ms,
          * search already sent it.
          */
         if (block) {
-            unsigned long *bitmap;
-            bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
-            dirty = test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, bitmap);
+            unsigned long page;
+
+            page = offset >> TARGET_PAGE_BITS;
+            dirty = test_bit(page, block->bmap);
             if (!dirty) {
-                trace_get_queued_page_not_dirty(
-                    block->idstr, (uint64_t)offset,
-                    (uint64_t)*ram_addr_abs,
-                    test_bit(*ram_addr_abs >> TARGET_PAGE_BITS,
-                         atomic_rcu_read(&migration_bitmap_rcu)->unsentmap));
+                trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
+                       page, test_bit(page, block->unsentmap));
             } else {
-                trace_get_queued_page(block->idstr,
-                                      (uint64_t)offset,
-                                      (uint64_t)*ram_addr_abs);
+                trace_get_queued_page(block->idstr, (uint64_t)offset, page);
             }
         }
 
@@ -1197,7 +1168,7 @@ static bool get_queued_page(RAMState *rs, MigrationState *ms,
          * it just requested.
          */
         pss->block = block;
-        pss->offset = offset;
+        pss->page = offset >> TARGET_PAGE_BITS;
     }
 
     return !!block;
@@ -1210,18 +1181,18 @@ static bool get_queued_page(RAMState *rs, MigrationState *ms,
  * It should be empty at the end anyway, but in error cases there may
  * be some left.  In case any pages are left, we drop them.
  *
- * @ms: current migration state
  */
-void migration_page_queue_free(MigrationState *ms)
+void migration_page_queue_free(void)
 {
-    struct MigrationSrcPageRequest *mspr, *next_mspr;
+    struct RAMSrcPageRequest *mspr, *next_mspr;
+    RAMState *rs = &ram_state;
     /* This queue generally should be empty - but in the case of a failed
       * migration it might have some droppings left in it.
      */
     rcu_read_lock();
-    QSIMPLEQ_FOREACH_SAFE(mspr, &ms->src_page_requests, next_req, next_mspr) {
+    QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
         memory_region_unref(mspr->rb->mr);
-        QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
+        QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
         g_free(mspr);
     }
     rcu_read_unlock();
@@ -1234,22 +1205,21 @@ void migration_page_queue_free(MigrationState *ms)
  *
  * Returns zero on success or negative on error
  *
- * @ms: current migration state
  * @rbname: Name of the RAMBlock of the request. NULL means the
  *          same that last one.
  * @start: starting address from the start of the RAMBlock
  * @len: length (in bytes) to send
  */
-int ram_save_queue_pages(MigrationState *ms, const char *rbname,
-                         ram_addr_t start, ram_addr_t len)
+int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
 {
     RAMBlock *ramblock;
+    RAMState *rs = &ram_state;
 
-    ms->postcopy_requests++;
+    rs->postcopy_requests++;
     rcu_read_lock();
     if (!rbname) {
         /* Reuse last RAMBlock */
-        ramblock = ms->last_req_rb;
+        ramblock = rs->last_req_rb;
 
         if (!ramblock) {
             /*
@@ -1267,7 +1237,7 @@ int ram_save_queue_pages(MigrationState *ms, const char *rbname,
             error_report("ram_save_queue_pages no block '%s'", rbname);
             goto err;
         }
-        ms->last_req_rb = ramblock;
+        rs->last_req_rb = ramblock;
     }
     trace_ram_save_queue_pages(ramblock->idstr, start, len);
     if (start+len > ramblock->used_length) {
@@ -1277,16 +1247,16 @@ int ram_save_queue_pages(MigrationState *ms, const char *rbname,
         goto err;
     }
 
-    struct MigrationSrcPageRequest *new_entry =
-        g_malloc0(sizeof(struct MigrationSrcPageRequest));
+    struct RAMSrcPageRequest *new_entry =
+        g_malloc0(sizeof(struct RAMSrcPageRequest));
     new_entry->rb = ramblock;
     new_entry->offset = start;
     new_entry->len = len;
 
     memory_region_ref(ramblock->mr);
-    qemu_mutex_lock(&ms->src_page_req_mutex);
-    QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, new_entry, next_req);
-    qemu_mutex_unlock(&ms->src_page_req_mutex);
+    qemu_mutex_lock(&rs->src_page_req_mutex);
+    QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
+    qemu_mutex_unlock(&rs->src_page_req_mutex);
     rcu_read_unlock();
 
     return 0;
@@ -1303,45 +1273,33 @@ err:
  *
  * @rs: current RAM state
  * @ms: current migration state
- * @f: QEMUFile where to send the data
  * @pss: data about the page we want to send
  * @last_stage: if we are at the completion stage
- * @bytes_transferred: increase it with the number of transferred bytes
- * @dirty_ram_abs: address of the start of the dirty page in ram_addr_t space
  */
-static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
-                                PageSearchStatus *pss,
-                                bool last_stage,
-                                uint64_t *bytes_transferred,
-                                ram_addr_t dirty_ram_abs)
+static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
+                                bool last_stage)
 {
     int res = 0;
 
     /* Check if the page is dirty and if it is, send it */
-    if (migration_bitmap_clear_dirty(dirty_ram_abs)) {
-        unsigned long *unsentmap;
-        if (compression_switch && migrate_use_compression()) {
-            res = ram_save_compressed_page(rs, ms, f, pss,
-                                           last_stage,
-                                           bytes_transferred);
+    if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
+        /*
+         * If xbzrle is on, stop using the data compression after first
+         * round of migration even if compression is enabled. In theory,
+         * xbzrle can do better than compression.
+         */
+        if (migrate_use_compression() &&
+            (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
+            res = ram_save_compressed_page(rs, pss, last_stage);
         } else {
-            res = ram_save_page(rs, ms, f, pss, last_stage,
-                                bytes_transferred);
+            res = ram_save_page(rs, pss, last_stage);
         }
 
         if (res < 0) {
             return res;
         }
-        unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
-        if (unsentmap) {
-            clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap);
-        }
-        /* Only update last_sent_block if a block was actually sent; xbzrle
-         * might have decided the page was identical so didn't bother writing
-         * to the stream.
-         */
-        if (res > 0) {
-            rs->last_sent_block = pss->block;
+        if (pss->block->unsentmap) {
+            clear_bit(pss->page, pss->block->unsentmap);
         }
     }
 
@@ -1356,40 +1314,36 @@ static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
  * a host page in which case the remainder of the hostpage is sent.
  * Only dirty target pages are sent. Note that the host page size may
  * be a huge page for this block.
+ * Saving stops at the used_length boundary of the block if the
+ * RAMBlock size isn't a multiple of the host page size.
  *
  * Returns the number of pages written or negative on error
  *
  * @rs: current RAM state
  * @ms: current migration state
- * @f: QEMUFile where to send the data
  * @pss: data about the page we want to send
  * @last_stage: if we are at the completion stage
- * @bytes_transferred: increase it with the number of transferred bytes
- * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
  */
-static int ram_save_host_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
-                              PageSearchStatus *pss,
-                              bool last_stage,
-                              uint64_t *bytes_transferred,
-                              ram_addr_t dirty_ram_abs)
+static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
+                              bool last_stage)
 {
     int tmppages, pages = 0;
-    size_t pagesize = qemu_ram_pagesize(pss->block);
+    size_t pagesize_bits =
+        qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
 
     do {
-        tmppages = ram_save_target_page(rs, ms, f, pss, last_stage,
-                                        bytes_transferred, dirty_ram_abs);
+        tmppages = ram_save_target_page(rs, pss, last_stage);
         if (tmppages < 0) {
             return tmppages;
         }
 
         pages += tmppages;
-        pss->offset += TARGET_PAGE_SIZE;
-        dirty_ram_abs += TARGET_PAGE_SIZE;
-    } while (pss->offset & (pagesize - 1));
+        pss->page++;
+    } while ((pss->page & (pagesize_bits - 1)) &&
+             offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
 
     /* The offset we leave with is the last one we looked at */
-    pss->offset -= TARGET_PAGE_SIZE;
+    pss->page--;
     return pages;
 }
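
As a worked example of the loop bound above, assuming 4 KiB target pages and a 2 MiB hugetlbfs-backed RAMBlock (numbers chosen purely for illustration):

    /* pagesize_bits = (2 * 1024 * 1024) >> 12 = 512, so ram_save_host_page()
     * sends up to 512 consecutive target pages before
     * (pss->page & (pagesize_bits - 1)) wraps to zero, and it also stops
     * early if the next page would fall outside the block's used_length. */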
 
@@ -1401,23 +1355,17 @@ static int ram_save_host_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
  * Returns the number of pages written where zero means no dirty pages
  *
  * @rs: current RAM state
- * @f: QEMUFile where to send the data
  * @last_stage: if we are at the completion stage
- * @bytes_transferred: increase it with the number of transferred bytes
  *
  * On systems where host-page-size > target-page-size it will send all the
  * pages in a host page that are dirty.
  */
 
-static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage,
-                                   uint64_t *bytes_transferred)
+static int ram_find_and_save_block(RAMState *rs, bool last_stage)
 {
     PageSearchStatus pss;
-    MigrationState *ms = migrate_get_current();
     int pages = 0;
     bool again, found;
-    ram_addr_t dirty_ram_abs; /* Address of the start of the dirty page in
-                                 ram_addr_t space */
 
     /* No dirty page as there is zero RAM */
     if (!ram_bytes_total()) {
@@ -1425,7 +1373,7 @@ static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage,
     }
 
     pss.block = rs->last_seen_block;
-    pss.offset = rs->last_offset;
+    pss.page = rs->last_page;
     pss.complete_round = false;
 
     if (!pss.block) {
@@ -1434,22 +1382,20 @@ static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage,
 
     do {
         again = true;
-        found = get_queued_page(rs, ms, &pss, &dirty_ram_abs);
+        found = get_queued_page(rs, &pss);
 
         if (!found) {
             /* priority queue empty, so just search for something dirty */
-            found = find_dirty_block(rs, f, &pss, &again, &dirty_ram_abs);
+            found = find_dirty_block(rs, &pss, &again);
         }
 
         if (found) {
-            pages = ram_save_host_page(rs, ms, f, &pss,
-                                       last_stage, bytes_transferred,
-                                       dirty_ram_abs);
+            pages = ram_save_host_page(rs, &pss, last_stage);
         }
     } while (!pages && again);
 
     rs->last_seen_block = pss.block;
-    rs->last_offset = pss.offset;
+    rs->last_page = pss.page;
 
     return pages;
 }
@@ -1457,38 +1403,26 @@ static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage,
 void acct_update_position(QEMUFile *f, size_t size, bool zero)
 {
     uint64_t pages = size / TARGET_PAGE_SIZE;
+    RAMState *rs = &ram_state;
+
     if (zero) {
-        acct_info.dup_pages += pages;
+        rs->zero_pages += pages;
     } else {
-        acct_info.norm_pages += pages;
-        bytes_transferred += size;
+        rs->norm_pages += pages;
+        rs->bytes_transferred += size;
         qemu_update_position(f, size);
     }
 }
 
-static ram_addr_t ram_save_remaining(void)
-{
-    return migration_dirty_pages;
-}
-
-uint64_t ram_bytes_remaining(void)
-{
-    return ram_save_remaining() * TARGET_PAGE_SIZE;
-}
-
-uint64_t ram_bytes_transferred(void)
-{
-    return bytes_transferred;
-}
-
 uint64_t ram_bytes_total(void)
 {
     RAMBlock *block;
     uint64_t total = 0;
 
     rcu_read_lock();
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
+    RAMBLOCK_FOREACH(block) {
         total += block->used_length;
+    }
     rcu_read_unlock();
     return total;
 }
@@ -1499,23 +1433,20 @@ void free_xbzrle_decoded_buf(void)
     xbzrle_decoded_buf = NULL;
 }
 
-static void migration_bitmap_free(struct BitmapRcu *bmap)
-{
-    g_free(bmap->bmap);
-    g_free(bmap->unsentmap);
-    g_free(bmap);
-}
-
 static void ram_migration_cleanup(void *opaque)
 {
+    RAMBlock *block;
+
     /* caller must hold the iothread lock or be in a bh, so there is
      * no writing race against the migration bitmaps
      */
-    struct BitmapRcu *bitmap = migration_bitmap_rcu;
-    atomic_rcu_set(&migration_bitmap_rcu, NULL);
-    if (bitmap) {
-        memory_global_dirty_log_stop();
-        call_rcu(bitmap, migration_bitmap_free, rcu);
+    memory_global_dirty_log_stop();
+
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        g_free(block->bmap);
+        block->bmap = NULL;
+        g_free(block->unsentmap);
+        block->unsentmap = NULL;
     }
 
     XBZRLE_cache_lock();
@@ -1535,71 +1466,34 @@ static void ram_state_reset(RAMState *rs)
 {
     rs->last_seen_block = NULL;
     rs->last_sent_block = NULL;
-    rs->last_offset = 0;
+    rs->last_page = 0;
     rs->last_version = ram_list.version;
     rs->ram_bulk_stage = true;
 }
 
 #define MAX_WAIT 50 /* ms, half buffered_file limit */
 
-void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
-{
-    /* called in qemu main thread, so there is
-     * no writing race against this migration_bitmap
-     */
-    if (migration_bitmap_rcu) {
-        struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap;
-        bitmap = g_new(struct BitmapRcu, 1);
-        bitmap->bmap = bitmap_new(new);
-
-        /* prevent migration_bitmap content from being set bit
-         * by migration_bitmap_sync_range() at the same time.
-         * it is safe to migration if migration_bitmap is cleared bit
-         * at the same time.
-         */
-        qemu_mutex_lock(&migration_bitmap_mutex);
-        bitmap_copy(bitmap->bmap, old_bitmap->bmap, old);
-        bitmap_set(bitmap->bmap, old, new - old);
-
-        /* We don't have a way to safely extend the sentmap
-         * with RCU; so mark it as missing, entry to postcopy
-         * will fail.
-         */
-        bitmap->unsentmap = NULL;
-
-        atomic_rcu_set(&migration_bitmap_rcu, bitmap);
-        qemu_mutex_unlock(&migration_bitmap_mutex);
-        migration_dirty_pages += new - old;
-        call_rcu(old_bitmap, migration_bitmap_free, rcu);
-    }
-}
-
 /*
  * 'expected' is the value you expect the bitmap mostly to be full
  * of; it won't bother printing lines that are all this value.
  * If 'todump' is null the migration bitmap is dumped.
  */
-void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
+void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
+                           unsigned long pages)
 {
-    int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
-
     int64_t cur;
     int64_t linelen = 128;
     char linebuf[129];
 
-    if (!todump) {
-        todump = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
-    }
-
-    for (cur = 0; cur < ram_pages; cur += linelen) {
+    for (cur = 0; cur < pages; cur += linelen) {
         int64_t curb;
         bool found = false;
         /*
          * Last line; catch the case where the line length
          * is longer than remaining ram
          */
-        if (cur + linelen > ram_pages) {
-            linelen = ram_pages - cur;
+        if (cur + linelen > pages) {
+            linelen = pages - cur;
         }
         for (curb = 0; curb < linelen; curb++) {
             bool thisbit = test_bit(cur + curb, todump);
@@ -1618,16 +1512,15 @@ void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
 void ram_postcopy_migrated_memory_release(MigrationState *ms)
 {
     struct RAMBlock *block;
-    unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
 
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        unsigned long first = block->offset >> TARGET_PAGE_BITS;
-        unsigned long range = first + (block->used_length >> TARGET_PAGE_BITS);
-        unsigned long run_start = find_next_zero_bit(bitmap, range, first);
+    RAMBLOCK_FOREACH(block) {
+        unsigned long *bitmap = block->bmap;
+        unsigned long range = block->used_length >> TARGET_PAGE_BITS;
+        unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
 
         while (run_start < range) {
             unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
-            ram_discard_range(NULL, block->idstr, run_start << TARGET_PAGE_BITS,
+            ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
                               (run_end - run_start) << TARGET_PAGE_BITS);
             run_start = find_next_zero_bit(bitmap, range, run_end + 1);
         }
@@ -1650,15 +1543,13 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms)
  */
 static int postcopy_send_discard_bm_ram(MigrationState *ms,
                                         PostcopyDiscardState *pds,
-                                        unsigned long start,
-                                        unsigned long length)
+                                        RAMBlock *block)
 {
-    unsigned long end = start + length; /* one after the end */
+    unsigned long end = block->used_length >> TARGET_PAGE_BITS;
     unsigned long current;
-    unsigned long *unsentmap;
+    unsigned long *unsentmap = block->unsentmap;
 
-    unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
-    for (current = start; current < end; ) {
+    for (current = 0; current < end; ) {
         unsigned long one = find_next_bit(unsentmap, end, current);
 
         if (one <= end) {
@@ -1700,19 +1591,16 @@ static int postcopy_each_ram_send_discard(MigrationState *ms)
     struct RAMBlock *block;
     int ret;
 
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        unsigned long first = block->offset >> TARGET_PAGE_BITS;
-        PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
-                                                               first,
-                                                               block->idstr);
+    RAMBLOCK_FOREACH(block) {
+        PostcopyDiscardState *pds =
+            postcopy_discard_send_init(ms, block->idstr);
 
         /*
          * Postcopy sends chunks of bitmap over the wire, but it
          * just needs indexes at this point, avoids it having
          * target page specific code.
          */
-        ret = postcopy_send_discard_bm_ram(ms, pds, first,
-                                    block->used_length >> TARGET_PAGE_BITS);
+        ret = postcopy_send_discard_bm_ram(ms, pds, block);
         postcopy_discard_send_finish(ms, pds);
         if (ret) {
             return ret;
@@ -1742,12 +1630,11 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
                                           RAMBlock *block,
                                           PostcopyDiscardState *pds)
 {
-    unsigned long *bitmap;
-    unsigned long *unsentmap;
+    RAMState *rs = &ram_state;
+    unsigned long *bitmap = block->bmap;
+    unsigned long *unsentmap = block->unsentmap;
     unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
-    unsigned long first = block->offset >> TARGET_PAGE_BITS;
-    unsigned long len = block->used_length >> TARGET_PAGE_BITS;
-    unsigned long last = first + (len - 1);
+    unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
     unsigned long run_start;
 
     if (block->page_size == TARGET_PAGE_SIZE) {
@@ -1755,18 +1642,15 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
         return;
     }
 
-    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
-    unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
-
     if (unsent_pass) {
         /* Find a sent page */
-        run_start = find_next_zero_bit(unsentmap, last + 1, first);
+        run_start = find_next_zero_bit(unsentmap, pages, 0);
     } else {
         /* Find a dirty page */
-        run_start = find_next_bit(bitmap, last + 1, first);
+        run_start = find_next_bit(bitmap, pages, 0);
     }
 
-    while (run_start <= last) {
+    while (run_start < pages) {
         bool do_fixup = false;
         unsigned long fixup_start_addr;
         unsigned long host_offset;
@@ -1786,9 +1670,9 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
             /* Find the end of this run */
             unsigned long run_end;
             if (unsent_pass) {
-                run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
+                run_end = find_next_bit(unsentmap, pages, run_start + 1);
             } else {
-                run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
+                run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
             }
             /*
              * If the end isn't at the start of a host page, then the
@@ -1839,17 +1723,16 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
                  * Remark them as dirty, updating the count for any pages
                  * that weren't previously dirty.
                  */
-                migration_dirty_pages += !test_and_set_bit(page, bitmap);
+                rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
             }
         }
 
         if (unsent_pass) {
             /* Find the next sent page for the next iteration */
-            run_start = find_next_zero_bit(unsentmap, last + 1,
-                                           run_start);
+            run_start = find_next_zero_bit(unsentmap, pages, run_start);
         } else {
             /* Find the next dirty page for the next iteration */
-            run_start = find_next_bit(bitmap, last + 1, run_start);
+            run_start = find_next_bit(bitmap, pages, run_start);
         }
     }
 }
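
(The pass above works in target-page units even when block->page_size is a huge
page, so any fixup has to be widened to a whole host page.  A sketch of the
alignment arithmetic it relies on, using QEMU's QEMU_ALIGN_DOWN macro;
host_page_start is a hypothetical helper, not part of the patch.)

    /* First target-page index of the host page containing 'page'. */
    static unsigned long host_page_start(RAMBlock *block, unsigned long page)
    {
        unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;

        return QEMU_ALIGN_DOWN(page, host_ratio);
    }
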
@@ -1866,34 +1749,22 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
  * Returns zero on success
  *
  * @ms: current migration state
+ * @block: block we want to work with
  */
-static int postcopy_chunk_hostpages(MigrationState *ms)
+static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
 {
-    RAMState *rs = &ram_state;
-    struct RAMBlock *block;
-
-    /* Easiest way to make sure we don't resume in the middle of a host-page */
-    rs->last_seen_block = NULL;
-    rs->last_sent_block = NULL;
-    rs->last_offset     = 0;
-
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        unsigned long first = block->offset >> TARGET_PAGE_BITS;
+    PostcopyDiscardState *pds =
+        postcopy_discard_send_init(ms, block->idstr);
 
-        PostcopyDiscardState *pds =
-                         postcopy_discard_send_init(ms, first, block->idstr);
-
-        /* First pass: Discard all partially sent host pages */
-        postcopy_chunk_hostpages_pass(ms, true, block, pds);
-        /*
-         * Second pass: Ensure that all partially dirty host pages are made
-         * fully dirty.
-         */
-        postcopy_chunk_hostpages_pass(ms, false, block, pds);
-
-        postcopy_discard_send_finish(ms, pds);
-    } /* ram_list loop */
+    /* First pass: Discard all partially sent host pages */
+    postcopy_chunk_hostpages_pass(ms, true, block, pds);
+    /*
+     * Second pass: Ensure that all partially dirty host pages are made
+     * fully dirty.
+     */
+    postcopy_chunk_hostpages_pass(ms, false, block, pds);
 
+    postcopy_discard_send_finish(ms, pds);
     return 0;
 }
 
@@ -1914,44 +1785,50 @@ static int postcopy_chunk_hostpages(MigrationState *ms)
  */
 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
 {
+    RAMState *rs = &ram_state;
+    RAMBlock *block;
     int ret;
-    unsigned long *bitmap, *unsentmap;
 
     rcu_read_lock();
 
     /* This should be our last sync, the src is now paused */
-    migration_bitmap_sync();
+    migration_bitmap_sync(rs);
 
-    unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
-    if (!unsentmap) {
-        /* We don't have a safe way to resize the sentmap, so
-         * if the bitmap was resized it will be NULL at this
-         * point.
-         */
-        error_report("migration ram resized during precopy phase");
-        rcu_read_unlock();
-        return -EINVAL;
-    }
-
-    /* Deal with TPS != HPS and huge pages */
-    ret = postcopy_chunk_hostpages(ms);
-    if (ret) {
-        rcu_read_unlock();
-        return ret;
-    }
-
-    /*
-     * Update the unsentmap to be unsentmap = unsentmap | dirty
-     */
-    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
-    bitmap_or(unsentmap, unsentmap, bitmap,
-               last_ram_offset() >> TARGET_PAGE_BITS);
+    /* Easiest way to make sure we don't resume in the middle of a host-page */
+    rs->last_seen_block = NULL;
+    rs->last_sent_block = NULL;
+    rs->last_page = 0;
 
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
+        unsigned long *bitmap = block->bmap;
+        unsigned long *unsentmap = block->unsentmap;
+
+        if (!unsentmap) {
+            /* We don't have a safe way to resize the sentmap, so
+             * if the bitmap was resized it will be NULL at this
+             * point.
+             */
+            error_report("migration ram resized during precopy phase");
+            rcu_read_unlock();
+            return -EINVAL;
+        }
+        /* Deal with TPS != HPS and huge pages */
+        ret = postcopy_chunk_hostpages(ms, block);
+        if (ret) {
+            rcu_read_unlock();
+            return ret;
+        }
 
-    trace_ram_postcopy_send_discard_bitmap();
+        /*
+         * Update the unsentmap to be unsentmap = unsentmap | dirty
+         */
+        bitmap_or(unsentmap, unsentmap, bitmap, pages);
 #ifdef DEBUG_POSTCOPY
-    ram_debug_dump_bitmap(unsentmap, true);
+        ram_debug_dump_bitmap(unsentmap, true, pages);
 #endif
+    }
+    trace_ram_postcopy_send_discard_bitmap();
 
     ret = postcopy_each_ram_send_discard(ms);
     rcu_read_unlock();
@@ -1964,15 +1841,12 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
  *
  * Returns zero on success
  *
- * @mis: current migration incoming state
  * @rbname: name of the RAMBlock of the request. NULL means the
  *          same that last one.
  * @start: RAMBlock starting page
  * @length: RAMBlock size
  */
-int ram_discard_range(MigrationIncomingState *mis,
-                      const char *rbname,
-                      uint64_t start, size_t length)
+int ram_discard_range(const char *rbname, uint64_t start, size_t length)
 {
     int ret = -1;
 
@@ -1994,14 +1868,12 @@ err:
     return ret;
 }
 
-static int ram_save_init_globals(RAMState *rs)
+static int ram_state_init(RAMState *rs)
 {
-    int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
-
-    dirty_rate_high_cnt = 0;
-    bitmap_sync_count = 0;
-    migration_bitmap_sync_init();
-    qemu_mutex_init(&migration_bitmap_mutex);
+    memset(rs, 0, sizeof(*rs));
+    qemu_mutex_init(&rs->bitmap_mutex);
+    qemu_mutex_init(&rs->src_page_req_mutex);
+    QSIMPLEQ_INIT(&rs->src_page_requests);
 
     if (migrate_use_xbzrle()) {
         XBZRLE_cache_lock();
@@ -2030,8 +1902,6 @@ static int ram_save_init_globals(RAMState *rs)
             XBZRLE.encoded_buf = NULL;
             return -1;
         }
-
-        acct_clear();
     }
 
     /* For memory_global_dirty_log_start below.  */
@@ -2039,19 +1909,21 @@ static int ram_save_init_globals(RAMState *rs)
 
     qemu_mutex_lock_ramlist();
     rcu_read_lock();
-    bytes_transferred = 0;
     ram_state_reset(rs);
 
-    migration_bitmap_rcu = g_new0(struct BitmapRcu, 1);
     /* Skip setting bitmap if there is no RAM */
     if (ram_bytes_total()) {
-        ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
-        migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages);
-        bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages);
+        RAMBlock *block;
+
+        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+            unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
 
-        if (migrate_postcopy_ram()) {
-            migration_bitmap_rcu->unsentmap = bitmap_new(ram_bitmap_pages);
-            bitmap_set(migration_bitmap_rcu->unsentmap, 0, ram_bitmap_pages);
+            block->bmap = bitmap_new(pages);
+            bitmap_set(block->bmap, 0, pages);
+            if (migrate_postcopy_ram()) {
+                block->unsentmap = bitmap_new(pages);
+                bitmap_set(block->unsentmap, 0, pages);
+            }
         }
     }
 
@@ -2059,10 +1931,10 @@ static int ram_save_init_globals(RAMState *rs)
      * Count the total number of pages used by ram blocks not including any
      * gaps due to alignment or unplugs.
      */
-    migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
+    rs->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
 
     memory_global_dirty_log_start();
-    migration_bitmap_sync();
+    migration_bitmap_sync(rs);
     qemu_mutex_unlock_ramlist();
     qemu_mutex_unlock_iothread();
     rcu_read_unlock();
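
(The per-block bmap/unsentmap allocated above replace the old single global
migration bitmap, so the teardown path, which is not shown in this hunk, is
assumed to free them block by block, roughly as below; ram_block_bitmaps_free
is a hypothetical name.)

    static void ram_block_bitmaps_free(void)
    {
        RAMBlock *block;

        RAMBLOCK_FOREACH(block) {
            g_free(block->bmap);
            block->bmap = NULL;
            g_free(block->unsentmap);
            block->unsentmap = NULL;
        }
    }
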
@@ -2092,16 +1964,17 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
 
     /* migration has already setup the bitmap, reuse it. */
     if (!migration_in_colo_state()) {
-        if (ram_save_init_globals(rs) < 0) {
+        if (ram_state_init(rs) < 0) {
             return -1;
          }
     }
+    rs->f = f;
 
     rcu_read_lock();
 
     qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
 
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+    RAMBLOCK_FOREACH(block) {
         qemu_put_byte(f, strlen(block->idstr));
         qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
         qemu_put_be64(f, block->used_length);
@@ -2151,13 +2024,13 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
     while ((ret = qemu_file_rate_limit(f)) == 0) {
         int pages;
 
-        pages = ram_find_and_save_block(rs, f, false, &bytes_transferred);
+        pages = ram_find_and_save_block(rs, false);
         /* no more pages to send */
         if (pages == 0) {
             done = 1;
             break;
         }
-        acct_info.iterations++;
+        rs->iterations++;
 
         /* we want to check in the 1st loop, just in case it was the 1st time
            and we had to sync the dirty bitmap.
@@ -2173,7 +2046,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
         }
         i++;
     }
-    flush_compressed_data(f);
+    flush_compressed_data(rs);
     rcu_read_unlock();
 
     /*
@@ -2183,7 +2056,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
     ram_control_after_iterate(f, RAM_CONTROL_ROUND);
 
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
-    bytes_transferred += 8;
+    rs->bytes_transferred += 8;
 
     ret = qemu_file_get_error(f);
     if (ret < 0) {
@@ -2209,8 +2082,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
 
     rcu_read_lock();
 
-    if (!migration_in_postcopy(migrate_get_current())) {
-        migration_bitmap_sync();
+    if (!migration_in_postcopy()) {
+        migration_bitmap_sync(rs);
     }
 
     ram_control_before_iterate(f, RAM_CONTROL_FINISH);
@@ -2221,15 +2094,14 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
     while (true) {
         int pages;
 
-        pages = ram_find_and_save_block(rs, f, !migration_in_colo_state(),
-                                        &bytes_transferred);
+        pages = ram_find_and_save_block(rs, !migration_in_colo_state());
         /* no more blocks to send */
         if (pages == 0) {
             break;
         }
     }
 
-    flush_compressed_data(f);
+    flush_compressed_data(rs);
     ram_control_after_iterate(f, RAM_CONTROL_FINISH);
 
     rcu_read_unlock();
@@ -2243,18 +2115,19 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                              uint64_t *non_postcopiable_pending,
                              uint64_t *postcopiable_pending)
 {
+    RAMState *rs = opaque;
     uint64_t remaining_size;
 
-    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
+    remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
 
-    if (!migration_in_postcopy(migrate_get_current()) &&
+    if (!migration_in_postcopy() &&
         remaining_size < max_size) {
         qemu_mutex_lock_iothread();
         rcu_read_lock();
-        migration_bitmap_sync();
+        migration_bitmap_sync(rs);
         rcu_read_unlock();
         qemu_mutex_unlock_iothread();
-        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
+        remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
     }
 
     /* We can do postcopy, and all the data is postcopiable */
@@ -2505,7 +2378,7 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
  */
 int ram_postcopy_incoming_init(MigrationIncomingState *mis)
 {
-    size_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
+    unsigned long ram_pages = last_ram_page();
 
     return postcopy_ram_incoming_init(mis, ram_pages);
 }
@@ -2545,7 +2418,7 @@ static int ram_load_postcopy(QEMUFile *f)
 
         trace_ram_load_postcopy_loop((uint64_t)addr, flags);
         place_needed = false;
-        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
+        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
             block = ram_block_from_stream(f, flags);
 
             host = host_from_ram_block_offset(block, addr);
@@ -2592,7 +2465,7 @@ static int ram_load_postcopy(QEMUFile *f)
         last_host = host;
 
         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
-        case RAM_SAVE_FLAG_COMPRESS:
+        case RAM_SAVE_FLAG_ZERO:
             ch = qemu_get_byte(f);
             memset(page_buffer, ch, TARGET_PAGE_SIZE);
             if (ch) {
@@ -2681,7 +2554,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
         flags = addr & ~TARGET_PAGE_MASK;
         addr &= TARGET_PAGE_MASK;
 
-        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
+        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
             RAMBlock *block = ram_block_from_stream(f, flags);
 
@@ -2691,6 +2564,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
                 ret = -EINVAL;
                 break;
             }
+            trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
         }
 
         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
@@ -2742,7 +2616,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
             }
             break;
 
-        case RAM_SAVE_FLAG_COMPRESS:
+        case RAM_SAVE_FLAG_ZERO:
             ch = qemu_get_byte(f);
             ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
             break;
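
(On the load side above a RAM_SAVE_FLAG_ZERO page carries only a single fill
byte.  The sender is therefore assumed to encode such a page as the flagged,
page-aligned offset followed by that byte, roughly as sketched; the real sender
also emits the block idstr when RAM_SAVE_FLAG_CONTINUE is not set, which this
sketch omits.)

    static void save_zero_page_sketch(QEMUFile *f, ram_addr_t offset, uint8_t ch)
    {
        /* the flag travels in the low bits of the page-aligned offset */
        qemu_put_be64(f, offset | RAM_SAVE_FLAG_ZERO);
        qemu_put_byte(f, ch);
    }
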