migration: export capabilities to props
index 1a3d9e6d875220cfbcc65de09725bb7cb5746e1b..1b08296d1bca83ca644d6c44d5a948c31d08d61f 100644
  * THE SOFTWARE.
  */
 #include "qemu/osdep.h"
-#include "qemu-common.h"
 #include "cpu.h"
 #include <zlib.h>
 #include "qapi-event.h"
 #include "qemu/cutils.h"
 #include "qemu/bitops.h"
 #include "qemu/bitmap.h"
-#include "qemu/timer.h"
 #include "qemu/main-loop.h"
 #include "xbzrle.h"
-#include "migration/migration.h"
-#include "migration/qemu-file.h"
-#include "migration/vmstate.h"
+#include "ram.h"
+#include "migration.h"
+#include "migration/register.h"
+#include "migration/misc.h"
+#include "qemu-file.h"
 #include "postcopy-ram.h"
-#include "exec/address-spaces.h"
 #include "migration/page_cache.h"
 #include "qemu/error-report.h"
 #include "trace.h"
 /* 0x80 is reserved in migration.h start with 0x100 next */
 #define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
 
-static uint8_t *ZERO_TARGET_PAGE;
-
 static inline bool is_zero_range(uint8_t *p, uint64_t size)
 {
     return buffer_is_zero(p, size);
 }
 
+XBZRLECacheStats xbzrle_counters;
+
 /* struct contains XBZRLE cache and a static page
    used by the compression */
 static struct {
@@ -84,11 +83,12 @@ static struct {
     /* Cache for XBZRLE, Protected by lock. */
     PageCache *cache;
     QemuMutex lock;
+    /* it will store a page full of zeros */
+    uint8_t *zero_target_page;
+    /* buffer used for XBZRLE decoding */
+    uint8_t *decoded_buf;
 } XBZRLE;
 
-/* buffer used for XBZRLE decoding */
-static uint8_t *xbzrle_decoded_buf;
-
 static void XBZRLE_cache_lock(void)
 {
     if (migrate_use_xbzrle())
@@ -175,8 +175,6 @@ struct RAMState {
     bool ram_bulk_stage;
     /* How many times we have dirty too many pages */
     int dirty_rate_high_cnt;
-    /* How many times we have synchronized the bitmap */
-    uint64_t bitmap_sync_count;
     /* these variables are used for bitmap sync */
     /* last time we did a full bitmap_sync */
     int64_t time_last_bitmap_sync;
@@ -188,33 +186,11 @@ struct RAMState {
     uint64_t xbzrle_cache_miss_prev;
     /* number of iterations at the beginning of period */
     uint64_t iterations_prev;
-    /* Accounting fields */
-    /* number of zero pages.  It used to be pages filled by the same char. */
-    uint64_t zero_pages;
-    /* number of normal transferred pages */
-    uint64_t norm_pages;
     /* Iterations since start */
     uint64_t iterations;
-    /* xbzrle transmitted bytes.  Notice that this is with
-     * compression, they can't be calculated from the pages */
-    uint64_t xbzrle_bytes;
-    /* xbzrle transmmited pages */
-    uint64_t xbzrle_pages;
-    /* xbzrle number of cache miss */
-    uint64_t xbzrle_cache_miss;
-    /* xbzrle miss rate */
-    double xbzrle_cache_miss_rate;
-    /* xbzrle number of overflows */
-    uint64_t xbzrle_overflows;
-    /* number of dirty bits in the bitmap */
-    uint64_t migration_dirty_pages;
-    /* total number of bytes transferred */
-    uint64_t bytes_transferred;
-    /* number of dirtied pages in the last second */
-    uint64_t dirty_pages_rate;
-    /* Count of requests incoming from destination */
-    uint64_t postcopy_requests;
+    /* number of dirty bits in the bitmap */
+    uint64_t migration_dirty_pages;
     /* protects modification of the bitmap */
     QemuMutex bitmap_mutex;
     /* The RAMBlock used in the last src_page_requests */
     RAMBlock *last_req_rb;
@@ -224,67 +200,14 @@ struct RAMState {
 };
 typedef struct RAMState RAMState;
 
-static RAMState ram_state;
-
-uint64_t dup_mig_pages_transferred(void)
-{
-    return ram_state.zero_pages;
-}
-
-uint64_t norm_mig_pages_transferred(void)
-{
-    return ram_state.norm_pages;
-}
-
-uint64_t xbzrle_mig_bytes_transferred(void)
-{
-    return ram_state.xbzrle_bytes;
-}
-
-uint64_t xbzrle_mig_pages_transferred(void)
-{
-    return ram_state.xbzrle_pages;
-}
-
-uint64_t xbzrle_mig_pages_cache_miss(void)
-{
-    return ram_state.xbzrle_cache_miss;
-}
-
-double xbzrle_mig_cache_miss_rate(void)
-{
-    return ram_state.xbzrle_cache_miss_rate;
-}
-
-uint64_t xbzrle_mig_pages_overflow(void)
-{
-    return ram_state.xbzrle_overflows;
-}
-
-uint64_t ram_bytes_transferred(void)
-{
-    return ram_state.bytes_transferred;
-}
+static RAMState *ram_state;
 
 uint64_t ram_bytes_remaining(void)
 {
-    return ram_state.migration_dirty_pages * TARGET_PAGE_SIZE;
-}
-
-uint64_t ram_dirty_sync_count(void)
-{
-    return ram_state.bitmap_sync_count;
+    return ram_state->migration_dirty_pages * TARGET_PAGE_SIZE;
 }
 
-uint64_t ram_dirty_pages_rate(void)
-{
-    return ram_state.dirty_pages_rate;
-}
-
-uint64_t ram_postcopy_requests(void)
-{
-    return ram_state.postcopy_requests;
-}
+MigrationStats ram_counters;
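The accessor functions removed above (dup_mig_pages_transferred(), ram_bytes_transferred(), ram_dirty_sync_count(), and friends) are superseded by the exported ram_counters and xbzrle_counters globals, so callers read the fields directly instead of going through one accessor per counter. A minimal sketch of a consumer, using only field names that appear in this diff (the helper function itself is hypothetical):

/* Hypothetical helper, not part of this diff: snapshot the exported
 * RAM migration counters into a caller-provided MigrationStats. */
static void snapshot_ram_counters(MigrationStats *dst)
{
    dst->transferred       = ram_counters.transferred;
    dst->duplicate         = ram_counters.duplicate;
    dst->normal            = ram_counters.normal;
    dst->dirty_pages_rate  = ram_counters.dirty_pages_rate;
    dst->dirty_sync_count  = ram_counters.dirty_sync_count;
    dst->postcopy_requests = ram_counters.postcopy_requests;
}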
 
 /* used by the search for pages to send */
 struct PageSearchStatus {
@@ -383,7 +306,7 @@ static inline void terminate_compression_threads(void)
     }
 }
 
-void migrate_compress_threads_join(void)
+static void compress_threads_save_cleanup(void)
 {
     int i, thread_count;
 
@@ -406,7 +329,7 @@ void migrate_compress_threads_join(void)
     comp_param = NULL;
 }
 
-void migrate_compress_threads_create(void)
+static void compress_threads_save_setup(void)
 {
     int i, thread_count;
 
@@ -510,8 +433,8 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
 
     /* We don't care if this fails to allocate a new cache page
      * as long as it updated an old one */
-    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
-                 rs->bitmap_sync_count);
+    cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
+                 ram_counters.dirty_sync_count);
 }
 
 #define ENCODING_FLAG_XBZRLE 0x1
@@ -537,11 +460,12 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
     int encoded_len = 0, bytes_xbzrle;
     uint8_t *prev_cached_page;
 
-    if (!cache_is_cached(XBZRLE.cache, current_addr, rs->bitmap_sync_count)) {
-        rs->xbzrle_cache_miss++;
+    if (!cache_is_cached(XBZRLE.cache, current_addr,
+                         ram_counters.dirty_sync_count)) {
+        xbzrle_counters.cache_miss++;
         if (!last_stage) {
             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
-                             rs->bitmap_sync_count) == -1) {
+                             ram_counters.dirty_sync_count) == -1) {
                 return -1;
             } else {
                 /* update *current_data when the page has been
@@ -566,7 +490,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
         return 0;
     } else if (encoded_len == -1) {
         trace_save_xbzrle_page_overflow();
-        rs->xbzrle_overflows++;
+        xbzrle_counters.overflow++;
         /* update data in the cache */
         if (!last_stage) {
             memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
@@ -587,9 +511,9 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
     qemu_put_be16(rs->f, encoded_len);
     qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
     bytes_xbzrle += encoded_len + 1 + 2;
-    rs->xbzrle_pages++;
-    rs->xbzrle_bytes += bytes_xbzrle;
-    rs->bytes_transferred += bytes_xbzrle;
+    xbzrle_counters.pages++;
+    xbzrle_counters.bytes += bytes_xbzrle;
+    ram_counters.transferred += bytes_xbzrle;
 
     return 1;
 }
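The "encoded_len + 1 + 2" accounted just above matches the XBZRLE on-wire layout: one encoding-flag byte, the 16-bit big-endian length written by qemu_put_be16(), and then the encoded payload; the bytes written by save_page_header() are accounted separately. A trivial sketch of that size computation (the helper is hypothetical, not part of the diff):

/* Hypothetical helper: bytes one XBZRLE page occupies on the wire,
 * excluding the save_page_header() bytes accounted separately. */
static size_t xbzrle_wire_size(size_t encoded_len)
{
    return 1            /* encoding flag byte */
         + 2            /* 16-bit big-endian encoded length */
         + encoded_len; /* XBZRLE-encoded payload */
}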
@@ -671,7 +595,7 @@ static void migration_bitmap_sync(RAMState *rs)
     int64_t end_time;
     uint64_t bytes_xfer_now;
 
-    rs->bitmap_sync_count++;
+    ram_counters.dirty_sync_count++;
 
     if (!rs->time_last_bitmap_sync) {
         rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
@@ -695,9 +619,9 @@ static void migration_bitmap_sync(RAMState *rs)
     /* more than 1 second = 1000 millisecons */
     if (end_time > rs->time_last_bitmap_sync + 1000) {
         /* calculate period counters */
-        rs->dirty_pages_rate = rs->num_dirty_pages_period * 1000
+        ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
             / (end_time - rs->time_last_bitmap_sync);
-        bytes_xfer_now = ram_bytes_transferred();
+        bytes_xfer_now = ram_counters.transferred;
 
         if (migrate_auto_converge()) {
             /* The following detection logic can be refined later. For now:
@@ -708,7 +632,7 @@ static void migration_bitmap_sync(RAMState *rs)
 
             if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
                    (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
-                (rs->dirty_rate_high_cnt++ >= 2)) {
+                (++rs->dirty_rate_high_cnt >= 2)) {
                     trace_migration_throttle();
                     rs->dirty_rate_high_cnt = 0;
                     mig_throttle_guest_down();
@@ -717,13 +641,13 @@ static void migration_bitmap_sync(RAMState *rs)
 
         if (migrate_use_xbzrle()) {
             if (rs->iterations_prev != rs->iterations) {
-                rs->xbzrle_cache_miss_rate =
-                   (double)(rs->xbzrle_cache_miss -
+                xbzrle_counters.cache_miss_rate =
+                   (double)(xbzrle_counters.cache_miss -
                             rs->xbzrle_cache_miss_prev) /
                    (rs->iterations - rs->iterations_prev);
             }
             rs->iterations_prev = rs->iterations;
-            rs->xbzrle_cache_miss_prev = rs->xbzrle_cache_miss;
+            rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
         }
 
         /* reset period counters */
@@ -732,7 +656,7 @@ static void migration_bitmap_sync(RAMState *rs)
         rs->bytes_xfer_prev = bytes_xfer_now;
     }
     if (migrate_use_events()) {
-        qapi_event_send_migration_pass(rs->bitmap_sync_count, NULL);
+        qapi_event_send_migration_pass(ram_counters.dirty_sync_count, NULL);
     }
 }
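Two details in this hunk are easy to miss: the XBZRLE cache-miss rate is now computed per period as (xbzrle_counters.cache_miss - rs->xbzrle_cache_miss_prev) / (rs->iterations - rs->iterations_prev), and the auto-converge test switches from post-increment to pre-increment of dirty_rate_high_cnt, so guest throttling triggers on the second consecutive "dirtying faster than we transfer" sync rather than the third. A standalone sketch of that increment change (plain C, not QEMU code):

#include <assert.h>

int main(void)
{
    int old_cnt = 0, new_cnt = 0, old_hits = 0, new_hits = 0;

    for (int sync = 0; sync < 2; sync++) {
        if (old_cnt++ >= 2) {   /* old: (rs->dirty_rate_high_cnt++ >= 2) */
            old_hits++;
        }
        if (++new_cnt >= 2) {   /* new: (++rs->dirty_rate_high_cnt >= 2) */
            new_hits++;
        }
    }
    assert(old_hits == 0);      /* old check has not fired after two hot syncs */
    assert(new_hits == 1);      /* new check fires on the second hot sync */
    return 0;
}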
 
@@ -752,11 +676,11 @@ static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
     int pages = -1;
 
     if (is_zero_range(p, TARGET_PAGE_SIZE)) {
-        rs->zero_pages++;
-        rs->bytes_transferred +=
+        ram_counters.duplicate++;
+        ram_counters.transferred +=
             save_page_header(rs, rs->f, block, offset | RAM_SAVE_FLAG_ZERO);
         qemu_put_byte(rs->f, 0);
-        rs->bytes_transferred += 1;
+        ram_counters.transferred += 1;
         pages = 1;
     }
 
@@ -804,7 +728,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
     ret = ram_control_save_page(rs->f, block->offset,
                            offset, TARGET_PAGE_SIZE, &bytes_xmit);
     if (bytes_xmit) {
-        rs->bytes_transferred += bytes_xmit;
+        ram_counters.transferred += bytes_xmit;
         pages = 1;
     }
 
@@ -815,9 +739,9 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
     if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
         if (ret != RAM_SAVE_CONTROL_DELAYED) {
             if (bytes_xmit > 0) {
-                rs->norm_pages++;
+                ram_counters.normal++;
             } else if (bytes_xmit == 0) {
-                rs->zero_pages++;
+                ram_counters.duplicate++;
             }
         }
     } else {
@@ -843,8 +767,8 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
 
     /* XBZRLE overflow or normal page */
     if (pages == -1) {
-        rs->bytes_transferred += save_page_header(rs, rs->f, block,
-                                                  offset | RAM_SAVE_FLAG_PAGE);
+        ram_counters.transferred +=
+            save_page_header(rs, rs->f, block, offset | RAM_SAVE_FLAG_PAGE);
         if (send_async) {
             qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE,
                                   migrate_release_ram() &
@@ -852,9 +776,9 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
         } else {
             qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE);
         }
-        rs->bytes_transferred += TARGET_PAGE_SIZE;
+        ram_counters.transferred += TARGET_PAGE_SIZE;
         pages = 1;
-        rs->norm_pages++;
+        ram_counters.normal++;
     }
 
     XBZRLE_cache_unlock();
@@ -865,7 +789,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
 static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
                                 ram_addr_t offset)
 {
-    RAMState *rs = &ram_state;
+    RAMState *rs = ram_state;
     int bytes_sent, blen;
     uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
 
@@ -906,7 +830,7 @@ static void flush_compressed_data(RAMState *rs)
         qemu_mutex_lock(&comp_param[idx].mutex);
         if (!comp_param[idx].quit) {
             len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
-            rs->bytes_transferred += len;
+            ram_counters.transferred += len;
         }
         qemu_mutex_unlock(&comp_param[idx].mutex);
     }
@@ -936,8 +860,8 @@ static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
                 qemu_cond_signal(&comp_param[idx].cond);
                 qemu_mutex_unlock(&comp_param[idx].mutex);
                 pages = 1;
-                rs->norm_pages++;
-                rs->bytes_transferred += bytes_xmit;
+                ram_counters.normal++;
+                ram_counters.transferred += bytes_xmit;
                 break;
             }
         }
@@ -977,15 +901,15 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
     ret = ram_control_save_page(rs->f, block->offset,
                                 offset, TARGET_PAGE_SIZE, &bytes_xmit);
     if (bytes_xmit) {
-        rs->bytes_transferred += bytes_xmit;
+        ram_counters.transferred += bytes_xmit;
         pages = 1;
     }
     if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
         if (ret != RAM_SAVE_CONTROL_DELAYED) {
             if (bytes_xmit > 0) {
-                rs->norm_pages++;
+                ram_counters.normal++;
             } else if (bytes_xmit == 0) {
-                rs->zero_pages++;
+                ram_counters.duplicate++;
             }
         }
     } else {
@@ -1005,8 +929,8 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
                 blen = qemu_put_compression_data(rs->f, p, TARGET_PAGE_SIZE,
                                                  migrate_compress_level());
                 if (blen > 0) {
-                    rs->bytes_transferred += bytes_xmit + blen;
-                    rs->norm_pages++;
+                    ram_counters.transferred += bytes_xmit + blen;
+                    ram_counters.normal++;
                     pages = 1;
                 } else {
                     qemu_file_set_error(rs->f, blen);
@@ -1182,10 +1106,9 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
  * be some left.  in case that there is any page left, we drop it.
  *
  */
-void migration_page_queue_free(void)
+static void migration_page_queue_free(RAMState *rs)
 {
     struct RAMSrcPageRequest *mspr, *next_mspr;
-    RAMState *rs = &ram_state;
     /* This queue generally should be empty - but in the case of a failed
      * migration might have some droppings in.
      */
@@ -1213,9 +1136,9 @@ void migration_page_queue_free(void)
 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
 {
     RAMBlock *ramblock;
-    RAMState *rs = &ram_state;
+    RAMState *rs = ram_state;
 
-    rs->postcopy_requests++;
+    ram_counters.postcopy_requests++;
     rcu_read_lock();
     if (!rbname) {
         /* Reuse last RAMBlock */
@@ -1403,13 +1326,12 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage)
 void acct_update_position(QEMUFile *f, size_t size, bool zero)
 {
     uint64_t pages = size / TARGET_PAGE_SIZE;
-    RAMState *rs = &ram_state;
 
     if (zero) {
-        rs->zero_pages += pages;
+        ram_counters.duplicate += pages;
     } else {
-        rs->norm_pages += pages;
-        rs->bytes_transferred += size;
+        ram_counters.normal += pages;
+        ram_counters.transferred += size;
         qemu_update_position(f, size);
     }
 }
@@ -1427,14 +1349,20 @@ uint64_t ram_bytes_total(void)
     return total;
 }
 
-void free_xbzrle_decoded_buf(void)
+static void xbzrle_load_setup(void)
 {
-    g_free(xbzrle_decoded_buf);
-    xbzrle_decoded_buf = NULL;
+    XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
 }
 
-static void ram_migration_cleanup(void *opaque)
+static void xbzrle_load_cleanup(void)
 {
+    g_free(XBZRLE.decoded_buf);
+    XBZRLE.decoded_buf = NULL;
+}
+
+static void ram_save_cleanup(void *opaque)
+{
+    RAMState **rsp = opaque;
     RAMBlock *block;
 
     /* caller have hold iothread lock or is in a bh, so there is
@@ -1454,12 +1382,17 @@ static void ram_migration_cleanup(void *opaque)
         cache_fini(XBZRLE.cache);
         g_free(XBZRLE.encoded_buf);
         g_free(XBZRLE.current_buf);
-        g_free(ZERO_TARGET_PAGE);
+        g_free(XBZRLE.zero_target_page);
         XBZRLE.cache = NULL;
         XBZRLE.encoded_buf = NULL;
         XBZRLE.current_buf = NULL;
+        XBZRLE.zero_target_page = NULL;
     }
     XBZRLE_cache_unlock();
+    migration_page_queue_free(*rsp);
+    compress_threads_save_cleanup();
+    g_free(*rsp);
+    *rsp = NULL;
 }
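ram_save_cleanup() now owns the teardown that previously had to be driven from outside ram.c: it drains the page-request queue, joins the compression threads, and frees the heap-allocated RAMState. Because the opaque handed to the migration core is a RAMState ** rather than the state itself, the cleanup hook can free the object and leave the global pointer NULL so a later setup can re-create it. A generic sketch of that double-pointer lifetime pattern (plain C, hypothetical names):

#include <stdlib.h>

typedef struct State { int dummy; } State;

static State *global_state;

/* setup allocates the state behind the caller-provided double pointer */
static int example_setup(State **sp)
{
    *sp = calloc(1, sizeof(**sp));
    return *sp ? 0 : -1;
}

/* cleanup gets the same double pointer as its opaque, frees the state
 * and resets the pointer so setup can run again safely */
static void example_cleanup(void *opaque)
{
    State **sp = opaque;

    free(*sp);
    *sp = NULL;
}

int main(void)
{
    if (example_setup(&global_state) == 0) {
        example_cleanup(&global_state);   /* global_state is NULL afterwards */
    }
    return 0;
}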
 
 static void ram_state_reset(RAMState *rs)
@@ -1630,7 +1563,7 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
                                           RAMBlock *block,
                                           PostcopyDiscardState *pds)
 {
-    RAMState *rs = &ram_state;
+    RAMState *rs = ram_state;
     unsigned long *bitmap = block->bmap;
     unsigned long *unsentmap = block->unsentmap;
     unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
@@ -1785,7 +1718,7 @@ static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
  */
 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
 {
-    RAMState *rs = &ram_state;
+    RAMState *rs = ram_state;
     RAMBlock *block;
     int ret;
 
@@ -1868,22 +1801,25 @@ err:
     return ret;
 }
 
-static int ram_state_init(RAMState *rs)
+static int ram_state_init(RAMState **rsp)
 {
-    memset(rs, 0, sizeof(*rs));
-    qemu_mutex_init(&rs->bitmap_mutex);
-    qemu_mutex_init(&rs->src_page_req_mutex);
-    QSIMPLEQ_INIT(&rs->src_page_requests);
+    *rsp = g_new0(RAMState, 1);
+
+    qemu_mutex_init(&(*rsp)->bitmap_mutex);
+    qemu_mutex_init(&(*rsp)->src_page_req_mutex);
+    QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
 
     if (migrate_use_xbzrle()) {
         XBZRLE_cache_lock();
-        ZERO_TARGET_PAGE = g_malloc0(TARGET_PAGE_SIZE);
+        XBZRLE.zero_target_page = g_malloc0(TARGET_PAGE_SIZE);
         XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                   TARGET_PAGE_SIZE,
                                   TARGET_PAGE_SIZE);
         if (!XBZRLE.cache) {
             XBZRLE_cache_unlock();
             error_report("Error creating cache");
+            g_free(*rsp);
+            *rsp = NULL;
             return -1;
         }
         XBZRLE_cache_unlock();
@@ -1892,6 +1828,8 @@ static int ram_state_init(RAMState *rs)
         XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
         if (!XBZRLE.encoded_buf) {
             error_report("Error allocating encoded_buf");
+            g_free(*rsp);
+            *rsp = NULL;
             return -1;
         }
 
@@ -1900,6 +1838,8 @@ static int ram_state_init(RAMState *rs)
             error_report("Error allocating current_buf");
             g_free(XBZRLE.encoded_buf);
             XBZRLE.encoded_buf = NULL;
+            g_free(*rsp);
+            *rsp = NULL;
             return -1;
         }
     }
@@ -1909,7 +1849,7 @@ static int ram_state_init(RAMState *rs)
 
     qemu_mutex_lock_ramlist();
     rcu_read_lock();
-    ram_state_reset(rs);
+    ram_state_reset(*rsp);
 
     /* Skip setting bitmap if there is no RAM */
     if (ram_bytes_total()) {
@@ -1931,10 +1871,10 @@ static int ram_state_init(RAMState *rs)
      * Count the total number of pages used by ram blocks not including any
      * gaps due to alignment or unplugs.
      */
-    rs->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
+    (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
 
     memory_global_dirty_log_start();
-    migration_bitmap_sync(rs);
+    migration_bitmap_sync(*rsp);
     qemu_mutex_unlock_ramlist();
     qemu_mutex_unlock_iothread();
     rcu_read_unlock();
@@ -1959,16 +1899,16 @@ static int ram_state_init(RAMState *rs)
  */
 static int ram_save_setup(QEMUFile *f, void *opaque)
 {
-    RAMState *rs = opaque;
+    RAMState **rsp = opaque;
     RAMBlock *block;
 
     /* migration has already setup the bitmap, reuse it. */
     if (!migration_in_colo_state()) {
-        if (ram_state_init(rs) < 0) {
+        if (ram_state_init(rsp) != 0) {
             return -1;
-         }
+        }
     }
-    rs->f = f;
+    (*rsp)->f = f;
 
     rcu_read_lock();
 
@@ -1984,6 +1924,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     }
 
     rcu_read_unlock();
+    compress_threads_save_setup();
 
     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
@@ -2003,7 +1944,8 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
  */
 static int ram_save_iterate(QEMUFile *f, void *opaque)
 {
-    RAMState *rs = opaque;
+    RAMState **temp = opaque;
+    RAMState *rs = *temp;
     int ret;
     int i;
     int64_t t0;
@@ -2056,7 +1998,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
     ram_control_after_iterate(f, RAM_CONTROL_ROUND);
 
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
-    rs->bytes_transferred += 8;
+    ram_counters.transferred += 8;
 
     ret = qemu_file_get_error(f);
     if (ret < 0) {
@@ -2078,7 +2020,8 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
  */
 static int ram_save_complete(QEMUFile *f, void *opaque)
 {
-    RAMState *rs = opaque;
+    RAMState **temp = opaque;
+    RAMState *rs = *temp;
 
     rcu_read_lock();
 
@@ -2115,7 +2058,8 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                              uint64_t *non_postcopiable_pending,
                              uint64_t *postcopiable_pending)
 {
-    RAMState *rs = opaque;
+    RAMState **temp = opaque;
+    RAMState *rs = *temp;
     uint64_t remaining_size;
 
     remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
@@ -2140,11 +2084,6 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
     int xh_flags;
     uint8_t *loaded_data;
 
-    if (!xbzrle_decoded_buf) {
-        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
-    }
-    loaded_data = xbzrle_decoded_buf;
-
     /* extract RLE header */
     xh_flags = qemu_get_byte(f);
     xh_len = qemu_get_be16(f);
@@ -2158,7 +2097,9 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
         error_report("Failed to load XBZRLE page - len overflow!");
         return -1;
     }
+    loaded_data = XBZRLE.decoded_buf;
     /* load data and decode */
+    /* it can change loaded_data to point to an internal buffer */
     qemu_get_buffer_in_place(f, &loaded_data, xh_len);
 
     /* decode RLE */
@@ -2292,10 +2233,13 @@ static void wait_for_decompress_done(void)
     qemu_mutex_unlock(&decomp_done_lock);
 }
 
-void migrate_decompress_threads_create(void)
+static void compress_threads_load_setup(void)
 {
     int i, thread_count;
 
+    if (!migrate_use_compression()) {
+        return;
+    }
     thread_count = migrate_decompress_threads();
     decompress_threads = g_new0(QemuThread, thread_count);
     decomp_param = g_new0(DecompressParam, thread_count);
@@ -2313,10 +2257,13 @@ void migrate_decompress_threads_create(void)
     }
 }
 
-void migrate_decompress_threads_join(void)
+static void compress_threads_load_cleanup(void)
 {
     int i, thread_count;
 
+    if (!migrate_use_compression()) {
+        return;
+    }
     thread_count = migrate_decompress_threads();
     for (i = 0; i < thread_count; i++) {
         qemu_mutex_lock(&decomp_param[i].mutex);
@@ -2365,6 +2312,28 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
     qemu_mutex_unlock(&decomp_done_lock);
 }
 
+/**
+ * ram_load_setup: Setup RAM for migration incoming side
+ *
+ * Returns zero to indicate success and negative for error
+ *
+ * @f: QEMUFile where to receive the data
+ * @opaque: RAMState pointer
+ */
+static int ram_load_setup(QEMUFile *f, void *opaque)
+{
+    xbzrle_load_setup();
+    compress_threads_load_setup();
+    return 0;
+}
+
+static int ram_load_cleanup(void *opaque)
+{
+    xbzrle_load_cleanup();
+    compress_threads_load_cleanup();
+    return 0;
+}
+
 /**
  * ram_postcopy_incoming_init: allocate postcopy data structures
  *
@@ -2517,7 +2486,7 @@ static int ram_load_postcopy(QEMUFile *f)
 
 static int ram_load(QEMUFile *f, void *opaque, int version_id)
 {
-    int flags = 0, ret = 0;
+    int flags = 0, ret = 0, invalid_flags = 0;
     static uint64_t seq_iter;
     int len = 0;
     /*
@@ -2534,6 +2503,9 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
         ret = -EINVAL;
     }
 
+    if (!migrate_use_compression()) {
+        invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
+    }
     /* This RCU critical section can be very long running.
      * When RCU reclaims in the code start to become numerous,
      * it will be necessary to reduce the granularity of this
@@ -2554,6 +2526,15 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
         flags = addr & ~TARGET_PAGE_MASK;
         addr &= TARGET_PAGE_MASK;
 
+        if (flags & invalid_flags) {
+            if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
+                error_report("Received an unexpected compressed page");
+            }
+
+            ret = -EINVAL;
+            break;
+        }
+
         if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
             RAMBlock *block = ram_block_from_stream(f, flags);
@@ -2667,13 +2648,15 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
 }
 
 static SaveVMHandlers savevm_ram_handlers = {
-    .save_live_setup = ram_save_setup,
+    .save_setup = ram_save_setup,
     .save_live_iterate = ram_save_iterate,
     .save_live_complete_postcopy = ram_save_complete,
     .save_live_complete_precopy = ram_save_complete,
     .save_live_pending = ram_save_pending,
     .load_state = ram_load,
-    .cleanup = ram_migration_cleanup,
+    .save_cleanup = ram_save_cleanup,
+    .load_setup = ram_load_setup,
+    .load_cleanup = ram_load_cleanup,
 };
 
 void ram_mig_init(void)
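The handlers table above reflects the reworked lifecycle: .save_live_setup is renamed to .save_setup, the old .cleanup becomes .save_cleanup, and the incoming side gains .load_setup/.load_cleanup (used here for the XBZRLE decode buffer and the decompression threads). For orientation, a purely hypothetical minimal table for some imaginary "foo" state, using only hook names and signatures that appear in this diff:

/* Hypothetical example, not part of this diff. */
static int foo_save_setup(QEMUFile *f, void *opaque) { return 0; }
static void foo_save_cleanup(void *opaque) { }
static int foo_load_setup(QEMUFile *f, void *opaque) { return 0; }
static int foo_load_cleanup(void *opaque) { return 0; }
static int foo_load_state(QEMUFile *f, void *opaque, int version_id) { return 0; }

static SaveVMHandlers savevm_foo_handlers = {
    .save_setup = foo_save_setup,
    .save_cleanup = foo_save_cleanup,
    .load_setup = foo_load_setup,
    .load_cleanup = foo_load_cleanup,
    .load_state = foo_load_state,
};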