Git Repo - qemu.git/commitdiff
migration: Fix multi-thread compression bug
author: Liang Li <[email protected]>
Thu, 5 May 2016 07:32:51 +0000 (15:32 +0800)
committer: Amit Shah <[email protected]>
Fri, 17 Jun 2016 12:54:01 +0000 (18:24 +0530)
Recently, a bug related to the multiple-thread compression feature for
live migration was reported. The destination side will be blocked
during live migration if there is a heavy workload on the host and a
memory-intensive workload in the guest; this is most likely to happen
when there is only one decompression thread.

Some parts of the decompression code are incorrect:
1. The main thread, which receives data from the source side, will
enter a busy loop while waiting for a free decompression thread.
2. A lock is needed to protect the decomp_param[idx]->start, because
it is checked in the main thread and is updated in the decompression
thread.

Fix these two issues by following the code pattern for compression.

Signed-off-by: Liang Li <[email protected]>
Reported-by: Daniel P. Berrange <[email protected]>
Reviewed-by: Daniel P. Berrange <[email protected]>
Reviewed-by: Dr. David Alan Gilbert <[email protected]>
Reviewed-by: Juan Quintela <[email protected]>
Tested-by: Daniel P. Berrange <[email protected]>
Signed-off-by: Liang Li <[email protected]>
Message-Id: <1462433579[email protected]>
Signed-off-by: Amit Shah <[email protected]>
migration/ram.c

index 42fb8ac6d6b663a0d918ff0290aceae37405a48b..f3fe6c7aae3b3e2ee7abfcca15ce4209759910c4 100644 (file)
@@ -265,6 +265,7 @@ typedef struct CompressParam CompressParam;
 
 struct DecompressParam {
     bool start;
+    bool done;
     QemuMutex mutex;
     QemuCond cond;
     void *des;
@@ -289,6 +290,8 @@ static bool quit_comp_thread;
 static bool quit_decomp_thread;
 static DecompressParam *decomp_param;
 static QemuThread *decompress_threads;
+static QemuMutex decomp_done_lock;
+static QemuCond decomp_done_cond;
 
 static int do_compress_ram_page(CompressParam *param);
 
@@ -834,6 +837,7 @@ static inline void start_compression(CompressParam *param)
 
 static inline void start_decompression(DecompressParam *param)
 {
+    param->done = false;
     qemu_mutex_lock(&param->mutex);
     param->start = true;
     qemu_cond_signal(&param->cond);
@@ -2196,19 +2200,24 @@ static void *do_data_decompress(void *opaque)
         qemu_mutex_lock(&param->mutex);
         while (!param->start && !quit_decomp_thread) {
             qemu_cond_wait(&param->cond, &param->mutex);
+        }
+        if (!quit_decomp_thread) {
             pagesize = TARGET_PAGE_SIZE;
-            if (!quit_decomp_thread) {
-                /* uncompress() will return failed in some case, especially
-                 * when the page is dirted when doing the compression, it's
-                 * not a problem because the dirty page will be retransferred
-                 * and uncompress() won't break the data in other pages.
-                 */
-                uncompress((Bytef *)param->des, &pagesize,
-                           (const Bytef *)param->compbuf, param->len);
-            }
-            param->start = false;
+            /* uncompress() will return failed in some case, especially
+             * when the page is dirted when doing the compression, it's
+             * not a problem because the dirty page will be retransferred
+             * and uncompress() won't break the data in other pages.
+             */
+            uncompress((Bytef *)param->des, &pagesize,
+                       (const Bytef *)param->compbuf, param->len);
         }
+        param->start = false;
         qemu_mutex_unlock(&param->mutex);
+
+        qemu_mutex_lock(&decomp_done_lock);
+        param->done = true;
+        qemu_cond_signal(&decomp_done_cond);
+        qemu_mutex_unlock(&decomp_done_lock);
     }
 
     return NULL;
@@ -2222,10 +2231,13 @@ void migrate_decompress_threads_create(void)
     decompress_threads = g_new0(QemuThread, thread_count);
     decomp_param = g_new0(DecompressParam, thread_count);
     quit_decomp_thread = false;
+    qemu_mutex_init(&decomp_done_lock);
+    qemu_cond_init(&decomp_done_cond);
     for (i = 0; i < thread_count; i++) {
         qemu_mutex_init(&decomp_param[i].mutex);
         qemu_cond_init(&decomp_param[i].cond);
         decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
+        decomp_param[i].done = true;
         qemu_thread_create(decompress_threads + i, "decompress",
                            do_data_decompress, decomp_param + i,
                            QEMU_THREAD_JOINABLE);
@@ -2261,9 +2273,10 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
     int idx, thread_count;
 
     thread_count = migrate_decompress_threads();
+    qemu_mutex_lock(&decomp_done_lock);
     while (true) {
         for (idx = 0; idx < thread_count; idx++) {
-            if (!decomp_param[idx].start) {
+            if (decomp_param[idx].done) {
                 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
                 decomp_param[idx].des = host;
                 decomp_param[idx].len = len;
@@ -2273,8 +2286,11 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
         }
         if (idx < thread_count) {
             break;
+        } else {
+            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
         }
     }
+    qemu_mutex_unlock(&decomp_done_lock);
 }
 
 /*
This page took 0.03237 seconds and 4 git commands to generate.