/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/hw.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"

#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)
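
/* Disk data moves in BLOCK_SIZE (1 MiB) chunks; the dirty bitmap and the
 * per-device aio_bitmap below both track state at this granularity. */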

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08

#define MAX_IS_ALLOCATED_SEARCH 65536

#define MAX_INFLIGHT_IO 512

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockBackend *blk;
    char *blk_name;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock.  */
    int64_t completed_sectors;

    /* During migration this is protected by iothread lock / AioContext.
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock and any AioContexts.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

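/* Stream record for one device block: a 64-bit big-endian word carrying
 * the sector number in its upper bits and the BLK_MIG_FLAG_* bits in the
 * low BDRV_SECTOR_BITS bits, then the length-prefixed device name, then
 * (unless the zero-block flag made the payload redundant) BLOCK_SIZE
 * bytes of data.
 */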
static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->blk_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *) blk->bmds->blk_name, len);

    /* if a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * thus if we queue zero blocks we slow down the migration */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}


/* Called with migration lock held.  */

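/* The aio_bitmap stores one bit per BDRV_SECTORS_PER_DIRTY_CHUNK-sized
 * chunk, packed into unsigned longs; a set bit means an AIO request for
 * that chunk is still in flight.
 */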
static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < blk_nb_sectors(bmds->blk)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

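/* Size the bitmap at one bit per dirty chunk, i.e. one byte per
 * 8 * BDRV_SECTORS_PER_DIRTY_CHUNK sectors, rounding up.
 */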
static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockBackend *bb = bmds->blk;
    int64_t bitmap_size;

    bitmap_size = blk_nb_sectors(bb) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

/* Never hold migration lock when yielding to the main loop!  */

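/* AIO completion callback for bulk and dirty reads: record the result,
 * queue the block on blk_list for flush_blks() to send, and update the
 * in-flight accounting under the migration lock.
 */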
static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken.  */

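/* Submit an AIO read for the next unsent chunk of this device.  Returns 1
 * once the bulk phase for the device is finished (the cursor has reached
 * the end of the device), 0 while chunks remain.
 */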
static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockBackend *bb = bmds->blk;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(blk_get_aio_context(bb));
        /* Skip unallocated sectors; intentionally treats failure as
         * an allocated sector */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(blk_bs(bb), cur_sector,
                                  MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
            cur_sector += nr_sectors;
        }
        aio_context_release(blk_get_aio_context(bb));
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane.  Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * without the need to acquire the AioContext.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(blk_get_aio_context(bmds->blk));
    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
                                0, blk_mig_read_cb, blk);

    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector, nr_sectors);
    aio_context_release(blk_get_aio_context(bmds->blk));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

/* Called with iothread lock taken.  */

static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
                                                      BLOCK_SIZE, NULL, NULL);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            aio_context_acquire(blk_get_aio_context(bmds->blk));
            bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
            aio_context_release(blk_get_aio_context(bmds->blk));
        }
    }
    return ret;
}

/* Called with iothread lock taken.  */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }
}

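/* Create one BlkMigDevState per writable block device.  The BlockBackends
 * are created here but only attached to their BDSes in a second pass; see
 * the comment before that loop for why.
 */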
static int init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator it;
    int i, num_bs = 0;
    struct {
        BlkMigDevState *bmds;
        BlockDriverState *bs;
    } *bmds_bs;
    Error *local_err = NULL;
    int ret;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        num_bs++;
    }
    bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs));

    for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            ret = sectors;
            goto out;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->blk = blk_new(BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
        bmds->blk_name = g_strdup(bdrv_get_device_name(bs));
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = migrate_use_block_incremental();

        assert(i < num_bs);
        bmds_bs[i].bmds = bmds;
        bmds_bs[i].bs = bs;

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bdrv_get_device_name(bs));
        } else {
            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }

    /* Can only insert new BDSes now because doing so while iterating block
     * devices may end up in a deadlock (iterating the new BDSes, too). */
    for (i = 0; i < num_bs; i++) {
        BlkMigDevState *bmds = bmds_bs[i].bmds;
        BlockDriverState *bs = bmds_bs[i].bs;

        if (bmds) {
            ret = blk_insert_bs(bmds->blk, bs, &local_err);
            if (ret < 0) {
                error_report_err(local_err);
                goto out;
            }

            alloc_aio_bitmap(bmds);
            error_setg(&bmds->blocker, "block device is in use by migration");
            bdrv_op_block_all(bs, bmds->blocker);
        }
    }

    ret = 0;
out:
    g_free(bmds_bs);
    return ret;
}

/* Called with no lock taken.  */

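/* Drive the bulk phase: push one more chunk for the first device whose
 * bulk transfer is still in progress, emitting a progress marker whenever
 * the percentage changes.  Returns 0 once every device has completed its
 * bulk phase, 1 otherwise.
 */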
static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock and AioContext taken.  */

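/* Scan forward from bmds->cur_dirty for the next dirty chunk and transfer
 * it, asynchronously or synchronously depending on is_async.  Returns 1
 * once the dirty cursor has passed the end of the device, 0 if more may
 * remain, or a negative value on read failure.
 */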
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    BlockDriverState *bs = blk_bs(bmds->blk);
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            blk_drain(bmds->blk);
        } else {
            blk_mig_unlock();
        }
        if (bdrv_get_dirty(bs, bmds->dirty_bitmap, sector)) {

            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = blk_aio_preadv(bmds->blk,
                                            sector * BDRV_SECTOR_SIZE,
                                            &blk->qiov, 0, blk_mig_read_cb,
                                            blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE, blk->buf,
                                nr_sectors * BDRV_SECTOR_SIZE);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, sector, nr_sectors);
            sector += nr_sectors;
            bmds->cur_dirty = sector;

            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: few enough data for max_downtime
 * < 0: error
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken.  */

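/* Drain completed reads from blk_list into the migration stream, stopping
 * early if the rate limit is reached or a read reported an error.
 */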
static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken.  */

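/* Sum of dirty sectors across all devices, converted to bytes.  */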
static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }

    return dirty << BDRV_SECTOR_BITS;
}

/* Called with iothread lock taken.  */

static void block_migration_cleanup(void *opaque)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;
    AioContext *ctx;

    bdrv_drain_all();

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(blk_bs(bmds->blk), bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->blk can disappear during blk_unref.  */
        ctx = blk_get_aio_context(bmds->blk);
        aio_context_acquire(ctx);
        blk_unref(bmds->blk);
        aio_context_release(ctx);

        g_free(bmds->blk_name);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    ret = init_blk_migration(f);
    if (ret < 0) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    /* start tracking dirty blocks */
    ret = set_dirty_tracking();

    qemu_mutex_unlock_iothread();

    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

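/* Returns 1 if progress was made (bytes were written to the stream),
 * 0 if there was nothing to send this round, and a negative value on
 * error.
 */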
static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);
    int64_t delta_ftell;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f) &&
           (block_mig_state.submitted +
            block_mig_state.read_done) <
           MAX_INFLIGHT_IO) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    delta_ftell = qemu_ftell(f) - last_ftell;
    if (delta_ftell > 0) {
        return 1;
    } else if (delta_ftell < 0) {
        return -1;
    } else {
        return 0;
    }
}

/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that the bulk save is completed and
       all async reads have completed */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return 0;
}

static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                               uint64_t *non_postcopiable_pending,
                               uint64_t *postcopiable_pending)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLOCK_SIZE +
               block_mig_state.read_done * BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (pending <= max_size && !block_mig_state.bulk_completed) {
        pending = max_size + BLOCK_SIZE;
    }

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    /* We don't do postcopy */
    *non_postcopiable_pending += pending;
}

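/* Incoming side: replay the stream produced by blk_send(), one record per
 * loop iteration, until the EOS flag is seen.
 */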
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockBackend *blk, *blk_prev = NULL;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;
    BlockDriverInfo bdi;
    int cluster_size = BLOCK_SIZE;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (blk != blk_prev) {
                blk_prev = blk;
                total_sectors = blk_nb_sectors(blk);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                blk_invalidate_cache(blk, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }

                ret = bdrv_get_info(blk_bs(blk), &bdi);
                if (ret == 0 && bdi.cluster_size > 0 &&
                    bdi.cluster_size <= BLOCK_SIZE &&
                    BLOCK_SIZE % bdi.cluster_size == 0) {
                    cluster_size = bdi.cluster_size;
                } else {
                    cluster_size = BLOCK_SIZE;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = blk_pwrite_zeroes(blk, addr * BDRV_SECTOR_SIZE,
                                        nr_sectors * BDRV_SECTOR_SIZE,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
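                /* Write the block cluster by cluster so that runs of
                 * zeros can be punched with blk_pwrite_zeroes() and the
                 * destination image stays sparse.  */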
                int i;
                int64_t cur_addr;
                uint8_t *cur_buf;

                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                for (i = 0; i < BLOCK_SIZE / cluster_size; i++) {
                    cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size;
                    cur_buf = buf + i * cluster_size;

                    if ((!block_mig_state.zero_blocks ||
                         cluster_size < BLOCK_SIZE) &&
                        buffer_is_zero(cur_buf, cluster_size)) {
                        ret = blk_pwrite_zeroes(blk, cur_addr,
                                                cluster_size,
                                                BDRV_REQ_MAY_UNMAP);
                    } else {
                        ret = blk_pwrite(blk, cur_addr, cur_buf,
                                         cluster_size, 0);
                    }
                    if (ret < 0) {
                        break;
                    }
                }
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static bool block_is_active(void *opaque)
{
    return migrate_use_block();
}

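/* Hooks wired into the generic live-migration machinery; they only take
 * effect when block migration has been requested (see block_is_active).
 */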
static SaveVMHandlers savevm_block_handlers = {
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}