/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/hw.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include "migration/qemu-file.h"
#include "sysemu/block-backend.h"

#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08

#define MAX_IS_ALLOCATED_SEARCH 65536

#define MAX_INFLIGHT_IO 512

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
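
/* Block migration transfers disk data in chunks of BLOCK_SIZE (1 MiB).
 * Each chunk covers BDRV_SECTORS_PER_DIRTY_CHUNK sectors and corresponds
 * to one bit in the per-device aio/dirty bitmaps used below. */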

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockBackend *blk;
    char *blk_name;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock.  */
    int64_t completed_sectors;

    /* During migration this is protected by iothread lock / AioContext.
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock and any AioContexts.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}
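
/* Stream format written by blk_send() below, per block:
 *   be64: (sector << BDRV_SECTOR_BITS) | flags
 *   byte: device name length, followed by the name itself
 *   data: BLOCK_SIZE bytes of payload (omitted for zero blocks)
 */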

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

static void blk_send(QEMUFile *f, BlkMigBlock * blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->blk_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *) blk->bmds->blk_name, len);

    /* If a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * Thus, if we queue zero blocks we slow down the migration.  */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}
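
/* The helpers below track which chunks have an AIO read in flight.  A set
 * bit in bmds->aio_bitmap means the corresponding chunk is still being
 * read; the dirty phase drains the device before touching such a chunk. */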

/* Called with migration lock held.  */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < blk_nb_sectors(bmds->blk)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                             int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}
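
/* One bit per BDRV_SECTORS_PER_DIRTY_CHUNK sectors; the size computation
 * rounds up so a partial chunk at the end of the device gets a bit too. */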

static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockBackend *bb = bmds->blk;
    int64_t bitmap_size;

    bitmap_size = blk_nb_sectors(bb) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}
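
/* Each call transfers at most one chunk and advances bmds->cur_sector;
 * the return value is 1 once the whole device has been sent.  For
 * incremental migration (shared_base) unallocated sectors are skipped. */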

/* Called with no lock taken.  */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockBackend *bb = bmds->blk;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(blk_get_aio_context(bb));
        /* Skip unallocated sectors; intentionally treats failure as
         * an allocated sector */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(blk_bs(bb), cur_sector,
                                  MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
            cur_sector += nr_sectors;
        }
        aio_context_release(blk_get_aio_context(bb));
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane.  Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * without the need to acquire the AioContext.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(blk_get_aio_context(bmds->blk));
    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
                                0, blk_mig_read_cb, blk);

    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector, nr_sectors);
    aio_context_release(blk_get_aio_context(bmds->blk));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}
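
/* Dirty tracking uses a bitmap with BLOCK_SIZE granularity, so one dirty
 * bit maps exactly onto one migrated chunk. */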

/* Called with iothread lock taken.  */

static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
                                                      BLOCK_SIZE, NULL, NULL);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            aio_context_acquire(blk_get_aio_context(bmds->blk));
            bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
            aio_context_release(blk_get_aio_context(bmds->blk));
        }
    }
    return ret;
}

/* Called with iothread lock taken.  */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }
}

static int init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator it;
    int i, num_bs = 0;
    struct {
        BlkMigDevState *bmds;
        BlockDriverState *bs;
    } *bmds_bs;
    Error *local_err = NULL;
    int ret;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        num_bs++;
    }
    bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs));

    for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            ret = sectors;
            goto out;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->blk = blk_new(BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
        bmds->blk_name = g_strdup(bdrv_get_device_name(bs));
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = migrate_use_block_incremental();

        assert(i < num_bs);
        bmds_bs[i].bmds = bmds;
        bmds_bs[i].bs = bs;

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bdrv_get_device_name(bs));
        } else {
            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }

    /* Can only insert new BDSes now because doing so while iterating block
     * devices may end up in a deadlock (iterating the new BDSes, too). */
    for (i = 0; i < num_bs; i++) {
        BlkMigDevState *bmds = bmds_bs[i].bmds;
        BlockDriverState *bs = bmds_bs[i].bs;

        if (bmds) {
            ret = blk_insert_bs(bmds->blk, bs, &local_err);
            if (ret < 0) {
                error_report_err(local_err);
                goto out;
            }

            alloc_aio_bitmap(bmds);
            error_setg(&bmds->blocker, "block device is in use by migration");
            bdrv_op_block_all(bs, bmds->blocker);
        }
    }

    ret = 0;
out:
    g_free(bmds_bs);
    return ret;
}
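
/* Pick the first device whose bulk phase is still incomplete and send one
 * chunk from it; also emit a BLK_MIG_FLAG_PROGRESS marker whenever the
 * overall percentage changes.  Returns 0 once all devices are done. */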

/* Called with no lock taken.  */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock and AioContext taken.  */

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    BlockDriverState *bs = blk_bs(bmds->blk);
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            blk_drain(bmds->blk);
        } else {
            blk_mig_unlock();
        }
        if (bdrv_get_dirty(bs, bmds->dirty_bitmap, sector)) {

            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = blk_aio_preadv(bmds->blk,
                                            sector * BDRV_SECTOR_SIZE,
                                            &blk->qiov, 0, blk_mig_read_cb,
                                            blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE, blk->buf,
                                nr_sectors * BDRV_SECTOR_SIZE);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, sector, nr_sectors);
            sector += nr_sectors;
            bmds->cur_dirty = sector;

            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: little enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}
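
/* Drain completed asynchronous reads from blk_list onto the migration
 * stream, stopping early if the rate limit is hit or a read failed. */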

/* Called with no locks taken.  */

static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken.  */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }

    return dirty << BDRV_SECTOR_BITS;
}

/* Called with iothread lock taken.  */

static void block_migration_cleanup(void *opaque)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;
    AioContext *ctx;

    bdrv_drain_all();

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(blk_bs(bmds->blk), bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->blk can disappear during blk_unref.  */
        ctx = blk_get_aio_context(bmds->blk);
        aio_context_acquire(ctx);
        blk_unref(bmds->blk);
        aio_context_release(ctx);

        g_free(bmds->blk_name);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}
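
/* SaveVMHandlers entry points: block_save_setup() runs once at migration
 * start, block_save_iterate() during the live phase, and
 * block_save_complete() after the VM has been stopped. */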

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    ret = init_blk_migration(f);
    if (ret < 0) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    /* start tracking dirty blocks */
    ret = set_dirty_tracking();

    qemu_mutex_unlock_iothread();

    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}
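
/* Returns 1 if data was written to the stream, 0 if nothing was sent, and
 * a negative value on error. */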

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);
    int64_t delta_ftell;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f) &&
           (block_mig_state.submitted +
            block_mig_state.read_done) <
           MAX_INFLIGHT_IO) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    delta_ftell = qemu_ftell(f) - last_ftell;
    if (delta_ftell > 0) {
        return 1;
    } else if (delta_ftell < 0) {
        return -1;
    } else {
        return 0;
    }
}

/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that save bulk is completed and
       all async read completed */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return 0;
}

static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                               uint64_t *non_postcopiable_pending,
                               uint64_t *postcopiable_pending)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLOCK_SIZE +
               block_mig_state.read_done * BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (pending <= max_size && !block_mig_state.bulk_completed) {
        pending = max_size + BLOCK_SIZE;
    }

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    /* We don't do postcopy */
    *non_postcopiable_pending += pending;
}
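
/* Destination side: parse the stream produced by blk_send(), writing each
 * chunk into the matching local device.  Writes are split on the target's
 * cluster size so all-zero clusters can be punched with
 * blk_pwrite_zeroes(). */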

static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockBackend *blk, *blk_prev = NULL;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;
    BlockDriverInfo bdi;
    int cluster_size = BLOCK_SIZE;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (blk != blk_prev) {
                blk_prev = blk;
                total_sectors = blk_nb_sectors(blk);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                blk_invalidate_cache(blk, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }

                ret = bdrv_get_info(blk_bs(blk), &bdi);
                if (ret == 0 && bdi.cluster_size > 0 &&
                    bdi.cluster_size <= BLOCK_SIZE &&
                    BLOCK_SIZE % bdi.cluster_size == 0) {
                    cluster_size = bdi.cluster_size;
                } else {
                    cluster_size = BLOCK_SIZE;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = blk_pwrite_zeroes(blk, addr * BDRV_SECTOR_SIZE,
                                        nr_sectors * BDRV_SECTOR_SIZE,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                int i;
                int64_t cur_addr;
                uint8_t *cur_buf;

                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                for (i = 0; i < BLOCK_SIZE / cluster_size; i++) {
                    cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size;
                    cur_buf = buf + i * cluster_size;

                    if ((!block_mig_state.zero_blocks ||
                         cluster_size < BLOCK_SIZE) &&
                        buffer_is_zero(cur_buf, cluster_size)) {
                        ret = blk_pwrite_zeroes(blk, cur_addr,
                                                cluster_size,
                                                BDRV_REQ_MAY_UNMAP);
                    } else {
                        ret = blk_pwrite(blk, cur_addr, cur_buf,
                                         cluster_size, 0);
                    }
                    if (ret < 0) {
                        break;
                    }
                }
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static bool block_is_active(void *opaque)
{
    return migrate_use_block();
}

static SaveVMHandlers savevm_block_handlers = {
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}