/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block/block_int.h"
#include "hw/hw.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include <assert.h>

#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08

#define MAX_IS_ALLOCATED_SEARCH 65536

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockDriverState *bs;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Protected by block migration lock.  */
    unsigned long *aio_bitmap;
    int64_t completed_sectors;
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockDriverAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    /* Written during setup phase.  Can be read without a lock.  */
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

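/* Write one chunk to the migration stream: a be64 word carrying the sector
 * number and flags, the source device name, and, unless the zero-block
 * optimization applies, BLOCK_SIZE bytes of payload. */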
static void blk_send(QEMUFile *f, BlkMigBlock * blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);

    /* if a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * thus if we queue zero blocks we slow down the migration */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

/* Called with migration lock held. */

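/* Test whether the chunk containing @sector still has an AIO read in flight,
 * by checking the corresponding bit in bmds->aio_bitmap. */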
static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if ((sector << BDRV_SECTOR_BITS) < bdrv_getlength(bmds->bs)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held. */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockDriverState *bs = bmds->bs;
    int64_t bitmap_size;

    bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
            BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

/* Never hold migration lock when yielding to the main loop! */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken. */

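/* Read the next chunk of the bulk phase for one device: skip unallocated
 * sectors when migrating on top of a shared base image, then submit an
 * asynchronous read of one chunk (up to BDRV_SECTORS_PER_DIRTY_CHUNK sectors)
 * and clear its dirty bits.  Returns 1 when this device's bulk pass is done. */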
static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_malloc(sizeof(BlkMigBlock));
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    qemu_mutex_lock_iothread();
    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

/* Called with iothread lock taken. */

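/* Create a dirty bitmap (one bit per BLOCK_SIZE chunk) for every device on
 * the migration list; on failure, release any bitmaps already created. */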
static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(bmds->bs, BLOCK_SIZE,
                                                      NULL);
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
        }
    }
    return ret;
}

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
    }
}

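/* bdrv_iterate() callback: register every writable block device with a
 * BlkMigDevState, allocate its in-flight AIO bitmap and take a reference
 * so the device cannot go away during migration. */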
static void init_blk_migration_it(void *opaque, BlockDriverState *bs)
{
    BlkMigDevState *bmds;
    int64_t sectors;

    if (!bdrv_is_read_only(bs)) {
        sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
        if (sectors <= 0) {
            return;
        }

        bmds = g_malloc0(sizeof(BlkMigDevState));
        bmds->bs = bs;
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;
        alloc_aio_bitmap(bmds);
        bdrv_set_in_use(bs, 1);
        bdrv_ref(bs);

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bs->device_name);
        } else {
            DPRINTF("Start full migration for %s\n", bs->device_name);
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }
}

static void init_blk_migration(QEMUFile *f)
{
    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    bdrv_iterate(init_blk_migration_it, NULL);
}

/* Called with no lock taken. */

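/* Advance the bulk phase by one chunk on the first device that has not yet
 * completed it, and emit a progress marker whenever the overall percentage
 * changes.  Returns 0 once every device has finished its bulk pass. */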
static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock taken. */

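/* Scan one device's dirty bitmap starting at bmds->cur_dirty and handle at
 * most one dirty chunk: submit an asynchronous read when is_async is set,
 * otherwise read it synchronously and send it straight away.  Returns 1 when
 * the whole device has been scanned, 0 otherwise, negative on read error. */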
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            bdrv_drain_all();
        } else {
            blk_mig_unlock();
        }
        if (bdrv_get_dirty(bmds->bs, bmds->dirty_bitmap, sector)) {

            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_malloc(sizeof(BlkMigBlock));
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                            nr_sectors, blk_mig_read_cb, blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: few enough data for max_downtime
*/
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        ret = mig_save_device_dirty(f, bmds, is_async);
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken. */

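/* Drain completed reads from blk_list onto the wire, stopping early when the
 * rate limit is hit or a read has failed; returns the first error seen. */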
static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken. */

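/* Sum the dirty sector counts of all devices and convert to bytes. */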
static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        dirty += bdrv_get_dirty_count(bmds->bs, bmds->dirty_bitmap);
    }

    return dirty << BDRV_SECTOR_BITS;
}

/* Called with iothread lock taken. */

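/* Tear down migration state: stop dirty tracking, drop every per-device
 * state and any block buffers still queued for transfer. */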
static void blk_mig_cleanup(void)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;

    bdrv_drain_all();

    unset_dirty_tracking();

    blk_mig_lock();
    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_set_in_use(bmds->bs, 0);
        bdrv_unref(bmds->bs);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static void block_migration_cancel(void *opaque)
{
    blk_mig_cleanup();
}

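/* .save_live_setup handler: start dirty tracking, register the devices and
 * flush any reads that already completed, ending the section with an EOS
 * marker. */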
static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();

    /* start track dirty blocks */
    ret = set_dirty_tracking();

    if (ret) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    init_blk_migration(f);

    qemu_mutex_unlock_iothread();

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

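/* .save_live_iterate handler: keep queueing bulk or dirty chunks while the
 * amount of buffered data stays below the file's rate limit, then flush and
 * close the section with an EOS marker. */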
static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f)) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    return qemu_ftell(f) - last_ftell;
}

/* Called with iothread lock taken. */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that save bulk is completed and
       all async read completed */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    blk_mig_cleanup();
    return 0;
}

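/* .save_live_pending handler: estimate how many bytes are still to be sent,
 * counting dirty sectors plus everything submitted or read but not yet
 * transferred, and never report zero while the bulk phase is unfinished. */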
static uint64_t block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    blk_mig_lock();
    pending = get_remaining_dirty() +
                       block_mig_state.submitted * BLOCK_SIZE +
                       block_mig_state.read_done * BLOCK_SIZE;

    /* Report at least one block pending during bulk phase */
    if (pending == 0 && !block_mig_state.bulk_completed) {
        pending = BLOCK_SIZE;
    }
    blk_mig_unlock();
    qemu_mutex_unlock_iothread();

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    return pending;
}

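/* .load_state handler on the destination: replay the stream, writing each
 * chunk to the named device (or zero-filling it when the zero-block flag is
 * set) until the EOS marker is reached. */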
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs, *bs_prev = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);
            if (!bs) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (bs != bs_prev) {
                bs_prev = bs;
                total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = bdrv_write_zeroes(bs, addr, nr_sectors,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                ret = bdrv_write(bs, addr, buf, nr_sectors);
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static void block_set_params(const MigrationParams *params, void *opaque)
{
    block_mig_state.blk_enable = params->blk;
    block_mig_state.shared_base = params->shared;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= params->shared;
}

static bool block_is_active(void *opaque)
{
    return block_mig_state.blk_enable == 1;
}

SaveVMHandlers savevm_block_handlers = {
    .set_params = block_set_params,
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .cancel = block_migration_cancel,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}