/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <[email protected]>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define MAX_IO_SECTORS ((1 << 20) >> BDRV_SECTOR_BITS) /* 1 MiB */
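/* With 512-byte sectors, MAX_IO_SECTORS is 2048 sectors (1 MiB), so the
 * default buffer below works out to MAX_IN_FLIGHT * 1 MiB = 16 MiB. */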
#define DEFAULT_MIRROR_BUF_SIZE \
    (MAX_IN_FLIGHT * MAX_IO_SECTORS * BDRV_SECTOR_SIZE)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
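 * The MirrorBuffer headers are overlaid on the chunks of s->buf itself:
 * mirror_free_init() places one at the start of each chunk, and
 * mirror_iteration_done() casts iov_base back to recycle completed chunks.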
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *source;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
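    /* Chunks that have already been copied to the target; only allocated
     * when the target cluster size is larger than the granularity, so that
     * mirror_cow_align() can widen requests to whole target clusters */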
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
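    /* Chunks, at granularity size, that currently have a request in flight;
     * set in mirror_iteration() and cleared in mirror_iteration_done() */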
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t sectors_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_sectors;
    int max_iov;
    bool initial_zeroing_ongoing;
} MirrorBlockJob;

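/* One MirrorOp tracks a single asynchronous copy, zero or discard operation
 * from submission until mirror_iteration_done() frees it. */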
typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = DIV_ROUND_UP(op->nb_sectors, sectors_per_chunk);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
        }
    }
    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co);
    }
}

static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}

static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
                        0, mirror_write_complete, op);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}

static inline void mirror_clip_sectors(MirrorBlockJob *s,
                                       int64_t sector_num,
                                       int *nb_sectors)
{
    *nb_sectors = MIN(*nb_sectors,
                      s->bdev_length / BDRV_SECTOR_SIZE - sector_num);
}

/* Round sector_num and/or nb_sectors to the target cluster if COW is needed,
 * and return the offset of the adjusted tail relative to the original
 * request. */
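/* For example, with a 64 KiB granularity (128 sectors) and a 128 KiB target
 * cluster (256 sectors), a request for sectors [0, 128) whose cluster has not
 * been copied yet is widened to [0, 256), and 128 is returned because the
 * tail moved from sector 128 to sector 256. */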
static int mirror_cow_align(MirrorBlockJob *s,
                            int64_t *sector_num,
                            int *nb_sectors)
{
    bool need_cow;
    int ret = 0;
    int chunk_sectors = s->granularity >> BDRV_SECTOR_BITS;
    int64_t align_sector_num = *sector_num;
    int align_nb_sectors = *nb_sectors;
    int max_sectors = chunk_sectors * s->max_iov;

    need_cow = !test_bit(*sector_num / chunk_sectors, s->cow_bitmap);
    need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_sectors_to_clusters(blk_bs(s->target), *sector_num,
                                       *nb_sectors, &align_sector_num,
                                       &align_nb_sectors);
    }

    if (align_nb_sectors > max_sectors) {
        align_nb_sectors = max_sectors;
        if (need_cow) {
            align_nb_sectors = QEMU_ALIGN_DOWN(align_nb_sectors,
                                               s->target_cluster_sectors);
        }
    }
    /* Clipping may result in align_nb_sectors unaligned to the chunk
     * boundary, but that doesn't matter because it's already the end of the
     * source image. */
    mirror_clip_sectors(s, align_sector_num, &align_nb_sectors);

    ret = align_sector_num + align_nb_sectors - (*sector_num + *nb_sectors);
    *sector_num = align_sector_num;
    *nb_sectors = align_nb_sectors;
    assert(ret >= 0);
    return ret;
}

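/* Yield the mirror coroutine; mirror_iteration_done() re-enters it as soon as
 * an in-flight operation completes. */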
static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}

/* Submit async read while handling COW.
 * Returns: The number of sectors copied after and including sector_num,
 *          excluding any sectors copied prior to sector_num due to alignment.
 *          This will be nb_sectors if no alignment is necessary, or
 *          (new_end - sector_num) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
                          int nb_sectors)
{
    BlockBackend *source = s->common.blk;
    int sectors_per_chunk, nb_chunks;
    int ret;
    MirrorOp *op;
    int max_sectors;

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    max_sectors = sectors_per_chunk * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
    nb_sectors = MIN(max_sectors, nb_sectors);
    assert(nb_sectors);
    ret = nb_sectors;

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &sector_num, &nb_sectors);
    }
    assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
    /* The sector range must meet granularity because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(!(sector_num % sectors_per_chunk));
    nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, 0,
                   mirror_read_complete, op);
    return ret;
}

static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t sector_num,
                                      int nb_sectors,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is a nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    if (is_discard) {
        blk_aio_pdiscard(s->target, sector_num << BDRV_SECTOR_BITS,
                         op->nb_sectors << BDRV_SECTOR_BITS,
                         mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, sector_num * BDRV_SECTOR_SIZE,
                              op->nb_sectors * BDRV_SECTOR_SIZE,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}

static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->source;
    int64_t sector_num, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    int64_t end = s->bdev_length / BDRV_SECTOR_SIZE;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
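    /* Cap single requests so that MAX_IN_FLIGHT of them still fit into the
     * buffer, but never go below MAX_IO_SECTORS. */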
    int max_io_sectors = MAX((s->buf_size >> BDRV_SECTOR_BITS) / MAX_IN_FLIGHT,
                             MAX_IO_SECTORS);

    sector_num = bdrv_dirty_iter_next(s->dbi);
    if (sector_num < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        sector_num = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(sector_num >= 0);
    }

    first_chunk = sector_num / sectors_per_chunk;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    block_job_pause_point(&s->common);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
        int64_t next_dirty;
        int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk;
        int64_t next_chunk = next_sector / sectors_per_chunk;
        if (next_sector >= end ||
            !bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_sector || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_sector);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_sector);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield; if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num,
                            nb_chunks * sectors_per_chunk);
    bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
    while (nb_chunks > 0 && sector_num < end) {
        int64_t ret;
        int io_sectors, io_sectors_acct;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(sector_num % sectors_per_chunk));
        ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        if (ret < 0) {
            io_sectors = MIN(nb_chunks * sectors_per_chunk, max_io_sectors);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_sectors = MIN(io_sectors, max_io_sectors);
        }

        io_sectors -= io_sectors % sectors_per_chunk;
        if (io_sectors < sectors_per_chunk) {
            io_sectors = sectors_per_chunk;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_sector_num;
            int target_nb_sectors;
            bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num,
                                           io_sectors, &target_sector_num,
                                           &target_nb_sectors);
            if (target_sector_num == sector_num &&
                target_nb_sectors == io_sectors) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
            mirror_wait_for_io(s);
        }

        if (s->ret < 0) {
            return 0;
        }

        mirror_clip_sectors(s, sector_num, &io_sectors);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_sectors = mirror_do_read(s, sector_num, io_sectors);
            io_sectors_acct = io_sectors;
            break;
        case MIRROR_METHOD_ZERO:
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, sector_num, io_sectors,
                                      mirror_method == MIRROR_METHOD_DISCARD);
            if (write_zeroes_ok) {
                io_sectors_acct = 0;
            } else {
                io_sectors_acct = io_sectors;
            }
            break;
        default:
            abort();
        }
        assert(io_sectors);
        sector_num += io_sectors;
        nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
        if (s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, io_sectors_acct);
        }
    }
    return delay_ns;
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}

typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    /* Make sure that the source BDS doesn't go away before we have called
     * block_job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     *
     * Note that blk_unref() alone doesn't necessarily drop permissions because
     * we might be running nested inside mirror_drain(), which takes an extra
     * reference, so use an explicit blk_set_perm() first. */
    blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(job);
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(job->blk);
    blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(job->blk, mirror_top_bs, &error_abort);

    block_job_completed(&s->common, data->ret);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}

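/* Yield (via a zero-length sleep) at least once per SLICE_TIME, and hit a
 * pause point otherwise, so the job stays responsive during long loops. */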
static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > SLICE_TIME) {
        s->last_pause_ns = now;
        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
    } else {
        block_job_pause_point(&s->common);
    }
}

static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t sector_num, end;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret, n;

    end = s->bdev_length / BDRV_SECTOR_SIZE;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, end);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (sector_num = 0; sector_num < end; ) {
            int nb_sectors = MIN(end - sector_num,
                QEMU_ALIGN_DOWN(INT_MAX, s->granularity) >> BDRV_SECTOR_BITS);

            mirror_throttle(s);

            if (block_job_is_cancelled(&s->common)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_io(s);
                continue;
            }

            mirror_do_zero_or_discard(s, sector_num, nb_sectors, false);
            sector_num += nb_sectors;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* Loop on the sectors and initialize the dirty bitmap.  */
    for (sector_num = 0; sector_num < end; ) {
        /* Just to make sure we are not exceeding int limit. */
        int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                             end - sector_num);

        mirror_throttle(s);

        if (block_job_is_cancelled(&s->common)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);
        if (ret < 0) {
            return ret;
        }

        assert(n > 0);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
        }
        sector_num += n;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for an empty string */
    int ret = 0;
    int target_cluster_size = BDRV_SECTOR_SIZE;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

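    /* Job length in granularity chunks; this is the size of both the
     * in-flight and the COW bitmap. */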
    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        target_cluster_size = bdi.cluster_size;
    }
    if (backing_filename[0] && !target_bs->backing
        && s->granularity < target_cluster_size) {
        s->buf_size = MAX(s->buf_size, target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->target_cluster_sectors = target_cluster_size >> BDRV_SECTOR_BITS;
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || block_job_is_cancelled(&s->common)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap, 0);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        block_job_pause_point(&s->common);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                block_job_event_ready(&s->common);
                s->synced = true;
            }

            should_complete = s->should_complete ||
                block_job_is_cancelled(&s->common);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while the mirror
             * job runs, so pause it now.  Before deciding whether to switch
             * to the target, check one last time whether I/O has arrived in
             * the meanwhile, and if not, flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
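    /* 'speed' is given in bytes per second; the rate limiter accounts in
     * sectors (see the io_sectors_acct values passed to
     * ratelimit_calculate_delay() in mirror_iteration()). */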
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}

static void mirror_pause(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    mirror_wait_for_all_io(s);
}

static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}

static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static const BlockJobDriver mirror_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_MIRROR,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_COMMIT,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    return bdrv_co_flush(bs->backing->bs);
}

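/* The mirror filter is transparent to block status queries: everything is
 * reported as raw data that lives in the backing file, at the same offset. */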
static int64_t coroutine_fn bdrv_mirror_top_get_block_status(
    BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
    BlockDriverState **file)
{
    *pnum = nb_sectors;
    *file = bs->backing->bs;
    return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID | BDRV_BLOCK_DATA |
           (sector_num << BDRV_SECTOR_BITS);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags)
{
    return bdrv_co_pwrite_zeroes(bs->backing, offset, count, flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int count)
{
    return bdrv_co_pdiscard(bs->backing->bs, offset, count);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    bdrv_refresh_filename(bs->backing->bs);
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_close(BlockDriverState *bs)
{
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_co_get_block_status   = bdrv_mirror_top_get_block_status,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_close                 = bdrv_mirror_top_close,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
};

static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             Error **errp)
{
    MirrorBlockJob *s;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return;
    }
    mirror_top_bs->total_sectors = bs->total_sectors;
    bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));

    /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
     * it alive until block_job_create() succeeds even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->source = bs;
    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->base = base;
    s->granularity = granularity;
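    /* Round up so that s->buf splits into whole granularity-sized chunks for
     * the free list built by mirror_free_init() */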
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other options would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }
    }

    trace_mirror_start(bs, s, opaque);
    block_job_start(&s->common);
    return;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        block_job_early_fail(&s->common);
    }

    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    bdrv_unref(mirror_top_bs);
}

void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
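    /* With sync=top, only sectors allocated above the backing file are
     * copied; mirror_dirty_init() checks allocation against 'base' */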
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, errp);
}

void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         const char *filter_node_name,
                         BlockCompletionFunc *cb, void *opaque,
                         bool auto_complete, Error **errp)
{
    int orig_base_flags;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}