unsigned long *in_flight_bitmap;
int in_flight;
int64_t bytes_in_flight;
- QTAILQ_HEAD(MirrorOpList, MirrorOp) ops_in_flight;
+ QTAILQ_HEAD(, MirrorOp) ops_in_flight;
int ret;
bool unmap;
int target_cluster_size;
bool initial_zeroing_ongoing;
int in_active_write_counter;
bool prepared;
+ bool in_drain;
} MirrorBlockJob;
typedef struct MirrorBDSOpaque {
MirrorBlockJob *job;
+ bool stop;
} MirrorBDSOpaque;
struct MirrorOp {
}
s->prepared = true;
+ if (bdrv_chain_contains(src, target_bs)) {
+ bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
+ }
+
bdrv_release_dirty_bitmap(src, s->dirty_bitmap);
/* Make sure that the source BDS doesn't go away during bdrv_replace_node,
 * before we can call bdrv_drained_end */
/* We don't access the source any more. Dropping any WRITE/RESIZE is
* required before it could become a backing file of target_bs. */
- bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
- &error_abort);
+ bs_opaque->stop = true;
+ bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
+ &error_abort);
if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
BlockDriverState *backing = s->is_none_mode ? src : s->base;
if (backing_bs(target_bs) != backing) {
/* The mirror job has no requests in flight any more, but we need to
* drain potential other users of the BDS before changing the graph. */
+ assert(s->in_drain);
bdrv_drained_begin(target_bs);
bdrv_replace_node(to_replace, target_bs, &local_err);
bdrv_drained_end(target_bs);
g_free(s->replaces);
bdrv_unref(target_bs);
- /* Remove the mirror filter driver from the graph. Before this, get rid of
+ /*
+ * Remove the mirror filter driver from the graph. Before this, get rid of
* the blockers on the intermediate nodes so that the resulting state is
- * valid. Also give up permissions on mirror_top_bs->backing, which might
- * block the removal. */
+ * valid.
+ */
block_job_remove_all_bdrv(bjob);
- bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
- &error_abort);
bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
/* We just changed the BDS the job BB refers to (with either or both of the
 * bdrv_replace_node() calls), so switch the BB back so the cleanup does
 * the right thing. We don't need any permissions any more now. */
bs_opaque->job = NULL;
bdrv_drained_end(src);
+ s->in_drain = false;
bdrv_unref(mirror_top_bs);
bdrv_unref(src);
trace_mirror_before_drain(s, cnt);
+ s->in_drain = true;
bdrv_drained_begin(bs);
cnt = bdrv_get_dirty_count(s->dirty_bitmap);
if (cnt > 0 || mirror_flush(s) < 0) {
bdrv_drained_end(bs);
+ s->in_drain = false;
continue;
}
bdrv_dirty_iter_free(s->dbi);
if (need_drain) {
+ s->in_drain = true;
bdrv_drained_begin(bs);
}
static bool mirror_drained_poll(BlockJob *job)
{
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
- return !!s->in_flight;
-}
-static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
-{
- MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
+ /* If the job is neither paused nor cancelled, we can't be sure that it won't
+ * issue more requests. We make an exception if we've reached this point
+ * from one of our own drain sections, to avoid a deadlock waiting for
+ * ourselves.
+ */
+ if (!s->common.job.paused && !s->common.job.cancelled && !s->in_drain) {
+ return true;
+ }
- blk_set_aio_context(s->target, new_context);
+ return !!s->in_flight;
}
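
For context, the new in_drain flag pairs with the job's own drained sections shown elsewhere in this patch; a minimal sketch of the pattern, not itself part of the patch:

    /* The job marks its own drained sections so that mirror_drained_poll()
     * only waits for in-flight requests instead of reporting the job as
     * busy while the job itself is the one draining. */
    s->in_drain = true;
    bdrv_drained_begin(bs);      /* polls mirror_drained_poll() */
    /* ... flush, graph changes, completion checks ... */
    bdrv_drained_end(bs);
    s->in_drain = false;

Without the in_drain exception, a drain started from inside the job would wait for the job to pause, which cannot happen while the job itself is blocked in bdrv_drained_begin().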
static void mirror_drain(BlockJob *job)
.complete = mirror_complete,
},
.drained_poll = mirror_drained_poll,
- .attached_aio_context = mirror_attached_aio_context,
.drain = mirror_drain,
};
.complete = mirror_complete,
},
.drained_poll = mirror_drained_poll,
- .attached_aio_context = mirror_attached_aio_context,
.drain = mirror_drain,
};
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
{
- BdrvDirtyBitmapIter *iter;
QEMUIOVector target_qiov;
- uint64_t dirty_offset;
- int dirty_bytes;
+ uint64_t dirty_offset = offset;
+ uint64_t dirty_bytes;
if (qiov) {
qemu_iovec_init(&target_qiov, qiov->niov);
}
- iter = bdrv_dirty_iter_new(job->dirty_bitmap);
- bdrv_set_dirty_iter(iter, offset);
-
while (true) {
bool valid_area;
int ret;
bdrv_dirty_bitmap_lock(job->dirty_bitmap);
- valid_area = bdrv_dirty_iter_next_area(iter, offset + bytes,
- &dirty_offset, &dirty_bytes);
+ dirty_bytes = MIN(offset + bytes - dirty_offset, INT_MAX);
+ valid_area = bdrv_dirty_bitmap_next_dirty_area(job->dirty_bitmap,
+ &dirty_offset,
+ &dirty_bytes);
if (!valid_area) {
bdrv_dirty_bitmap_unlock(job->dirty_bitmap);
break;
break;
}
}
+
+ dirty_offset += dirty_bytes;
}
- bdrv_dirty_iter_free(iter);
if (qiov) {
qemu_iovec_destroy(&target_qiov);
}
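
The replacement API passes the search window through in/out parameters; a minimal sketch of the loop contract as used above (bitmap, start and end are placeholder names):

    /* On input, *offset/*bytes describe the remaining window; on success
     * they are narrowed in place to the first dirty area inside it. */
    uint64_t off = start;
    uint64_t len;

    while (off < end) {
        len = MIN(end - off, INT_MAX);  /* clamp so one area fits a request */
        if (!bdrv_dirty_bitmap_next_dirty_area(bitmap, &off, &len)) {
            break;                      /* no dirty data left in the window */
        }
        /* handle the dirty area [off, off + len) ... */
        off += len;                     /* resume right after it */
    }

This removes the need to allocate a BdrvDirtyBitmapIter and to free it on every exit path.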
NULL, 0);
}
-static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
+static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
{
if (bs->backing == NULL) {
/* we can be here after failed bdrv_attach_child in
* bdrv_set_backing_hd */
return;
}
- bdrv_refresh_filename(bs->backing->bs);
pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
bs->backing->bs->filename);
}
uint64_t perm, uint64_t shared,
uint64_t *nperm, uint64_t *nshared)
{
+ MirrorBDSOpaque *s = bs->opaque;
+
+ if (s->stop) {
+ /*
+ * If the job is to be stopped, we do not need to forward
+ * anything to the real image.
+ */
+ *nperm = 0;
+ *nshared = BLK_PERM_ALL;
+ return;
+ }
+
/* Must be able to forward guest writes to the real image */
*nperm = 0;
if (perm & BLK_PERM_WRITE) {
.bdrv_child_perm = bdrv_mirror_top_child_perm,
};
-static void mirror_start_job(const char *job_id, BlockDriverState *bs,
+static BlockJob *mirror_start_job(
+ const char *job_id, BlockDriverState *bs,
int creation_flags, BlockDriverState *target,
const char *replaces, int64_t speed,
uint32_t granularity, int64_t buf_size,
if (buf_size < 0) {
error_setg(errp, "Invalid parameter 'buf-size'");
- return;
+ return NULL;
}
if (buf_size == 0) {
if (bs == target) {
error_setg(errp, "Can't mirror node into itself");
- return;
+ return NULL;
}
/* In the case of active commit, add dummy driver to provide consistent
 * reads on the top, while disabling it in the intermediate nodes, and make
 * the backing chain writable. */
mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
BDRV_O_RDWR, errp);
if (mirror_top_bs == NULL) {
- return;
+ return NULL;
}
if (!filter_node_name) {
mirror_top_bs->implicit = true;
}
mirror_top_bs->total_sectors = bs->total_sectors;
mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
- mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED;
+ mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
+ BDRV_REQ_NO_FALLBACK;
bs_opaque = g_new0(MirrorBDSOpaque, 1);
mirror_top_bs->opaque = bs_opaque;
- bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));
/* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
* it alive until block_job_create() succeeds even if bs has no parent. */
if (local_err) {
bdrv_unref(mirror_top_bs);
error_propagate(errp, local_err);
- return;
+ return NULL;
}
/* Make sure that the source is not resized while the job is running */
* We can allow anything except resize there. */
target_is_backing = bdrv_chain_contains(bs, target);
target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
- s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
+ s->target = blk_new(s->common.job.aio_context,
+ BLK_PERM_WRITE | BLK_PERM_RESIZE |
(target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
BLK_PERM_WRITE_UNCHANGED |
(target_is_backing ? BLK_PERM_CONSISTENT_READ |
* ensure that. */
blk_set_force_allow_inactivate(s->target);
}
+ blk_set_allow_aio_context_change(s->target, true);
s->replaces = g_strdup(replaces);
s->on_source_error = on_source_error;
goto fail;
}
+ ret = block_job_add_bdrv(&s->common, "source", bs, 0,
+ BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
+ BLK_PERM_CONSISTENT_READ,
+ errp);
+ if (ret < 0) {
+ goto fail;
+ }
+
/* Required permissions are already taken with blk_new() */
block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
&error_abort);
goto fail;
}
}
+
+ if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
+ goto fail;
+ }
}
QTAILQ_INIT(&s->ops_in_flight);
trace_mirror_start(bs, s, opaque);
job_start(&s->common.job);
- return;
+
+ return &s->common;
fail:
if (s) {
g_free(s->replaces);
blk_unref(s->target);
bs_opaque->job = NULL;
+ if (s->dirty_bitmap) {
+ bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
+ }
job_early_fail(&s->common.job);
}
- bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
- &error_abort);
+ bs_opaque->stop = true;
+ bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
+ &error_abort);
bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
bdrv_unref(mirror_top_bs);
+
+ return NULL;
}
void mirror_start(const char *job_id, BlockDriverState *bs,
filter_node_name, true, copy_mode, errp);
}
-void commit_active_start(const char *job_id, BlockDriverState *bs,
- BlockDriverState *base, int creation_flags,
- int64_t speed, BlockdevOnError on_error,
- const char *filter_node_name,
- BlockCompletionFunc *cb, void *opaque,
- bool auto_complete, Error **errp)
+BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
+ BlockDriverState *base, int creation_flags,
+ int64_t speed, BlockdevOnError on_error,
+ const char *filter_node_name,
+ BlockCompletionFunc *cb, void *opaque,
+ bool auto_complete, Error **errp)
{
bool base_read_only;
Error *local_err = NULL;
+ BlockJob *ret;
base_read_only = bdrv_is_read_only(base);
if (base_read_only) {
if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
- return;
+ return NULL;
}
}
- mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
+ ret = mirror_start_job(
+ job_id, bs, creation_flags, base, NULL, speed, 0, 0,
MIRROR_LEAVE_BACKING_CHAIN,
on_error, on_error, true, cb, opaque,
&commit_active_job_driver, false, base, auto_complete,
goto error_restore_flags;
}
- return;
+ return ret;
error_restore_flags:
/* ignore error and errp for bdrv_reopen, because we want to propagate
 * the original error */
if (base_read_only) {
bdrv_reopen_set_read_only(base, true, NULL);
}
- return;
+ return NULL;
}
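
Since commit_active_start() now returns the job, callers can keep a handle on it. A hypothetical usage sketch (the job ID and flags here are illustrative, not taken from this patch):

    Error *local_err = NULL;
    BlockJob *job;

    /* Start an active commit of bs down to base and keep the job handle;
     * a NULL return means creation failed and local_err is set. */
    job = commit_active_start("commit0", bs, base, JOB_DEFAULT, 0,
                              BLOCKDEV_ON_ERROR_REPORT, NULL,
                              NULL, NULL, false, &local_err);
    if (!job) {
        error_report_err(local_err);
    }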