+static void coroutine_fn do_sync_target_write(MirrorBlockJob *job,
+                                              MirrorMethod method,
+                                              uint64_t offset, uint64_t bytes,
+                                              QEMUIOVector *qiov, int flags)
+{
+    BdrvDirtyBitmapIter *iter;
+    QEMUIOVector target_qiov;
+    uint64_t dirty_offset;
+    int dirty_bytes;
+
+    if (qiov) {
+        qemu_iovec_init(&target_qiov, qiov->niov);
+    }
+
+    iter = bdrv_dirty_iter_new(job->dirty_bitmap);
+    bdrv_set_dirty_iter(iter, offset);
+
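+    /* Walk all dirty areas intersecting [offset, offset + bytes) and
+     * replicate each of them to the target with the requested method. */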
+    while (true) {
+        bool valid_area;
+        int ret;
+
+        bdrv_dirty_bitmap_lock(job->dirty_bitmap);
+        valid_area = bdrv_dirty_iter_next_area(iter, offset + bytes,
+                                               &dirty_offset, &dirty_bytes);
+        if (!valid_area) {
+            bdrv_dirty_bitmap_unlock(job->dirty_bitmap);
+            break;
+        }
+
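+        /* Clear the bits up front; on failure they are set again below
+         * so that the area will be retried. */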
+        bdrv_reset_dirty_bitmap_locked(job->dirty_bitmap,
+                                       dirty_offset, dirty_bytes);
+        bdrv_dirty_bitmap_unlock(job->dirty_bitmap);
+
+        job_progress_increase_remaining(&job->common.job, dirty_bytes);
+
+        assert(dirty_offset - offset <= SIZE_MAX);
+        if (qiov) {
+            qemu_iovec_reset(&target_qiov);
+            qemu_iovec_concat(&target_qiov, qiov,
+                              dirty_offset - offset, dirty_bytes);
+        }
+
+        switch (method) {
+        case MIRROR_METHOD_COPY:
+            ret = blk_co_pwritev(job->target, dirty_offset, dirty_bytes,
+                                 qiov ? &target_qiov : NULL, flags);
+            break;
+
+        case MIRROR_METHOD_ZERO:
+            assert(!qiov);
+            ret = blk_co_pwrite_zeroes(job->target, dirty_offset, dirty_bytes,
+                                       flags);
+            break;
+
+        case MIRROR_METHOD_DISCARD:
+            assert(!qiov);
+            ret = blk_co_pdiscard(job->target, dirty_offset, dirty_bytes);
+            break;
+
+        default:
+            abort();
+        }
+
+        if (ret >= 0) {
+            job_progress_update(&job->common.job, dirty_bytes);
+        } else {
+            BlockErrorAction action;
+
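+            /* Re-set the dirty bits so that the failed area will be
+             * copied again, and note that source and target have
+             * diverged. */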
+            bdrv_set_dirty_bitmap(job->dirty_bitmap,
+                                  dirty_offset, dirty_bytes);
+            job->actively_synced = false;
+
+            action = mirror_error_action(job, false, -ret);
+            if (action == BLOCK_ERROR_ACTION_REPORT) {
+                if (!job->ret) {
+                    job->ret = ret;
+                }
+                break;
+            }
+        }
+    }
+
+    bdrv_dirty_iter_free(iter);
+    if (qiov) {
+        qemu_iovec_destroy(&target_qiov);
+    }
+}
+
+static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
+                                                   uint64_t offset,
+                                                   uint64_t bytes)
+{
+    MirrorOp *op;
+    uint64_t start_chunk = offset / s->granularity;
+    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
+
+    op = g_new(MirrorOp, 1);
+    *op = (MirrorOp){
+        .s               = s,
+        .offset          = offset,
+        .bytes           = bytes,
+        .is_active_write = true,
+    };
+    qemu_co_queue_init(&op->waiting_requests);
+    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
+
+    s->in_active_write_counter++;
+
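+    /* Wait for in-flight operations that overlap this area, then mark
+     * the affected chunks as in flight so that background copying keeps
+     * out of this range. */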
+    mirror_wait_on_conflicts(op, s, offset, bytes);
+
+    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
+
+    return op;
+}
+
+static void coroutine_fn active_write_settle(MirrorOp *op)
+{
+    uint64_t start_chunk = op->offset / op->s->granularity;
+    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
+                                      op->s->granularity);
+
+    if (!--op->s->in_active_write_counter && op->s->actively_synced) {
+        BdrvChild *source = op->s->mirror_top_bs->backing;
+
+        if (QLIST_FIRST(&source->bs->parents) == source &&
+            QLIST_NEXT(source, next_parent) == NULL)
+        {
+            /* Assert that we are back in sync once all active write
+             * operations are settled.
+             * Note that we can only assert this if the mirror node
+             * is the source node's only parent. */
+            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
+        }
+    }
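+
+    /* Release the chunks and wake up any requests waiting for this
+     * operation to settle. */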
+    bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
+    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
+    qemu_co_queue_restart_all(&op->waiting_requests);
+    g_free(op);
+}
+
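+/*
+ * Illustrative sketch only, not part of this patch's call graph: a guest
+ * write intercepted by the mirror node would be expected to pair these
+ * helpers roughly as follows (`source` standing for the mirror node's
+ * backing child, MIRROR_METHOD_COPY for a plain write):
+ *
+ *     op = active_write_prepare(job, offset, bytes);
+ *     ret = bdrv_co_pwritev(source, offset, bytes, qiov, flags);
+ *     if (ret >= 0) {
+ *         do_sync_target_write(job, MIRROR_METHOD_COPY, offset, bytes,
+ *                              qiov, flags);
+ *     }
+ *     active_write_settle(op);
+ */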