/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer ([email protected])
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "block/block_backup.h"
#include "block/block-copy.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"

#include "block/backup-top.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)

typedef struct BackupBlockJob {
    BlockJob common;
    BlockDriverState *backup_top;
    BlockDriverState *source_bs;
    BlockDriverState *target_bs;

    BdrvDirtyBitmap *sync_bitmap;

    MirrorSyncMode sync_mode;
    BitmapSyncMode bitmap_mode;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    uint64_t len;
    int64_t cluster_size;
    BackupPerf perf;

    BlockCopyState *bcs;

    bool wait;
    BlockCopyCallState *bg_bcs_call;
} BackupBlockJob;

static const BlockJobDriver backup_job_driver;

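/*
 * Resolve the temporary successor of the sync bitmap once the job finishes:
 * depending on the job result and the bitmap sync mode, either let the
 * successor replace the original bitmap or merge it back so that no dirty
 * bits are lost.
 */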
static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    bool sync = (((ret == 0) || (job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS))
                 && (job->bitmap_mode != BITMAP_SYNC_MODE_NEVER));

    if (sync) {
        /*
         * We succeeded, or we always intended to sync the bitmap.
         * Delete this bitmap and install the child.
         */
        bm = bdrv_dirty_bitmap_abdicate(job->sync_bitmap, NULL);
    } else {
        /*
         * We failed, or we never intended to sync the bitmap anyway.
         * Merge the successor back into the parent, keeping all data.
         */
        bm = bdrv_reclaim_dirty_bitmap(job->sync_bitmap, NULL);
    }

    assert(bm);

    if (ret < 0 && job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS) {
        /* If we failed and synced, merge in the bits we didn't copy: */
        bdrv_dirty_bitmap_merge_internal(bm, block_copy_dirty_bitmap(job->bcs),
                                         NULL, true);
    }
}

static void backup_commit(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

static void backup_clean(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    block_job_remove_all_bdrv(&s->common);
    bdrv_backup_top_drop(s->backup_top);
}

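/*
 * Re-arm the copy-before-write bitmap over the whole device so that a new
 * backup checkpoint starts with every cluster marked dirty again. Only
 * allowed for sync=none jobs.
 */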
void backup_do_checkpoint(BlockJob *job, Error **errp)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);

    assert(block_job_driver(job) == &backup_job_driver);

    if (backup_job->sync_mode != MIRROR_SYNC_MODE_NONE) {
        error_setg(errp, "The backup job only supports block checkpoint in"
                   " sync=none mode");
        return;
    }

    bdrv_set_dirty_bitmap(block_copy_dirty_bitmap(backup_job->bcs), 0,
                          backup_job->len);
}

static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&job->common, job->on_target_error,
                                      false, error);
    }
}

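/*
 * Completion callback of the background block-copy call: wake the job
 * coroutine if it is explicitly waiting for us, otherwise just kick the job
 * so that backup_loop() re-evaluates the call state.
 */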
static void coroutine_fn backup_block_copy_callback(void *opaque)
{
    BackupBlockJob *s = opaque;

    if (s->wait) {
        s->wait = false;
        aio_co_wake(s->common.job.co);
    } else {
        job_enter(&s->common.job);
    }
}

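/*
 * Main copy loop: start an asynchronous block-copy call over the whole
 * device, yield until it finishes, and retry or bail out according to the
 * result and the configured error policy.
 */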
static int coroutine_fn backup_loop(BackupBlockJob *job)
{
    BlockCopyCallState *s = NULL;
    int ret = 0;
    bool error_is_read;
    BlockErrorAction act;

    while (true) { /* retry loop */
        job->bg_bcs_call = s = block_copy_async(job->bcs, 0,
                QEMU_ALIGN_UP(job->len, job->cluster_size),
                job->perf.max_workers, job->perf.max_chunk,
                backup_block_copy_callback, job);

        while (!block_copy_call_finished(s) &&
               !job_is_cancelled(&job->common.job))
        {
            job_yield(&job->common.job);
        }

        if (!block_copy_call_finished(s)) {
            assert(job_is_cancelled(&job->common.job));
            /*
             * Note that we can't use job_yield() here, as it doesn't work for
             * cancelled job.
             */
            block_copy_call_cancel(s);
            job->wait = true;
            qemu_coroutine_yield();
            assert(block_copy_call_finished(s));
            ret = 0;
            goto out;
        }

        if (job_is_cancelled(&job->common.job) ||
            block_copy_call_succeeded(s))
        {
            ret = 0;
            goto out;
        }

        if (block_copy_call_cancelled(s)) {
            /*
             * Job is not cancelled but only block-copy call. This is possible
             * after job pause. Now the pause is finished, start new block-copy
             * iteration.
             */
            block_copy_call_free(s);
            continue;
        }

        /* The only remaining case is failed block-copy call. */
        assert(block_copy_call_failed(s));

        ret = block_copy_call_status(s, &error_is_read);
        act = backup_error_action(job, error_is_read, -ret);
        switch (act) {
        case BLOCK_ERROR_ACTION_REPORT:
            goto out;
        case BLOCK_ERROR_ACTION_STOP:
            /*
             * Go to pause prior to starting new block-copy call on the next
             * iteration.
             */
            job_pause_point(&job->common.job);
            break;
        case BLOCK_ERROR_ACTION_IGNORE:
            /* Proceed to new block-copy call to retry. */
            break;
        default:
            abort();
        }

        block_copy_call_free(s);
    }

out:
    block_copy_call_free(s);
    job->bg_bcs_call = NULL;
    return ret;
}

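/*
 * Initialize the block-copy dirty bitmap: copy it from the user-provided
 * sync bitmap for sync=bitmap jobs, otherwise mark the whole device dirty.
 * Also publishes the initial progress estimate for the job.
 */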
static void backup_init_bcs_bitmap(BackupBlockJob *job)
{
    bool ret;
    uint64_t estimate;
    BdrvDirtyBitmap *bcs_bitmap = block_copy_dirty_bitmap(job->bcs);

    if (job->sync_mode == MIRROR_SYNC_MODE_BITMAP) {
        ret = bdrv_dirty_bitmap_merge_internal(bcs_bitmap, job->sync_bitmap,
                                               NULL, true);
        assert(ret);
    } else {
        if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
            /*
             * We can't hog the coroutine to initialize this thoroughly.
             * Set a flag and resume work when we are able to yield safely.
             */
            block_copy_set_skip_unallocated(job->bcs, true);
        }
        bdrv_set_dirty_bitmap(bcs_bitmap, 0, job->len);
    }

    estimate = bdrv_get_dirty_count(bcs_bitmap);
    job_progress_set_remaining(&job->common.job, estimate);
}

static int coroutine_fn backup_run(Job *job, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    int ret;

    backup_init_bcs_bitmap(s);

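    /*
     * For sync=top, walk the disk and clear the copy bitmap for clusters
     * that are unallocated in the top layer, so only allocated data gets
     * copied.
     */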
    if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
        int64_t offset = 0;
        int64_t count;

        for (offset = 0; offset < s->len; ) {
            if (job_is_cancelled(job)) {
                return -ECANCELED;
            }

            job_pause_point(job);

            if (job_is_cancelled(job)) {
                return -ECANCELED;
            }

            ret = block_copy_reset_unallocated(s->bcs, offset, &count);
            if (ret < 0) {
                return ret;
            }

            offset += count;
        }
        block_copy_set_skip_unallocated(s->bcs, false);
    }

    if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
        /*
         * All bits are set in bcs bitmap to allow any cluster to be copied.
         * This does not actually require them to be copied.
         */
        while (!job_is_cancelled(job)) {
            /*
             * Yield until the job is cancelled. We just let our before_write
             * notify callback service CoW requests.
             */
            job_yield(job);
        }
    } else {
        return backup_loop(s);
    }

    return 0;
}

static void coroutine_fn backup_pause(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);

    if (s->bg_bcs_call && !block_copy_call_finished(s->bg_bcs_call)) {
        block_copy_call_cancel(s->bg_bcs_call);
        s->wait = true;
        qemu_coroutine_yield();
    }
}

static void coroutine_fn backup_set_speed(BlockJob *job, int64_t speed)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    /*
     * block_job_set_speed() is called first from block_job_create(), when we
     * don't yet have s->bcs.
     */
    if (s->bcs) {
        block_copy_set_speed(s->bcs, speed);
        if (s->bg_bcs_call) {
            block_copy_kick(s->bg_bcs_call);
        }
    }
}

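/*
 * Cancellation hook: abort requests that are already in flight on the target
 * so the job does not have to wait for them to complete.
 */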
static void backup_cancel(Job *job, bool force)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);

    bdrv_cancel_in_flight(s->target_bs);
}

static const BlockJobDriver backup_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(BackupBlockJob),
        .job_type               = JOB_TYPE_BACKUP,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = backup_run,
        .commit                 = backup_commit,
        .abort                  = backup_abort,
        .clean                  = backup_clean,
        .pause                  = backup_pause,
        .cancel                 = backup_cancel,
    },
    .set_speed = backup_set_speed,
};

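/*
 * Pick the backup cluster size: at least BACKUP_CLUSTER_SIZE_DEFAULT, and no
 * smaller than the target's own cluster size when that can be determined.
 */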
static int64_t backup_calculate_cluster_size(BlockDriverState *target,
                                             Error **errp)
{
    int ret;
    BlockDriverInfo bdi;
    bool target_does_cow = bdrv_backing_chain_next(target);

    /*
     * If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible.
     */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target_does_cow) {
        /* Cluster size is not defined */
        warn_report("The target block device doesn't provide "
                    "information about the block size and it doesn't have a "
                    "backing file. The default block size of %u bytes is "
                    "used. If the actual block size of the target exceeds "
                    "this default, the backup may be unusable",
                    BACKUP_CLUSTER_SIZE_DEFAULT);
        return BACKUP_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target_does_cow) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        return ret;
    } else if (ret < 0 && target_does_cow) {
        /* Not fatal; just trudge on ahead. */
        return BACKUP_CLUSTER_SIZE_DEFAULT;
    }

    return MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
}

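/*
 * Create (but do not start) a backup block job. On failure, NULL is returned
 * and errp is set.
 */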
BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, int64_t speed,
                  MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
                  BitmapSyncMode bitmap_mode,
                  bool compress,
                  const char *filter_node_name,
                  BackupPerf *perf,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  int creation_flags,
                  BlockCompletionFunc *cb, void *opaque,
                  JobTxn *txn, Error **errp)
{
    int64_t len, target_len;
    BackupBlockJob *job = NULL;
    int64_t cluster_size;
    BdrvRequestFlags write_flags;
    BlockDriverState *backup_top = NULL;
    BlockCopyState *bcs = NULL;

    assert(bs);
    assert(target);

    /* QMP interface protects us from these cases */
    assert(sync_mode != MIRROR_SYNC_MODE_INCREMENTAL);
    assert(sync_bitmap || sync_mode != MIRROR_SYNC_MODE_BITMAP);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return NULL;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return NULL;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (compress && !bdrv_supports_compressed_writes(target)) {
        error_setg(errp, "Compression is not supported for this drive %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return NULL;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return NULL;
    }

    cluster_size = backup_calculate_cluster_size(target, errp);
    if (cluster_size < 0) {
        goto error;
    }

    if (perf->max_workers < 1) {
        error_setg(errp, "max-workers must be greater than zero");
        return NULL;
    }

    if (perf->max_chunk < 0) {
        error_setg(errp, "max-chunk must be zero (which means no limit) or "
                   "positive");
        return NULL;
    }

    if (perf->max_chunk && perf->max_chunk < cluster_size) {
        error_setg(errp, "Required max-chunk (%" PRIi64 ") is less than backup "
                   "cluster size (%" PRIi64 ")", perf->max_chunk, cluster_size);
        return NULL;
    }

    if (sync_bitmap) {
        /* If we need to write to this bitmap, check that we can: */
        if (bitmap_mode != BITMAP_SYNC_MODE_NEVER &&
            bdrv_dirty_bitmap_check(sync_bitmap, BDRV_BITMAP_DEFAULT, errp)) {
            return NULL;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(sync_bitmap, errp) < 0) {
            return NULL;
        }
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "Unable to get length for '%s'",
                         bdrv_get_device_or_node_name(bs));
        goto error;
    }

    target_len = bdrv_getlength(target);
    if (target_len < 0) {
        error_setg_errno(errp, -target_len, "Unable to get length for '%s'",
                         bdrv_get_device_or_node_name(target));
        goto error;
    }

    if (target_len != len) {
        error_setg(errp, "Source and target image have different sizes");
        goto error;
    }

    /*
     * If the source is in the backing chain of the target, assume that the
     * target is going to be used for "image fleecing", i.e. it should
     * represent a kind of snapshot of the source at backup-start point in
     * time, and the target is going to be read by somebody (for example,
     * used as an NBD export) during the backup job.
     *
     * In this case, we need to add the BDRV_REQ_SERIALISING write flag to
     * avoid intersection of backup writes and third-party reads from the
     * target; otherwise, when reading from the target, we may occasionally
     * read data that has already been updated by the guest.
     *
     * For more information see commit f8d59dfb40bb and test
     * tests/qemu-iotests/222
     */
    write_flags = (bdrv_chain_contains(target, bs) ? BDRV_REQ_SERIALISING : 0) |
                  (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);

    backup_top = bdrv_backup_top_append(bs, target, filter_node_name,
                                        cluster_size, perf,
                                        write_flags, &bcs, errp);
    if (!backup_top) {
        goto error;
    }

    /* job->len is fixed, so we can't allow resize */
    job = block_job_create(job_id, &backup_job_driver, txn, backup_top,
                           0, BLK_PERM_ALL,
                           speed, creation_flags, cb, opaque, errp);
    if (!job) {
        goto error;
    }

    job->backup_top = backup_top;
    job->source_bs = bs;
    job->target_bs = target;
    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_bitmap;
    job->bitmap_mode = bitmap_mode;
    job->bcs = bcs;
    job->cluster_size = cluster_size;
    job->len = len;
    job->perf = *perf;

    block_copy_set_progress_meter(bcs, &job->common.job.progress);
    block_copy_set_speed(bcs, speed);

    /* Required permissions are already taken by backup-top target */
    block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    return &job->common;

error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(sync_bitmap, NULL);
    }
    if (backup_top) {
        bdrv_backup_top_drop(backup_top);
    }

    return NULL;
}