/*
 * block_copy API
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer ([email protected])
 *  Vladimir Sementsov-Ogievskiy <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "sysemu/block-backend.h"

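/*
 * Rough usage sketch (illustrative only; "source_bs", "target_bs", the
 * progress callbacks and the "job" opaque pointer below are hypothetical
 * placeholders, not names defined in this file):
 *
 *     BlockCopyState *bcs;
 *     bool error_is_read;
 *     int ret;
 *
 *     bcs = block_copy_state_new(source_bs, target_bs, cluster_size,
 *                                write_flags, my_progress_bytes_cb,
 *                                my_progress_reset_cb, job, errp);
 *     if (!bcs) {
 *         ... handle the error reported via errp ...
 *     }
 *     ... mark the clusters to copy as dirty in the copy bitmap ...
 *     ret = block_copy(bcs, 0, copy_len, &error_is_read, false);
 *     block_copy_state_free(bcs);
 *
 * block_copy() must run in coroutine context, and both its start and length
 * arguments must be aligned to the cluster size.
 */

/* Free @s, its BlockBackends and its copy bitmap; a NULL @s is a no-op. */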
void block_copy_state_free(BlockCopyState *s)
{
    if (!s) {
        return;
    }

    bdrv_release_dirty_bitmap(blk_bs(s->source), s->copy_bitmap);
    blk_unref(s->source);
    blk_unref(s->target);
    g_free(s);
}

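/*
 * Create a BlockCopyState.
 *
 * This allocates a disabled dirty bitmap of @cluster_size granularity on
 * @source, attaches internal BlockBackends to @source (for reading) and
 * @target (for writing), and decides whether copy_range offloading can be
 * used.  @progress_bytes_callback and @progress_reset_callback are later
 * invoked with @progress_opaque as data is copied or skipped.
 *
 * Returns NULL and sets @errp on failure.
 */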
BlockCopyState *block_copy_state_new(
        BlockDriverState *source, BlockDriverState *target,
        int64_t cluster_size, BdrvRequestFlags write_flags,
        ProgressBytesCallbackFunc progress_bytes_callback,
        ProgressResetCallbackFunc progress_reset_callback,
        void *progress_opaque, Error **errp)
{
    BlockCopyState *s;
    int ret;
    uint64_t no_resize = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                         BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD;
    BdrvDirtyBitmap *copy_bitmap;

    copy_bitmap = bdrv_create_dirty_bitmap(source, cluster_size, NULL, errp);
    if (!copy_bitmap) {
        return NULL;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);

    s = g_new(BlockCopyState, 1);
    *s = (BlockCopyState) {
        .source = blk_new(bdrv_get_aio_context(source),
                          BLK_PERM_CONSISTENT_READ, no_resize),
        .target = blk_new(bdrv_get_aio_context(target),
                          BLK_PERM_WRITE, no_resize),
        .copy_bitmap = copy_bitmap,
        .cluster_size = cluster_size,
        .len = bdrv_dirty_bitmap_size(copy_bitmap),
        .write_flags = write_flags,
        .progress_bytes_callback = progress_bytes_callback,
        .progress_reset_callback = progress_reset_callback,
        .progress_opaque = progress_opaque,
    };

    s->copy_range_size = QEMU_ALIGN_DOWN(MIN(blk_get_max_transfer(s->source),
                                             blk_get_max_transfer(s->target)),
                                         s->cluster_size);
    /*
     * Set use_copy_range, considering the following:
     * 1. Compression is not supported for copy_range.
     * 2. copy_range does not respect max_transfer (it's a TODO), so we factor
     *    that in here. If max_transfer is smaller than the cluster size, we do
     *    not use copy_range (in that case copy_range_size is zero after the
     *    align-down above).
     */
    s->use_copy_range =
        !(write_flags & BDRV_REQ_WRITE_COMPRESSED) && s->copy_range_size > 0;

    /*
     * We simply allow aio-context changes on our block backends. The
     * block_copy() user (currently only backup) is responsible for keeping
     * source and target in the same aio context.
     */
    blk_set_disable_request_queuing(s->source, true);
    blk_set_allow_aio_context_change(s->source, true);
    blk_set_disable_request_queuing(s->target, true);
    blk_set_allow_aio_context_change(s->target, true);

    ret = blk_insert_bs(s->source, source, errp);
    if (ret < 0) {
        goto fail;
    }

    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }

    return s;

fail:
    block_copy_state_free(s);

    return NULL;
}

/*
 * Copy a range to the target using a bounce buffer and return the number of
 * bytes copied. If an error occurs, return a negative error number.
 */
static int coroutine_fn block_copy_with_bounce_buffer(BlockCopyState *s,
                                                      int64_t start,
                                                      int64_t end,
                                                      bool is_write_notifier,
                                                      bool *error_is_read,
                                                      void **bounce_buffer)
{
    int ret;
    int nbytes;
    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;

    assert(QEMU_IS_ALIGNED(start, s->cluster_size));
    bdrv_reset_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
    nbytes = MIN(s->cluster_size, s->len - start);
    if (!*bounce_buffer) {
        *bounce_buffer = blk_blockalign(s->source, s->cluster_size);
    }

    ret = blk_co_pread(s->source, start, nbytes, *bounce_buffer, read_flags);
    if (ret < 0) {
        trace_block_copy_with_bounce_buffer_read_fail(s, start, ret);
        if (error_is_read) {
            *error_is_read = true;
        }
        goto fail;
    }

    ret = blk_co_pwrite(s->target, start, nbytes, *bounce_buffer,
                        s->write_flags);
    if (ret < 0) {
        trace_block_copy_with_bounce_buffer_write_fail(s, start, ret);
        if (error_is_read) {
            *error_is_read = false;
        }
        goto fail;
    }

    return nbytes;
fail:
    bdrv_set_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
    return ret;
}

/*
 * Copy a range to the target and return the number of bytes copied. If an
 * error occurs, return a negative error number.
 */
static int coroutine_fn block_copy_with_offload(BlockCopyState *s,
                                                int64_t start,
                                                int64_t end,
                                                bool is_write_notifier)
{
    int ret;
    int nr_clusters;
    int nbytes;
    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;

    assert(QEMU_IS_ALIGNED(s->copy_range_size, s->cluster_size));
    assert(QEMU_IS_ALIGNED(start, s->cluster_size));
    nbytes = MIN(s->copy_range_size, MIN(end, s->len) - start);
    nr_clusters = DIV_ROUND_UP(nbytes, s->cluster_size);
    bdrv_reset_dirty_bitmap(s->copy_bitmap, start,
                            s->cluster_size * nr_clusters);
    ret = blk_co_copy_range(s->source, start, s->target, start, nbytes,
                            read_flags, s->write_flags);
    if (ret < 0) {
        trace_block_copy_with_offload_fail(s, start, ret);
        bdrv_set_dirty_bitmap(s->copy_bitmap, start,
                              s->cluster_size * nr_clusters);
        return ret;
    }

    return nbytes;
}

/*
 * Check whether the cluster starting at @offset is allocated or not.
 * Return via @pnum the number of contiguous clusters sharing this allocation.
 */
static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
                                           int64_t *pnum)
{
    BlockDriverState *bs = blk_bs(s->source);
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        ret = bdrv_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret != 0: a partially allocated area is considered allocated.
             * Otherwise: an unallocated tail is treated as a whole segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}

/*
 * Reset bits in copy_bitmap starting at offset if they represent unallocated
 * data in the image. May reset subsequent contiguous bits.
 * @return 0 when the cluster at @offset was unallocated,
 *         1 otherwise, and -ret on error.
 */
int64_t block_copy_reset_unallocated(BlockCopyState *s,
                                     int64_t offset, int64_t *count)
{
    int ret;
    int64_t clusters, bytes;

    ret = block_copy_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
        s->progress_reset_callback(s->progress_opaque);
    }

    *count = bytes;
    return ret;
}

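/*
 * Copy the dirty clusters in the range [@start, @start + @bytes) from source
 * to target.  @start and @bytes must be aligned to the cluster size.
 *
 * Clusters whose bits are clear in the copy bitmap are skipped; when
 * skip_unallocated is set, unallocated ranges are dropped from the bitmap
 * instead of being copied.  Returns 0 on success or a negative error number
 * on failure; for failures in the bounce-buffer path, *error_is_read reports
 * whether the error happened while reading the source.
 */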
int coroutine_fn block_copy(BlockCopyState *s,
                            int64_t start, uint64_t bytes,
                            bool *error_is_read,
                            bool is_write_notifier)
{
    int ret = 0;
    int64_t end = bytes + start; /* bytes */
    void *bounce_buffer = NULL;
    int64_t status_bytes;

    /*
     * The block_copy() user is responsible for keeping source and target in
     * the same aio context.
     */
    assert(blk_get_aio_context(s->source) == blk_get_aio_context(s->target));

    assert(QEMU_IS_ALIGNED(start, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end, s->cluster_size));

    while (start < end) {
        int64_t dirty_end;

        if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) {
            trace_block_copy_skip(s, start);
            start += s->cluster_size;
            continue; /* already copied */
        }

        dirty_end = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start,
                                                (end - start));
        if (dirty_end < 0) {
            dirty_end = end;
        }

        if (s->skip_unallocated) {
            ret = block_copy_reset_unallocated(s, start, &status_bytes);
            if (ret == 0) {
                trace_block_copy_skip_range(s, start, status_bytes);
                start += status_bytes;
                continue;
            }
            /* Clamp to known allocated region */
            dirty_end = MIN(dirty_end, start + status_bytes);
        }

        trace_block_copy_process(s, start);

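        /*
         * Try the copy_range offload path first; if it fails, permanently
         * disable it for this BlockCopyState and fall through to retry the
         * same range with the bounce-buffer path below.
         */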
        if (s->use_copy_range) {
            ret = block_copy_with_offload(s, start, dirty_end,
                                          is_write_notifier);
            if (ret < 0) {
                s->use_copy_range = false;
            }
        }
        if (!s->use_copy_range) {
            ret = block_copy_with_bounce_buffer(s, start, dirty_end,
                                                is_write_notifier,
                                                error_is_read, &bounce_buffer);
        }
        if (ret < 0) {
            break;
        }

        start += ret;
        s->progress_bytes_callback(ret, s->progress_opaque);
        ret = 0;
    }

    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    return ret;
}