block/block-copy: add memory limit

/*
 * block_copy API
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer ([email protected])
 *  Vladimir Sementsov-Ogievskiy <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "sysemu/block-backend.h"
#include "qemu/units.h"
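
/*
 * BLOCK_COPY_MAX_COPY_RANGE caps the size of a single copy_range chunk;
 * BLOCK_COPY_MAX_MEM caps the total memory that in-flight copy operations
 * may hold at once, enforced through the shared resource pool (s->mem).
 */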
#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
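
/*
 * Block (in coroutine context) until no in-flight request intersects
 * [start, end).  The list is rescanned from the head after every wakeup,
 * since it may have changed while we were waiting.
 */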
static void coroutine_fn block_copy_wait_inflight_reqs(BlockCopyState *s,
                                                       int64_t start,
                                                       int64_t end)
{
    BlockCopyInFlightReq *req;
    bool waited;

    do {
        waited = false;
        QLIST_FOREACH(req, &s->inflight_reqs, list) {
            if (end > req->start_byte && start < req->end_byte) {
                qemu_co_queue_wait(&req->wait_queue, NULL);
                waited = true;
                break;
            }
        }
    } while (waited);
}
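
/*
 * Register [start, end) as an in-flight request.  Callers must already have
 * waited for intersecting requests via block_copy_wait_inflight_reqs().
 */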
static void block_copy_inflight_req_begin(BlockCopyState *s,
                                          BlockCopyInFlightReq *req,
                                          int64_t start, int64_t end)
{
    req->start_byte = start;
    req->end_byte = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&s->inflight_reqs, req, list);
}
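
/* Drop @req from the in-flight list and wake all coroutines waiting on it. */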
static void coroutine_fn block_copy_inflight_req_end(BlockCopyInFlightReq *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}
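
/* Free a state created by block_copy_state_new().  Passing NULL is a no-op. */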
void block_copy_state_free(BlockCopyState *s)
{
    if (!s) {
        return;
    }

    bdrv_release_dirty_bitmap(s->copy_bitmap);
    shres_destroy(s->mem);
    g_free(s);
}
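
/*
 * Create the state needed to use the block_copy() API.  A dirty bitmap that
 * tracks the clusters still to be copied is created on the source node at
 * @cluster_size granularity.  Returns NULL and sets @errp on failure.
 */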
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                     int64_t cluster_size,
                                     BdrvRequestFlags write_flags, Error **errp)
{
    BlockCopyState *s;
    BdrvDirtyBitmap *copy_bitmap;

    /* Ignore BLOCK_COPY_MAX_COPY_RANGE if requested cluster_size is larger */
    uint32_t max_transfer =
            MIN_NON_ZERO(MAX(cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
                         MIN_NON_ZERO(source->bs->bl.max_transfer,
                                      target->bs->bl.max_transfer));

    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
                                           errp);
    if (!copy_bitmap) {
        return NULL;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);

    s = g_new(BlockCopyState, 1);
    *s = (BlockCopyState) {
        .source = source,
        .target = target,
        .copy_bitmap = copy_bitmap,
        .cluster_size = cluster_size,
        .len = bdrv_dirty_bitmap_size(copy_bitmap),
        .write_flags = write_flags,
        .mem = shres_create(BLOCK_COPY_MAX_MEM),
    };

    s->copy_range_size = QEMU_ALIGN_DOWN(max_transfer, cluster_size);
    /*
     * Set use_copy_range, considering the following:
     * 1. Compression is not supported for copy_range.
     * 2. copy_range does not respect max_transfer (it's a TODO), so we factor
     *    that in here. If max_transfer is smaller than the cluster_size, we
     *    do not use copy_range (in that case copy_range_size is zero after
     *    aligning down above).
     */
    s->use_copy_range =
        !(write_flags & BDRV_REQ_WRITE_COMPRESSED) && s->copy_range_size > 0;

    QLIST_INIT(&s->inflight_reqs);

    return s;
}

void block_copy_set_callbacks(
        BlockCopyState *s,
        ProgressBytesCallbackFunc progress_bytes_callback,
        ProgressResetCallbackFunc progress_reset_callback,
        void *progress_opaque)
{
    s->progress_bytes_callback = progress_bytes_callback;
    s->progress_reset_callback = progress_reset_callback;
    s->progress_opaque = progress_opaque;
}
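
/*
 * Typical caller sequence (an illustrative sketch only; progress_cb,
 * reset_cb, opaque, ret and error_is_read are hypothetical caller-side
 * names, not part of this file).  Note that block_copy() is a coroutine_fn
 * and must run in coroutine context:
 *
 *     BlockCopyState *s = block_copy_state_new(source, target, cluster_size,
 *                                              write_flags, errp);
 *     if (s) {
 *         block_copy_set_callbacks(s, progress_cb, reset_cb, opaque);
 *         ret = block_copy(s, 0, QEMU_ALIGN_UP(s->len, s->cluster_size),
 *                          &error_is_read);
 *         block_copy_state_free(s);
 *     }
 */
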
/*
 * block_copy_do_copy
 *
 * Copy a cluster-aligned chunk. @end is allowed to exceed s->len only to
 * cover the last cluster when s->len is not aligned to clusters.
 *
 * No sync here: neither bitmap nor intersecting-request handling, only copy.
 *
 * Returns 0 on success.
 */
static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
                                           int64_t start, int64_t end,
                                           bool *error_is_read)
{
    int ret;
    int nbytes = MIN(end, s->len) - start;
    void *bounce_buffer = NULL;

    assert(QEMU_IS_ALIGNED(start, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end, s->cluster_size));
    assert(end < s->len || end == QEMU_ALIGN_UP(s->len, s->cluster_size));

    if (s->use_copy_range) {
        ret = bdrv_co_copy_range(s->source, start, s->target, start, nbytes,
                                 0, s->write_flags);
        if (ret < 0) {
            trace_block_copy_copy_range_fail(s, start, ret);
            s->use_copy_range = false;
            /* Fall back to read+write with allocated buffer */
        } else {
            goto out;
        }
    }

    bounce_buffer = qemu_blockalign(s->source->bs, nbytes);

    ret = bdrv_co_pread(s->source, start, nbytes, bounce_buffer, 0);
    if (ret < 0) {
        trace_block_copy_read_fail(s, start, ret);
        if (error_is_read) {
            *error_is_read = true;
        }
        goto out;
    }

    ret = bdrv_co_pwrite(s->target, start, nbytes, bounce_buffer,
                         s->write_flags);
    if (ret < 0) {
        trace_block_copy_write_fail(s, start, ret);
        if (error_is_read) {
            *error_is_read = false;
        }
        goto out;
    }

out:
    qemu_vfree(bounce_buffer);

    return ret;
}

/*
 * Check if the cluster starting at @offset is allocated or not.
 * Return via @pnum the number of contiguous clusters sharing this allocation.
 */
static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
                                           int64_t *pnum)
{
    BlockDriverState *bs = s->source->bs;
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        ret = bdrv_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: unallocated tail is treated as an entire segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}

/*
 * Reset bits in copy_bitmap starting at @offset if they represent unallocated
 * data in the image. May reset subsequent contiguous bits.
 * @return 0 when the cluster at @offset was unallocated,
 *         1 otherwise, and a negative error code on error.
 */
int64_t block_copy_reset_unallocated(BlockCopyState *s,
                                     int64_t offset, int64_t *count)
{
    int ret;
    int64_t clusters, bytes;

    ret = block_copy_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
        s->progress_reset_callback(s->progress_opaque);
    }

    *count = bytes;
    return ret;
}
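
/*
 * Copy the clusters marked dirty in the copy bitmap within
 * [start, start + bytes).  Waits for intersecting in-flight requests, then
 * registers the range as in-flight itself.  Returns 0 on success and a
 * negative error code on failure; on failure *error_is_read (if non-NULL)
 * reports whether the failing operation was the read or the write.
 */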
int coroutine_fn block_copy(BlockCopyState *s,
                            int64_t start, uint64_t bytes,
                            bool *error_is_read)
{
    int ret = 0;
    int64_t end = bytes + start; /* in bytes */
    int64_t status_bytes;
    BlockCopyInFlightReq req;

    /*
     * block_copy() callers are responsible for keeping source and target in
     * the same aio context.
     */
    assert(bdrv_get_aio_context(s->source->bs) ==
           bdrv_get_aio_context(s->target->bs));

    assert(QEMU_IS_ALIGNED(start, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end, s->cluster_size));

    block_copy_wait_inflight_reqs(s, start, end);
    block_copy_inflight_req_begin(s, &req, start, end);

    while (start < end) {
        int64_t next_zero, chunk_end;

        if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) {
            trace_block_copy_skip(s, start);
            start += s->cluster_size;
            continue; /* already copied */
        }

        chunk_end = MIN(end, start + (s->use_copy_range ?
                                      s->copy_range_size : s->cluster_size));

        next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start,
                                                chunk_end - start);
        if (next_zero >= 0) {
            assert(next_zero > start); /* start is dirty */
            assert(next_zero < chunk_end); /* no need to do MIN() */
            chunk_end = next_zero;
        }

        if (s->skip_unallocated) {
            ret = block_copy_reset_unallocated(s, start, &status_bytes);
            if (ret == 0) {
                trace_block_copy_skip_range(s, start, status_bytes);
                start += status_bytes;
                continue;
            }
            /* Clamp to known allocated region */
            chunk_end = MIN(chunk_end, start + status_bytes);
        }

        trace_block_copy_process(s, start);

        bdrv_reset_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
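
        /*
         * co_get_from_shres() blocks as needed so that the total memory held
         * by in-flight copies never exceeds BLOCK_COPY_MAX_MEM.
         */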
        co_get_from_shres(s->mem, chunk_end - start);
        ret = block_copy_do_copy(s, start, chunk_end, error_is_read);
        co_put_to_shres(s->mem, chunk_end - start);
        if (ret < 0) {
            bdrv_set_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
            break;
        }

        s->progress_bytes_callback(chunk_end - start, s->progress_opaque);
        start = chunk_end;
        ret = 0;
    }

    block_copy_inflight_req_end(&req);

    return ret;
}