/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
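/* For scale: with BDRV_SECTOR_BITS == 9 this evaluates to 32768 * 512 bytes,
 * i.e. bounce buffers are capped at 16 MiB. */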

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                                      bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_begin_single(c, false);
    }
}

static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
                                                   int *drained_end_counter)
{
    assert(c->parent_quiesce_counter > 0);
    c->parent_quiesce_counter--;
    if (c->klass->drained_end) {
        c->klass->drained_end(c, drained_end_counter);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    int drained_end_counter = 0;
    bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
    BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0);
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                                    bool ignore_bds_parents,
                                    int *drained_end_counter)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);
    }
}

static bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
    c->parent_quiesce_counter++;
    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
    if (poll) {
        BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

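/*
 * Illustrative example of the merge semantics: folding a child with
 * max_transfer == 0 ("no limit") into a parent with max_transfer == 64K
 * keeps 64K, because MIN_NON_ZERO() treats 0 as "unset" rather than as the
 * strictest limit. Alignments, by contrast, combine with MAX(), so the
 * merged node satisfies every child's alignment requirement.
 */
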
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_refresh_limits(c->bs, errp);
            if (*errp) {
                return;
            }
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size;

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}

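/*
 * Usage sketch (hypothetical callers): because the flag is a refcount,
 * independent users can nest enable/disable pairs safely:
 *
 *     bdrv_enable_copy_on_read(bs);    // user A
 *     bdrv_enable_copy_on_read(bs);    // user B
 *     bdrv_disable_copy_on_read(bs);   // user A done; COR still enabled
 *     bdrv_disable_copy_on_read(bs);   // user B done; COR now disabled
 */
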
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    bool poll;
    BdrvChild *parent;
    bool ignore_bds_parents;
    int *drained_end_counter;
} BdrvCoDrainData;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
    qatomic_mb_set(&data->done, true);
    if (!data->begin) {
        qatomic_dec(data->drained_end_counter);
    }
    bdrv_dec_in_flight(bs);

    g_free(data);
}

/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
                              int *drained_end_counter)
{
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .bs = bs,
        .done = false,
        .begin = begin,
        .drained_end_counter = drained_end_counter,
    };

    if (!begin) {
        qatomic_inc(drained_end_counter);
    }

    /* Make sure the driver callback completes during the polling phase for
     * drain_begin. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
}

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents)
{
    BdrvChild *child, *next;

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            if (bdrv_drain_poll(child->bs, recursive, child, false)) {
                return true;
            }
        }
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, recursive, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            assert(!data->drained_end_counter);
            bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                  data->ignore_bds_parents, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->recursive, data->parent,
                                data->ignore_bds_parents,
                                data->drained_end_counter);
        }
        aio_context_release(ctx);
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, bool recursive,
                                                BdrvChild *parent,
                                                bool ignore_bds_parents,
                                                bool poll,
                                                int *drained_end_counter)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();
    AioContext *ctx = bdrv_get_aio_context(bs);
    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .recursive = recursive,
        .parent = parent,
        .ignore_bds_parents = ignore_bds_parents,
        .poll = poll,
        .drained_end_counter = drained_end_counter,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    /*
     * Temporarily drop the lock across the yield or we would get deadlocks.
     * bdrv_co_drain_bh_cb() reacquires the lock as needed.
     *
     * When we yield below, the lock for the current context will be
     * released, so if this is actually the lock that protects bs, don't drop
     * it a second time.
     */
    if (ctx != co_ctx) {
        aio_context_release(ctx);
    }
    replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);

    /* Reacquire the AioContext of bs if we dropped it */
    if (ctx != co_ctx) {
        aio_context_acquire(ctx);
    }
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents)
{
    assert(!qemu_in_coroutine());

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }

    bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
    bdrv_drain_invoke(bs, true, NULL);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll)
{
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
                               poll, NULL);
        return;
    }

    bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
                                  false);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        assert(!ignore_bds_parents);
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
    }
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, false, NULL, false, true);
}

void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, true, NULL, false, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions. The *drained_end_counter pointee will be incremented
 * once for every background operation scheduled, and decremented once
 * the operation settles. Therefore, the pointer must remain valid
 * until the pointee reaches 0. That implies that whoever sets up the
 * pointee has to poll until it is 0.
 *
 * We use atomic operations to access *drained_end_counter, because
 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
 *     @bs may contain nodes in different AioContexts,
 * (2) bdrv_drain_all_end() uses the same counter for all nodes,
 *     regardless of which AioContext they are in.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter)
{
    BdrvChild *child;
    int old_quiesce_counter;

    assert(drained_end_counter != NULL);

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
                               false, drained_end_counter);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false, drained_end_counter);
    bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
                            drained_end_counter);

    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH(child, &bs->children, next) {
            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
                                drained_end_counter);
        }
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
    bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}

void bdrv_subtree_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

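/*
 * Usage sketch (hypothetical caller): a drained section brackets code that
 * must not race with in-flight I/O on @bs, e.g. graph manipulation:
 *
 *     bdrv_drained_begin(bs);     // quiesce parents, wait for requests
 *     ... modify the node or its children safely ...
 *     bdrv_drained_end(bs);       // resume I/O
 *
 * Use the bdrv_subtree_drained_begin()/end() pair instead when the whole
 * subtree below @bs has to be quiescent as well.
 */
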
void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
    int i;

    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child, false, true);
    }
}

void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
    int drained_end_counter = 0;
    int i;

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child, false,
                            &drained_end_counter);
    }

    BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0);
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend the block driver's internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay; waiting for the I/O
     * requests to finish may never terminate.
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);
    }

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    int drained_end_counter = 0;

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
    }
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    int drained_end_counter = 0;

    /*
     * The bdrv queue is managed by record/replay; waiting for the I/O
     * requests to finish may never terminate.
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0);

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  uint64_t bytes,
                                  enum BdrvTrackedRequestType type)
{
    assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, uint64_t bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

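/*
 * Worked example (illustrative): suppose req's overlap range covers
 * [4096, 8192). A query with offset=8192, bytes=512 starts exactly at the
 * end of the request, so the first disjointness test fires and there is no
 * overlap; a query with offset=8000, bytes=512 passes neither test and
 * therefore overlaps.
 */
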
/* Called with self->bs->reqs_lock held */
static BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests. This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}

/* Called with self->bs->reqs_lock held */
static bool coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;
    bool waited = false;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
        waited = true;
    }

    return waited;
}

/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                             - overlap_offset;

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}

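/*
 * Worked example (illustrative): with a 64 KiB cluster size, a request at
 * offset=70000 with bytes=1000 rounds to cluster_offset=65536 and
 * cluster_bytes=65536, because QEMU_ALIGN_DOWN(70000, 65536) == 65536 and
 * offset - cluster_offset + bytes == 5464, which QEMU_ALIGN_UP() rounds up
 * to a single 65536-byte cluster.
 */
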
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    bool waited = false;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    waited = bdrv_wait_serialising_requests_locked(self);
    qemu_co_mutex_unlock(&bs->reqs_lock);

    return waited;
}

bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    bool waited;

    qemu_co_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    waited = bdrv_wait_serialising_requests_locked(req);

    qemu_co_mutex_unlock(&req->bs->reqs_lock);

    return waited;
}

int bdrv_check_request(int64_t offset, int64_t bytes)
{
    if (offset < 0 || bytes < 0) {
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        return -EIO;
    }

    return 0;
}

static int bdrv_check_request32(int64_t offset, int64_t bytes)
{
    int ret = bdrv_check_request(offset, bytes);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}

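/*
 * Note on the range check (illustrative): the third test in
 * bdrv_check_request() is written as offset > BDRV_MAX_LENGTH - bytes
 * rather than offset + bytes > BDRV_MAX_LENGTH, so the addition can never
 * overflow int64_t; e.g. offset == BDRV_MAX_LENGTH - 512 with bytes == 1024
 * is rejected without ever computing offset + bytes.
 */
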
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags)
{
    return bdrv_pwritev(child, offset, bytes, NULL,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}

/* See bdrv_pwrite() for the return codes */
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    int ret;
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    ret = bdrv_preadv(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;
}

/* Return no. of bytes on success or < 0 on error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid offset or number of bytes
  -EACCES      Trying to write a read-only device
*/
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    int ret;
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    ret = bdrv_pwritev(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;
}

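/*
 * Usage sketch (hypothetical caller): reading and rewriting a 512-byte
 * header through these byte-granularity helpers:
 *
 *     uint8_t header[512];
 *     int ret = bdrv_pread(child, 0, header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;               // -errno on failure
 *     }
 *     ... modify header ...
 *     ret = bdrv_pwrite(child, 0, header, sizeof(header));
 *
 * Both return the number of bytes transferred on success, so callers that
 * only care about success/failure simply test for ret < 0.
 */
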
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}

1074 | static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs, |
1075 | uint64_t offset, uint64_t bytes, | |
ac850bf0 VSO |
1076 | QEMUIOVector *qiov, |
1077 | size_t qiov_offset, int flags) | |
166fe960 KW |
1078 | { |
1079 | BlockDriver *drv = bs->drv; | |
3fb06697 KW |
1080 | int64_t sector_num; |
1081 | unsigned int nb_sectors; | |
ac850bf0 VSO |
1082 | QEMUIOVector local_qiov; |
1083 | int ret; | |
3fb06697 | 1084 | |
fa166538 | 1085 | assert(!(flags & ~BDRV_REQ_MASK)); |
fe0480d6 | 1086 | assert(!(flags & BDRV_REQ_NO_FALLBACK)); |
fa166538 | 1087 | |
d470ad42 HR |
1088 | if (!drv) { |
1089 | return -ENOMEDIUM; | |
1090 | } | |
1091 | ||
ac850bf0 VSO |
1092 | if (drv->bdrv_co_preadv_part) { |
1093 | return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset, | |
1094 | flags); | |
1095 | } | |
1096 | ||
1097 | if (qiov_offset > 0 || bytes != qiov->size) { | |
1098 | qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes); | |
1099 | qiov = &local_qiov; | |
1100 | } | |
1101 | ||
3fb06697 | 1102 | if (drv->bdrv_co_preadv) { |
ac850bf0 VSO |
1103 | ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags); |
1104 | goto out; | |
3fb06697 KW |
1105 | } |
1106 | ||
edfab6a0 | 1107 | if (drv->bdrv_aio_preadv) { |
08844473 KW |
1108 | BlockAIOCB *acb; |
1109 | CoroutineIOCompletion co = { | |
1110 | .coroutine = qemu_coroutine_self(), | |
1111 | }; | |
1112 | ||
edfab6a0 EB |
1113 | acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags, |
1114 | bdrv_co_io_em_complete, &co); | |
08844473 | 1115 | if (acb == NULL) { |
ac850bf0 VSO |
1116 | ret = -EIO; |
1117 | goto out; | |
08844473 KW |
1118 | } else { |
1119 | qemu_coroutine_yield(); | |
ac850bf0 VSO |
1120 | ret = co.ret; |
1121 | goto out; | |
08844473 KW |
1122 | } |
1123 | } | |
edfab6a0 EB |
1124 | |
1125 | sector_num = offset >> BDRV_SECTOR_BITS; | |
1126 | nb_sectors = bytes >> BDRV_SECTOR_BITS; | |
1127 | ||
1bbbf32d NS |
1128 | assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)); |
1129 | assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE)); | |
41ae31e3 | 1130 | assert(bytes <= BDRV_REQUEST_MAX_BYTES); |
edfab6a0 EB |
1131 | assert(drv->bdrv_co_readv); |
1132 | ||
ac850bf0 VSO |
1133 | ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); |
1134 | ||
1135 | out: | |
1136 | if (qiov == &local_qiov) { | |
1137 | qemu_iovec_destroy(&local_qiov); | |
1138 | } | |
1139 | ||
1140 | return ret; | |
166fe960 KW |
1141 | } |
1142 | ||
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov,
                                            size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
                                    flags & bs->supported_write_flags,
                                    bdrv_co_io_em_complete, &co);
        flags &= ~bs->supported_write_flags;
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
                              flags & bs->supported_write_flags);
    flags &= ~bs->supported_write_flags;

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                               uint64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}

static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
        size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    size_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    unsigned int progress = 0;
    bool skip_write;

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive. That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    while (cluster_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(cluster_bytes, max_transfer);
        } else {
            ret = bdrv_is_allocated(bs, cluster_offset,
                                    MIN(cluster_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(cluster_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

1413 | /* | |
1414 | * Forwards an already correctly aligned request to the BlockDriver. This | |
1a62d0ac EB |
1415 | * handles copy on read, zeroing after EOF, and fragmentation of large |
1416 | * reads; any other features must be implemented by the caller. | |
61007b31 | 1417 | */ |
85c97ca7 | 1418 | static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child, |
61007b31 | 1419 | BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, |
65cd4424 | 1420 | int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags) |
61007b31 | 1421 | { |
85c97ca7 | 1422 | BlockDriverState *bs = child->bs; |
c9d20029 | 1423 | int64_t total_bytes, max_bytes; |
1a62d0ac EB |
1424 | int ret = 0; |
1425 | uint64_t bytes_remaining = bytes; | |
1426 | int max_transfer; | |
61007b31 | 1427 | |
49c07526 KW |
1428 | assert(is_power_of_2(align)); |
1429 | assert((offset & (align - 1)) == 0); | |
1430 | assert((bytes & (align - 1)) == 0); | |
abb06c5a | 1431 | assert((bs->open_flags & BDRV_O_NO_IO) == 0); |
1a62d0ac EB |
1432 | max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), |
1433 | align); | |
a604fa2b EB |
1434 | |
1435 | /* TODO: We would need a per-BDS .supported_read_flags and | |
1436 | * potential fallback support, if we ever implement any read flags | |
1437 | * to pass through to drivers. For now, there aren't any | |
1438 | * passthrough flags. */ | |
c53cb427 | 1439 | assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH))); |
61007b31 SH |
1440 | |
1441 | /* Handle Copy on Read and associated serialisation */ | |
1442 | if (flags & BDRV_REQ_COPY_ON_READ) { | |
1443 | /* If we touch the same cluster it counts as an overlap. This | |
1444 | * guarantees that allocating writes will be serialized and not race | |
1445 | * with each other for the same cluster. For example, in copy-on-read | |
1446 | * it ensures that the CoR read and write operations are atomic and | |
1447 | * guest writes cannot interleave between them. */ | |
8ac5aab2 | 1448 | bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs)); |
18fbd0de PB |
1449 | } else { |
1450 | bdrv_wait_serialising_requests(req); | |
61007b31 SH |
1451 | } |
1452 | ||
61007b31 | 1453 | if (flags & BDRV_REQ_COPY_ON_READ) { |
d6a644bb | 1454 | int64_t pnum; |
61007b31 | 1455 | |
88e63df2 | 1456 | ret = bdrv_is_allocated(bs, offset, bytes, &pnum); |
61007b31 SH |
1457 | if (ret < 0) { |
1458 | goto out; | |
1459 | } | |
1460 | ||
88e63df2 | 1461 | if (!ret || pnum != bytes) { |
65cd4424 VSO |
1462 | ret = bdrv_co_do_copy_on_readv(child, offset, bytes, |
1463 | qiov, qiov_offset, flags); | |
3299e5ec VSO |
1464 | goto out; |
1465 | } else if (flags & BDRV_REQ_PREFETCH) { | |
61007b31 SH |
1466 | goto out; |
1467 | } | |
1468 | } | |
1469 | ||
1a62d0ac | 1470 | /* Forward the request to the BlockDriver, possibly fragmenting it */ |
c9d20029 KW |
1471 | total_bytes = bdrv_getlength(bs); |
1472 | if (total_bytes < 0) { | |
1473 | ret = total_bytes; | |
1474 | goto out; | |
1475 | } | |
61007b31 | 1476 | |
c9d20029 | 1477 | max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align); |
1a62d0ac | 1478 | if (bytes <= max_bytes && bytes <= max_transfer) { |
65cd4424 | 1479 | ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, 0); |
1a62d0ac EB |
1480 | goto out; |
1481 | } | |
61007b31 | 1482 | |
1a62d0ac EB |
1483 | while (bytes_remaining) { |
1484 | int num; | |
61007b31 | 1485 | |
1a62d0ac | 1486 | if (max_bytes) { |
1a62d0ac EB |
1487 | num = MIN(bytes_remaining, MIN(max_bytes, max_transfer)); |
1488 | assert(num); | |
61007b31 | 1489 | |
1a62d0ac | 1490 | ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining, |
134b7dec HR |
1491 | num, qiov, |
1492 | qiov_offset + bytes - bytes_remaining, 0); | |
1a62d0ac | 1493 | max_bytes -= num; |
1a62d0ac EB |
1494 | } else { |
1495 | num = bytes_remaining; | |
134b7dec HR |
1496 | ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining, |
1497 | 0, bytes_remaining); | |
1a62d0ac EB |
1498 | } |
1499 | if (ret < 0) { | |
1500 | goto out; | |
1501 | } | |
1502 | bytes_remaining -= num; | |
61007b31 SH |
1503 | } |
1504 | ||
1505 | out: | |
1a62d0ac | 1506 | return ret < 0 ? ret : 0; |
61007b31 SH |
1507 | } |
1508 | ||
61007b31 | 1509 | /* |
7a3f542f VSO |
1510 | * Request padding |
1511 | * | |
1512 | * |<---- align ----->| |<----- align ---->| | |
1513 | * |<- head ->|<------------- bytes ------------->|<-- tail -->| | |
1514 | * | | | | | | | |
1515 | * -*----------$-------*-------- ... --------*-----$------------*--- | |
1516 | * | | | | | | | |
1517 | * | offset | | end | | |
1518 | * ALIGN_DOWN(offset) ALIGN_UP(offset) ALIGN_DOWN(end) ALIGN_UP(end) | |
1519 | * [buf ... ) [tail_buf ) | |
1520 | * | |
1521 | * @buf is an aligned allocation needed to store the @head and @tail paddings. | |
1522 | * @head is placed at the beginning of @buf and @tail at the end. | |
1523 | * | |
1524 | * @tail_buf is a pointer to the sub-buffer corresponding to the align-sized | |
1525 | * chunk around the tail, if a tail exists. | |
1526 | * | |
1527 | * @merge_reads is true for small requests, i.e. when | |
1528 | * @buf_len == @head + bytes + @tail. In this case it is possible that both | |
1529 | * head and tail exist but @buf_len == align and @tail_buf == @buf. | |
1530 | */ | |
1531 | typedef struct BdrvRequestPadding { | |
1532 | uint8_t *buf; | |
1533 | size_t buf_len; | |
1534 | uint8_t *tail_buf; | |
1535 | size_t head; | |
1536 | size_t tail; | |
1537 | bool merge_reads; | |
1538 | QEMUIOVector local_qiov; | |
1539 | } BdrvRequestPadding; | |
1540 | ||
1541 | static bool bdrv_init_padding(BlockDriverState *bs, | |
1542 | int64_t offset, int64_t bytes, | |
1543 | BdrvRequestPadding *pad) | |
1544 | { | |
1545 | uint64_t align = bs->bl.request_alignment; | |
1546 | size_t sum; | |
1547 | ||
1548 | memset(pad, 0, sizeof(*pad)); | |
1549 | ||
1550 | pad->head = offset & (align - 1); | |
1551 | pad->tail = ((offset + bytes) & (align - 1)); | |
1552 | if (pad->tail) { | |
1553 | pad->tail = align - pad->tail; | |
1554 | } | |
1555 | ||
ac9d00bf | 1556 | if (!pad->head && !pad->tail) { |
7a3f542f VSO |
1557 | return false; |
1558 | } | |
1559 | ||
ac9d00bf VSO |
1560 | assert(bytes); /* Nothing good in aligning zero-length requests */ |
1561 | ||
7a3f542f VSO |
1562 | sum = pad->head + bytes + pad->tail; |
1563 | pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align; | |
1564 | pad->buf = qemu_blockalign(bs, pad->buf_len); | |
1565 | pad->merge_reads = sum == pad->buf_len; | |
1566 | if (pad->tail) { | |
1567 | pad->tail_buf = pad->buf + pad->buf_len - align; | |
1568 | } | |
1569 | ||
1570 | return true; | |
1571 | } | |
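/*
 * Worked example of the computation above (illustrative only), assuming
 * align = 512, offset = 1000 and bytes = 3000:
 *   head = 1000 % 512 = 488,  tail = 512 - (4000 % 512) = 96,
 *   sum  = 488 + 3000 + 96 = 3584 > align with both paddings present,
 * so buf_len = 2 * align = 1024, merge_reads = false and tail_buf points at
 * buf + 512.  For a small request such as offset = 1000, bytes = 100, the
 * sum is 488 + 100 + 436 = 1024 == buf_len, so merge_reads becomes true.
 */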
1572 | ||
1573 | static int bdrv_padding_rmw_read(BdrvChild *child, | |
1574 | BdrvTrackedRequest *req, | |
1575 | BdrvRequestPadding *pad, | |
1576 | bool zero_middle) | |
1577 | { | |
1578 | QEMUIOVector local_qiov; | |
1579 | BlockDriverState *bs = child->bs; | |
1580 | uint64_t align = bs->bl.request_alignment; | |
1581 | int ret; | |
1582 | ||
1583 | assert(req->serialising && pad->buf); | |
1584 | ||
1585 | if (pad->head || pad->merge_reads) { | |
1586 | uint64_t bytes = pad->merge_reads ? pad->buf_len : align; | |
1587 | ||
1588 | qemu_iovec_init_buf(&local_qiov, pad->buf, bytes); | |
1589 | ||
1590 | if (pad->head) { | |
1591 | bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); | |
1592 | } | |
1593 | if (pad->merge_reads && pad->tail) { | |
1594 | bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); | |
1595 | } | |
1596 | ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes, | |
65cd4424 | 1597 | align, &local_qiov, 0, 0); |
7a3f542f VSO |
1598 | if (ret < 0) { |
1599 | return ret; | |
1600 | } | |
1601 | if (pad->head) { | |
1602 | bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); | |
1603 | } | |
1604 | if (pad->merge_reads && pad->tail) { | |
1605 | bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); | |
1606 | } | |
1607 | ||
1608 | if (pad->merge_reads) { | |
1609 | goto zero_mem; | |
1610 | } | |
1611 | } | |
1612 | ||
1613 | if (pad->tail) { | |
1614 | qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align); | |
1615 | ||
1616 | bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); | |
1617 | ret = bdrv_aligned_preadv( | |
1618 | child, req, | |
1619 | req->overlap_offset + req->overlap_bytes - align, | |
65cd4424 | 1620 | align, align, &local_qiov, 0, 0); |
7a3f542f VSO |
1621 | if (ret < 0) { |
1622 | return ret; | |
1623 | } | |
1624 | bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); | |
1625 | } | |
1626 | ||
1627 | zero_mem: | |
1628 | if (zero_middle) { | |
1629 | memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail); | |
1630 | } | |
1631 | ||
1632 | return 0; | |
1633 | } | |
1634 | ||
1635 | static void bdrv_padding_destroy(BdrvRequestPadding *pad) | |
1636 | { | |
1637 | if (pad->buf) { | |
1638 | qemu_vfree(pad->buf); | |
1639 | qemu_iovec_destroy(&pad->local_qiov); | |
1640 | } | |
1641 | } | |
1642 | ||
1643 | /* | |
1644 | * bdrv_pad_request | |
1645 | * | |
1646 | * Exchange the request parameters with the padded request if needed. The RMW | |
1647 | * read of the padding is not included; bdrv_padding_rmw_read() should be | |
1648 | * called separately if needed. | |
1649 | * | |
1650 | * All parameters except @bs are in-out: they represent the original request | |
1651 | * at function call and the padded request (if padding was needed) on return. | |
1652 | * | |
1653 | * This function always succeeds. | |
61007b31 | 1654 | */ |
1acc3466 VSO |
1655 | static bool bdrv_pad_request(BlockDriverState *bs, |
1656 | QEMUIOVector **qiov, size_t *qiov_offset, | |
7a3f542f VSO |
1657 | int64_t *offset, unsigned int *bytes, |
1658 | BdrvRequestPadding *pad) | |
1659 | { | |
1660 | if (!bdrv_init_padding(bs, *offset, *bytes, pad)) { | |
1661 | return false; | |
1662 | } | |
1663 | ||
1664 | qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head, | |
1acc3466 | 1665 | *qiov, *qiov_offset, *bytes, |
7a3f542f VSO |
1666 | pad->buf + pad->buf_len - pad->tail, pad->tail); |
1667 | *bytes += pad->head + pad->tail; | |
1668 | *offset -= pad->head; | |
1669 | *qiov = &pad->local_qiov; | |
1acc3466 | 1670 | *qiov_offset = 0; |
7a3f542f VSO |
1671 | |
1672 | return true; | |
1673 | } | |
1674 | ||
a03ef88f | 1675 | int coroutine_fn bdrv_co_preadv(BdrvChild *child, |
61007b31 SH |
1676 | int64_t offset, unsigned int bytes, QEMUIOVector *qiov, |
1677 | BdrvRequestFlags flags) | |
1acc3466 VSO |
1678 | { |
1679 | return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags); | |
1680 | } | |
1681 | ||
1682 | int coroutine_fn bdrv_co_preadv_part(BdrvChild *child, | |
1683 | int64_t offset, unsigned int bytes, | |
1684 | QEMUIOVector *qiov, size_t qiov_offset, | |
1685 | BdrvRequestFlags flags) | |
61007b31 | 1686 | { |
a03ef88f | 1687 | BlockDriverState *bs = child->bs; |
61007b31 | 1688 | BdrvTrackedRequest req; |
7a3f542f | 1689 | BdrvRequestPadding pad; |
61007b31 SH |
1690 | int ret; |
1691 | ||
7a3f542f | 1692 | trace_bdrv_co_preadv(bs, offset, bytes, flags); |
61007b31 | 1693 | |
f4dad307 VSO |
1694 | if (!bdrv_is_inserted(bs)) { |
1695 | return -ENOMEDIUM; | |
1696 | } | |
1697 | ||
8b117001 | 1698 | ret = bdrv_check_request32(offset, bytes); |
61007b31 SH |
1699 | if (ret < 0) { |
1700 | return ret; | |
1701 | } | |
1702 | ||
ac9d00bf VSO |
1703 | if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) { |
1704 | /* | |
1705 | * Aligning a zero-length request is nonsense. Even if the driver assigns | |
1706 | * special meaning to zero length (like qcow2_co_pwritev_compressed_part), | |
1707 | * we can't pass it to the driver due to request_alignment. | |
1708 | * | |
1709 | * Still, there is no reason to return an error if someone does an | |
1710 | * unaligned zero-length read occasionally. | |
1711 | */ | |
1712 | return 0; | |
1713 | } | |
1714 | ||
99723548 PB |
1715 | bdrv_inc_in_flight(bs); |
1716 | ||
9568b511 | 1717 | /* Don't do copy-on-read if we read data before write operation */ |
d73415a3 | 1718 | if (qatomic_read(&bs->copy_on_read)) { |
61007b31 SH |
1719 | flags |= BDRV_REQ_COPY_ON_READ; |
1720 | } | |
1721 | ||
1acc3466 | 1722 | bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad); |
61007b31 | 1723 | |
ebde595c | 1724 | tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ); |
7a3f542f VSO |
1725 | ret = bdrv_aligned_preadv(child, &req, offset, bytes, |
1726 | bs->bl.request_alignment, | |
1acc3466 | 1727 | qiov, qiov_offset, flags); |
61007b31 | 1728 | tracked_request_end(&req); |
99723548 | 1729 | bdrv_dec_in_flight(bs); |
61007b31 | 1730 | |
7a3f542f | 1731 | bdrv_padding_destroy(&pad); |
61007b31 SH |
1732 | |
1733 | return ret; | |
1734 | } | |
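/*
 * Minimal caller sketch (hypothetical helper, not a public API): read the
 * first 4 KiB of @child into a suitably aligned bounce buffer with
 * bdrv_co_preadv().  Assumes the caller runs in coroutine context and owns
 * a valid BdrvChild.
 */
static int coroutine_fn example_read_first_4k(BdrvChild *child)
{
    void *buf = qemu_blockalign(child->bs, 4096);
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, 4096);
    int ret = bdrv_co_preadv(child, 0, 4096, &qiov, 0);

    qemu_vfree(buf);
    return ret;
}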
1735 | ||
d05aa8bb | 1736 | static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, |
f5a5ca79 | 1737 | int64_t offset, int bytes, BdrvRequestFlags flags) |
61007b31 SH |
1738 | { |
1739 | BlockDriver *drv = bs->drv; | |
1740 | QEMUIOVector qiov; | |
0d93ed08 | 1741 | void *buf = NULL; |
61007b31 | 1742 | int ret = 0; |
465fe887 | 1743 | bool need_flush = false; |
443668ca DL |
1744 | int head = 0; |
1745 | int tail = 0; | |
61007b31 | 1746 | |
cf081fca | 1747 | int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX); |
a5b8dd2c EB |
1748 | int alignment = MAX(bs->bl.pwrite_zeroes_alignment, |
1749 | bs->bl.request_alignment); | |
cb2e2878 | 1750 | int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER); |
d05aa8bb | 1751 | |
d470ad42 HR |
1752 | if (!drv) { |
1753 | return -ENOMEDIUM; | |
1754 | } | |
1755 | ||
fe0480d6 KW |
1756 | if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) { |
1757 | return -ENOTSUP; | |
1758 | } | |
1759 | ||
b8d0a980 EB |
1760 | assert(alignment % bs->bl.request_alignment == 0); |
1761 | head = offset % alignment; | |
f5a5ca79 | 1762 | tail = (offset + bytes) % alignment; |
b8d0a980 EB |
1763 | max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment); |
1764 | assert(max_write_zeroes >= bs->bl.request_alignment); | |
61007b31 | 1765 | |
f5a5ca79 MP |
1766 | while (bytes > 0 && !ret) { |
1767 | int num = bytes; | |
61007b31 SH |
1768 | |
1769 | /* Align request. Block drivers can expect the "bulk" of the request | |
443668ca DL |
1770 | * to be aligned, and that unaligned requests do not cross cluster |
1771 | * boundaries. | |
61007b31 | 1772 | */ |
443668ca | 1773 | if (head) { |
b2f95fee EB |
1774 | /* Make a small request up to the first aligned sector. For |
1775 | * convenience, limit this request to max_transfer even if | |
1776 | * we don't need to fall back to writes. */ | |
f5a5ca79 | 1777 | num = MIN(MIN(bytes, max_transfer), alignment - head); |
b2f95fee EB |
1778 | head = (head + num) % alignment; |
1779 | assert(num < max_write_zeroes); | |
d05aa8bb | 1780 | } else if (tail && num > alignment) { |
443668ca DL |
1781 | /* Shorten the request to the last aligned sector. */ |
1782 | num -= tail; | |
61007b31 SH |
1783 | } |
1784 | ||
1785 | /* limit request size */ | |
1786 | if (num > max_write_zeroes) { | |
1787 | num = max_write_zeroes; | |
1788 | } | |
1789 | ||
1790 | ret = -ENOTSUP; | |
1791 | /* First try the efficient write zeroes operation */ | |
d05aa8bb EB |
1792 | if (drv->bdrv_co_pwrite_zeroes) { |
1793 | ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num, | |
1794 | flags & bs->supported_zero_flags); | |
1795 | if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) && | |
1796 | !(bs->supported_zero_flags & BDRV_REQ_FUA)) { | |
1797 | need_flush = true; | |
1798 | } | |
465fe887 EB |
1799 | } else { |
1800 | assert(!bs->supported_zero_flags); | |
61007b31 SH |
1801 | } |
1802 | ||
294682cc | 1803 | if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) { |
61007b31 | 1804 | /* Fall back to bounce buffer if write zeroes is unsupported */ |
465fe887 EB |
1805 | BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE; |
1806 | ||
1807 | if ((flags & BDRV_REQ_FUA) && | |
1808 | !(bs->supported_write_flags & BDRV_REQ_FUA)) { | |
1809 | /* No need for bdrv_driver_pwritev() to do a fallback | |
1810 | * flush on each chunk; use just one at the end */ | |
1811 | write_flags &= ~BDRV_REQ_FUA; | |
1812 | need_flush = true; | |
1813 | } | |
5def6b80 | 1814 | num = MIN(num, max_transfer); |
0d93ed08 VSO |
1815 | if (buf == NULL) { |
1816 | buf = qemu_try_blockalign0(bs, num); | |
1817 | if (buf == NULL) { | |
61007b31 SH |
1818 | ret = -ENOMEM; |
1819 | goto fail; | |
1820 | } | |
61007b31 | 1821 | } |
0d93ed08 | 1822 | qemu_iovec_init_buf(&qiov, buf, num); |
61007b31 | 1823 | |
ac850bf0 | 1824 | ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags); |
61007b31 SH |
1825 | |
1826 | /* Keep the bounce buffer around if it is big enough for | |
1827 | * all future requests. | |
1828 | */ | |
5def6b80 | 1829 | if (num < max_transfer) { |
0d93ed08 VSO |
1830 | qemu_vfree(buf); |
1831 | buf = NULL; | |
61007b31 SH |
1832 | } |
1833 | } | |
1834 | ||
d05aa8bb | 1835 | offset += num; |
f5a5ca79 | 1836 | bytes -= num; |
61007b31 SH |
1837 | } |
1838 | ||
1839 | fail: | |
465fe887 EB |
1840 | if (ret == 0 && need_flush) { |
1841 | ret = bdrv_co_flush(bs); | |
1842 | } | |
0d93ed08 | 1843 | qemu_vfree(buf); |
61007b31 SH |
1844 | return ret; |
1845 | } | |
1846 | ||
85fe2479 FZ |
1847 | static inline int coroutine_fn |
1848 | bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes, | |
1849 | BdrvTrackedRequest *req, int flags) | |
1850 | { | |
1851 | BlockDriverState *bs = child->bs; | |
85fe2479 FZ |
1852 | int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE); |
1853 | ||
1854 | if (bs->read_only) { | |
1855 | return -EPERM; | |
1856 | } | |
1857 | ||
85fe2479 FZ |
1858 | assert(!(bs->open_flags & BDRV_O_INACTIVE)); |
1859 | assert((bs->open_flags & BDRV_O_NO_IO) == 0); | |
1860 | assert(!(flags & ~BDRV_REQ_MASK)); | |
d1a764d1 | 1861 | assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING))); |
85fe2479 FZ |
1862 | |
1863 | if (flags & BDRV_REQ_SERIALISING) { | |
d1a764d1 VSO |
1864 | QEMU_LOCK_GUARD(&bs->reqs_lock); |
1865 | ||
1866 | tracked_request_set_serialising(req, bdrv_get_cluster_size(bs)); | |
1867 | ||
1868 | if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) { | |
1869 | return -EBUSY; | |
1870 | } | |
1871 | ||
1872 | bdrv_wait_serialising_requests_locked(req); | |
18fbd0de PB |
1873 | } else { |
1874 | bdrv_wait_serialising_requests(req); | |
85fe2479 FZ |
1875 | } |
1876 | ||
85fe2479 FZ |
1877 | assert(req->overlap_offset <= offset); |
1878 | assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); | |
cd47d792 | 1879 | assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE); |
85fe2479 | 1880 | |
cd47d792 FZ |
1881 | switch (req->type) { |
1882 | case BDRV_TRACKED_WRITE: | |
1883 | case BDRV_TRACKED_DISCARD: | |
1884 | if (flags & BDRV_REQ_WRITE_UNCHANGED) { | |
1885 | assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE)); | |
1886 | } else { | |
1887 | assert(child->perm & BLK_PERM_WRITE); | |
1888 | } | |
1889 | return notifier_with_return_list_notify(&bs->before_write_notifiers, | |
1890 | req); | |
1891 | case BDRV_TRACKED_TRUNCATE: | |
1892 | assert(child->perm & BLK_PERM_RESIZE); | |
1893 | return 0; | |
1894 | default: | |
1895 | abort(); | |
85fe2479 | 1896 | } |
85fe2479 FZ |
1897 | } |
1898 | ||
1899 | static inline void coroutine_fn | |
1900 | bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes, | |
1901 | BdrvTrackedRequest *req, int ret) | |
1902 | { | |
1903 | int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE); | |
1904 | BlockDriverState *bs = child->bs; | |
1905 | ||
d73415a3 | 1906 | qatomic_inc(&bs->write_gen); |
85fe2479 | 1907 | |
00695c27 FZ |
1908 | /* |
1909 | * Discard cannot extend the image, but in error handling cases, such as | |
1910 | * when reverting a qcow2 cluster allocation, the discarded range can pass | |
1911 | * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD | |
1912 | * here. Instead, just skip it, since semantically a discard request | |
1913 | * beyond EOF cannot expand the image anyway. | |
1914 | */ | |
7f8f03ef | 1915 | if (ret == 0 && |
cd47d792 FZ |
1916 | (req->type == BDRV_TRACKED_TRUNCATE || |
1917 | end_sector > bs->total_sectors) && | |
1918 | req->type != BDRV_TRACKED_DISCARD) { | |
7f8f03ef FZ |
1919 | bs->total_sectors = end_sector; |
1920 | bdrv_parent_cb_resize(bs); | |
1921 | bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS); | |
85fe2479 | 1922 | } |
00695c27 FZ |
1923 | if (req->bytes) { |
1924 | switch (req->type) { | |
1925 | case BDRV_TRACKED_WRITE: | |
1926 | stat64_max(&bs->wr_highest_offset, offset + bytes); | |
1927 | /* fall through, to set dirty bits */ | |
1928 | case BDRV_TRACKED_DISCARD: | |
1929 | bdrv_set_dirty(bs, offset, bytes); | |
1930 | break; | |
1931 | default: | |
1932 | break; | |
1933 | } | |
1934 | } | |
85fe2479 FZ |
1935 | } |
1936 | ||
61007b31 | 1937 | /* |
04ed95f4 EB |
1938 | * Forwards an already correctly aligned write request to the BlockDriver, |
1939 | * after possibly fragmenting it. | |
61007b31 | 1940 | */ |
85c97ca7 | 1941 | static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child, |
61007b31 | 1942 | BdrvTrackedRequest *req, int64_t offset, unsigned int bytes, |
28c4da28 | 1943 | int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags) |
61007b31 | 1944 | { |
85c97ca7 | 1945 | BlockDriverState *bs = child->bs; |
61007b31 | 1946 | BlockDriver *drv = bs->drv; |
61007b31 SH |
1947 | int ret; |
1948 | ||
04ed95f4 EB |
1949 | uint64_t bytes_remaining = bytes; |
1950 | int max_transfer; | |
61007b31 | 1951 | |
d470ad42 HR |
1952 | if (!drv) { |
1953 | return -ENOMEDIUM; | |
1954 | } | |
1955 | ||
d6883bc9 VSO |
1956 | if (bdrv_has_readonly_bitmaps(bs)) { |
1957 | return -EPERM; | |
1958 | } | |
1959 | ||
cff86b38 EB |
1960 | assert(is_power_of_2(align)); |
1961 | assert((offset & (align - 1)) == 0); | |
1962 | assert((bytes & (align - 1)) == 0); | |
28c4da28 | 1963 | assert(!qiov || qiov_offset + bytes <= qiov->size); |
04ed95f4 EB |
1964 | max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX), |
1965 | align); | |
61007b31 | 1966 | |
85fe2479 | 1967 | ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags); |
61007b31 SH |
1968 | |
1969 | if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF && | |
c1499a5e | 1970 | !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes && |
28c4da28 | 1971 | qemu_iovec_is_zero(qiov, qiov_offset, bytes)) { |
61007b31 SH |
1972 | flags |= BDRV_REQ_ZERO_WRITE; |
1973 | if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) { | |
1974 | flags |= BDRV_REQ_MAY_UNMAP; | |
1975 | } | |
1976 | } | |
1977 | ||
1978 | if (ret < 0) { | |
1979 | /* Do nothing, write notifier decided to fail this request */ | |
1980 | } else if (flags & BDRV_REQ_ZERO_WRITE) { | |
9a4f4c31 | 1981 | bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO); |
9896c876 | 1982 | ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags); |
3ea1a091 | 1983 | } else if (flags & BDRV_REQ_WRITE_COMPRESSED) { |
28c4da28 VSO |
1984 | ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, |
1985 | qiov, qiov_offset); | |
04ed95f4 | 1986 | } else if (bytes <= max_transfer) { |
9a4f4c31 | 1987 | bdrv_debug_event(bs, BLKDBG_PWRITEV); |
28c4da28 | 1988 | ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags); |
04ed95f4 EB |
1989 | } else { |
1990 | bdrv_debug_event(bs, BLKDBG_PWRITEV); | |
1991 | while (bytes_remaining) { | |
1992 | int num = MIN(bytes_remaining, max_transfer); | |
04ed95f4 EB |
1993 | int local_flags = flags; |
1994 | ||
1995 | assert(num); | |
1996 | if (num < bytes_remaining && (flags & BDRV_REQ_FUA) && | |
1997 | !(bs->supported_write_flags & BDRV_REQ_FUA)) { | |
1998 | /* If FUA is going to be emulated by flush, we only | |
1999 | * need to flush on the last iteration */ | |
2000 | local_flags &= ~BDRV_REQ_FUA; | |
2001 | } | |
04ed95f4 EB |
2002 | |
2003 | ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining, | |
134b7dec HR |
2004 | num, qiov, |
2005 | qiov_offset + bytes - bytes_remaining, | |
28c4da28 | 2006 | local_flags); |
04ed95f4 EB |
2007 | if (ret < 0) { |
2008 | break; | |
2009 | } | |
2010 | bytes_remaining -= num; | |
2011 | } | |
61007b31 | 2012 | } |
9a4f4c31 | 2013 | bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE); |
61007b31 | 2014 | |
61007b31 | 2015 | if (ret >= 0) { |
04ed95f4 | 2016 | ret = 0; |
61007b31 | 2017 | } |
85fe2479 | 2018 | bdrv_co_write_req_finish(child, offset, bytes, req, ret); |
61007b31 SH |
2019 | |
2020 | return ret; | |
2021 | } | |
2022 | ||
85c97ca7 | 2023 | static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child, |
9eeb6dd1 FZ |
2024 | int64_t offset, |
2025 | unsigned int bytes, | |
2026 | BdrvRequestFlags flags, | |
2027 | BdrvTrackedRequest *req) | |
2028 | { | |
85c97ca7 | 2029 | BlockDriverState *bs = child->bs; |
9eeb6dd1 | 2030 | QEMUIOVector local_qiov; |
a5b8dd2c | 2031 | uint64_t align = bs->bl.request_alignment; |
9eeb6dd1 | 2032 | int ret = 0; |
7a3f542f VSO |
2033 | bool padding; |
2034 | BdrvRequestPadding pad; | |
9eeb6dd1 | 2035 | |
7a3f542f VSO |
2036 | padding = bdrv_init_padding(bs, offset, bytes, &pad); |
2037 | if (padding) { | |
8ac5aab2 | 2038 | bdrv_make_request_serialising(req, align); |
9eeb6dd1 | 2039 | |
7a3f542f VSO |
2040 | bdrv_padding_rmw_read(child, req, &pad, true); |
2041 | ||
2042 | if (pad.head || pad.merge_reads) { | |
2043 | int64_t aligned_offset = offset & ~(align - 1); | |
2044 | int64_t write_bytes = pad.merge_reads ? pad.buf_len : align; | |
2045 | ||
2046 | qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes); | |
2047 | ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes, | |
28c4da28 | 2048 | align, &local_qiov, 0, |
7a3f542f VSO |
2049 | flags & ~BDRV_REQ_ZERO_WRITE); |
2050 | if (ret < 0 || pad.merge_reads) { | |
2051 | /* Error or all work is done */ | |
2052 | goto out; | |
2053 | } | |
2054 | offset += write_bytes - pad.head; | |
2055 | bytes -= write_bytes - pad.head; | |
9eeb6dd1 | 2056 | } |
9eeb6dd1 FZ |
2057 | } |
2058 | ||
2059 | assert(!bytes || (offset & (align - 1)) == 0); | |
2060 | if (bytes >= align) { | |
2061 | /* Write the aligned part in the middle. */ | |
2062 | uint64_t aligned_bytes = bytes & ~(align - 1); | |
85c97ca7 | 2063 | ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align, |
28c4da28 | 2064 | NULL, 0, flags); |
9eeb6dd1 | 2065 | if (ret < 0) { |
7a3f542f | 2066 | goto out; |
9eeb6dd1 FZ |
2067 | } |
2068 | bytes -= aligned_bytes; | |
2069 | offset += aligned_bytes; | |
2070 | } | |
2071 | ||
2072 | assert(!bytes || (offset & (align - 1)) == 0); | |
2073 | if (bytes) { | |
7a3f542f | 2074 | assert(align == pad.tail + bytes); |
9eeb6dd1 | 2075 | |
7a3f542f | 2076 | qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align); |
85c97ca7 | 2077 | ret = bdrv_aligned_pwritev(child, req, offset, align, align, |
28c4da28 VSO |
2078 | &local_qiov, 0, |
2079 | flags & ~BDRV_REQ_ZERO_WRITE); | |
9eeb6dd1 | 2080 | } |
9eeb6dd1 | 2081 | |
7a3f542f VSO |
2082 | out: |
2083 | bdrv_padding_destroy(&pad); | |
2084 | ||
2085 | return ret; | |
9eeb6dd1 FZ |
2086 | } |
2087 | ||
61007b31 SH |
2088 | /* |
2089 | * Handle a write request in coroutine context | |
2090 | */ | |
a03ef88f | 2091 | int coroutine_fn bdrv_co_pwritev(BdrvChild *child, |
61007b31 SH |
2092 | int64_t offset, unsigned int bytes, QEMUIOVector *qiov, |
2093 | BdrvRequestFlags flags) | |
1acc3466 VSO |
2094 | { |
2095 | return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags); | |
2096 | } | |
2097 | ||
2098 | int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, | |
2099 | int64_t offset, unsigned int bytes, QEMUIOVector *qiov, size_t qiov_offset, | |
2100 | BdrvRequestFlags flags) | |
61007b31 | 2101 | { |
a03ef88f | 2102 | BlockDriverState *bs = child->bs; |
61007b31 | 2103 | BdrvTrackedRequest req; |
a5b8dd2c | 2104 | uint64_t align = bs->bl.request_alignment; |
7a3f542f | 2105 | BdrvRequestPadding pad; |
61007b31 SH |
2106 | int ret; |
2107 | ||
f42cf447 DB |
2108 | trace_bdrv_co_pwritev(child->bs, offset, bytes, flags); |
2109 | ||
f4dad307 | 2110 | if (!bdrv_is_inserted(bs)) { |
61007b31 SH |
2111 | return -ENOMEDIUM; |
2112 | } | |
61007b31 | 2113 | |
8b117001 | 2114 | ret = bdrv_check_request32(offset, bytes); |
61007b31 SH |
2115 | if (ret < 0) { |
2116 | return ret; | |
2117 | } | |
2118 | ||
f2208fdc AG |
2119 | /* If the request is misaligned then we can't make it efficient */ |
2120 | if ((flags & BDRV_REQ_NO_FALLBACK) && | |
2121 | !QEMU_IS_ALIGNED(offset | bytes, align)) | |
2122 | { | |
2123 | return -ENOTSUP; | |
2124 | } | |
2125 | ||
ac9d00bf VSO |
2126 | if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) { |
2127 | /* | |
2128 | * Aligning a zero-length request is nonsense. Even if the driver assigns | |
2129 | * special meaning to zero length (like qcow2_co_pwritev_compressed_part), | |
2130 | * we can't pass it to the driver due to request_alignment. | |
2131 | * | |
2132 | * Still, there is no reason to return an error if someone does an | |
2133 | * unaligned zero-length write occasionally. | |
2134 | */ | |
2135 | return 0; | |
2136 | } | |
2137 | ||
99723548 | 2138 | bdrv_inc_in_flight(bs); |
61007b31 SH |
2139 | /* |
2140 | * Align write if necessary by performing a read-modify-write cycle. | |
2141 | * Pad qiov with the read parts and be sure to have a tracked request not | |
2142 | * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. | |
2143 | */ | |
ebde595c | 2144 | tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE); |
61007b31 | 2145 | |
18a59f03 | 2146 | if (flags & BDRV_REQ_ZERO_WRITE) { |
85c97ca7 | 2147 | ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req); |
9eeb6dd1 FZ |
2148 | goto out; |
2149 | } | |
2150 | ||
1acc3466 | 2151 | if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) { |
8ac5aab2 | 2152 | bdrv_make_request_serialising(&req, align); |
7a3f542f | 2153 | bdrv_padding_rmw_read(child, &req, &pad, false); |
61007b31 SH |
2154 | } |
2155 | ||
85c97ca7 | 2156 | ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align, |
1acc3466 | 2157 | qiov, qiov_offset, flags); |
61007b31 | 2158 | |
7a3f542f | 2159 | bdrv_padding_destroy(&pad); |
61007b31 | 2160 | |
9eeb6dd1 FZ |
2161 | out: |
2162 | tracked_request_end(&req); | |
99723548 | 2163 | bdrv_dec_in_flight(bs); |
7a3f542f | 2164 | |
61007b31 SH |
2165 | return ret; |
2166 | } | |
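/*
 * Minimal caller sketch (hypothetical helper, not a public API): write one
 * buffer with BDRV_REQ_FUA so the data has reached stable storage by the
 * time bdrv_co_pwritev() returns, either natively or via the flush-based
 * emulation handled in bdrv_aligned_pwritev() above.
 */
static int coroutine_fn example_write_durable(BdrvChild *child, int64_t offset,
                                              void *buf, unsigned int bytes)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    return bdrv_co_pwritev(child, offset, bytes, &qiov, BDRV_REQ_FUA);
}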
2167 | ||
a03ef88f | 2168 | int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset, |
f5a5ca79 | 2169 | int bytes, BdrvRequestFlags flags) |
61007b31 | 2170 | { |
f5a5ca79 | 2171 | trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags); |
61007b31 | 2172 | |
a03ef88f | 2173 | if (!(child->bs->open_flags & BDRV_O_UNMAP)) { |
61007b31 SH |
2174 | flags &= ~BDRV_REQ_MAY_UNMAP; |
2175 | } | |
61007b31 | 2176 | |
f5a5ca79 | 2177 | return bdrv_co_pwritev(child, offset, bytes, NULL, |
74021bc4 | 2178 | BDRV_REQ_ZERO_WRITE | flags); |
61007b31 SH |
2179 | } |
2180 | ||
4085f5c7 JS |
2181 | /* |
2182 | * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not. | |
2183 | */ | |
2184 | int bdrv_flush_all(void) | |
2185 | { | |
2186 | BdrvNextIterator it; | |
2187 | BlockDriverState *bs = NULL; | |
2188 | int result = 0; | |
2189 | ||
c8aa7895 PD |
2190 | /* |
2191 | * The bdrv queue is managed by record/replay; | |
2192 | * creating a new flush request when stopping | |
2193 | * the VM may break determinism. | |
2194 | */ | |
2195 | if (replay_events_enabled()) { | |
2196 | return result; | |
2197 | } | |
2198 | ||
4085f5c7 JS |
2199 | for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { |
2200 | AioContext *aio_context = bdrv_get_aio_context(bs); | |
2201 | int ret; | |
2202 | ||
2203 | aio_context_acquire(aio_context); | |
2204 | ret = bdrv_flush(bs); | |
2205 | if (ret < 0 && !result) { | |
2206 | result = ret; | |
2207 | } | |
2208 | aio_context_release(aio_context); | |
2209 | } | |
2210 | ||
2211 | return result; | |
2212 | } | |
2213 | ||
61007b31 SH |
2214 | /* |
2215 | * Returns the allocation status of the specified sectors. | |
2216 | * Drivers not implementing the functionality are assumed to not support | |
2217 | * backing files, hence all their sectors are reported as allocated. | |
2218 | * | |
86a3d5c6 EB |
2219 | * If 'want_zero' is true, the caller is querying for mapping |
2220 | * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and | |
2221 | * _ZERO where possible; otherwise, the result favors larger 'pnum', | |
2222 | * with a focus on accurate BDRV_BLOCK_ALLOCATED. | |
c9ce8c4d | 2223 | * |
2e8bc787 | 2224 | * If 'offset' is beyond the end of the disk image the return value is |
fb0d8654 | 2225 | * BDRV_BLOCK_EOF and 'pnum' is set to 0. |
61007b31 | 2226 | * |
2e8bc787 | 2227 | * 'bytes' is the max value 'pnum' should be set to. If bytes goes |
fb0d8654 EB |
2228 | * beyond the end of the disk image it will be clamped; if 'pnum' is set to |
2229 | * the end of the image, then the returned value will include BDRV_BLOCK_EOF. | |
67a0fd2a | 2230 | * |
2e8bc787 EB |
2231 | * 'pnum' is set to the number of bytes (including and immediately |
2232 | * following the specified offset) that are easily known to be in the | |
2233 | * same allocated/unallocated state. Note that a second call starting | |
2234 | * at the original offset plus returned pnum may have the same status. | |
2235 | * The returned value is non-zero on success except at end-of-file. | |
2236 | * | |
2237 | * Returns negative errno on failure. Otherwise, if the | |
2238 | * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are | |
2239 | * set to the host mapping and BDS corresponding to the guest offset. | |
61007b31 | 2240 | */ |
2e8bc787 EB |
2241 | static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, |
2242 | bool want_zero, | |
2243 | int64_t offset, int64_t bytes, | |
2244 | int64_t *pnum, int64_t *map, | |
2245 | BlockDriverState **file) | |
2246 | { | |
2247 | int64_t total_size; | |
2248 | int64_t n; /* bytes */ | |
efa6e2ed | 2249 | int ret; |
2e8bc787 | 2250 | int64_t local_map = 0; |
298a1665 | 2251 | BlockDriverState *local_file = NULL; |
efa6e2ed EB |
2252 | int64_t aligned_offset, aligned_bytes; |
2253 | uint32_t align; | |
549ec0d9 | 2254 | bool has_filtered_child; |
61007b31 | 2255 | |
298a1665 EB |
2256 | assert(pnum); |
2257 | *pnum = 0; | |
2e8bc787 EB |
2258 | total_size = bdrv_getlength(bs); |
2259 | if (total_size < 0) { | |
2260 | ret = total_size; | |
298a1665 | 2261 | goto early_out; |
61007b31 SH |
2262 | } |
2263 | ||
2e8bc787 | 2264 | if (offset >= total_size) { |
298a1665 EB |
2265 | ret = BDRV_BLOCK_EOF; |
2266 | goto early_out; | |
61007b31 | 2267 | } |
2e8bc787 | 2268 | if (!bytes) { |
298a1665 EB |
2269 | ret = 0; |
2270 | goto early_out; | |
9cdcfd9f | 2271 | } |
61007b31 | 2272 | |
2e8bc787 EB |
2273 | n = total_size - offset; |
2274 | if (n < bytes) { | |
2275 | bytes = n; | |
61007b31 SH |
2276 | } |
2277 | ||
d470ad42 HR |
2278 | /* Must be non-NULL or bdrv_getlength() would have failed */ |
2279 | assert(bs->drv); | |
549ec0d9 HR |
2280 | has_filtered_child = bdrv_filter_child(bs); |
2281 | if (!bs->drv->bdrv_co_block_status && !has_filtered_child) { | |
2e8bc787 | 2282 | *pnum = bytes; |
61007b31 | 2283 | ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED; |
2e8bc787 | 2284 | if (offset + bytes == total_size) { |
fb0d8654 EB |
2285 | ret |= BDRV_BLOCK_EOF; |
2286 | } | |
61007b31 | 2287 | if (bs->drv->protocol_name) { |
2e8bc787 EB |
2288 | ret |= BDRV_BLOCK_OFFSET_VALID; |
2289 | local_map = offset; | |
298a1665 | 2290 | local_file = bs; |
61007b31 | 2291 | } |
298a1665 | 2292 | goto early_out; |
61007b31 SH |
2293 | } |
2294 | ||
99723548 | 2295 | bdrv_inc_in_flight(bs); |
efa6e2ed EB |
2296 | |
2297 | /* Round out to request_alignment boundaries */ | |
86a3d5c6 | 2298 | align = bs->bl.request_alignment; |
efa6e2ed EB |
2299 | aligned_offset = QEMU_ALIGN_DOWN(offset, align); |
2300 | aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset; | |
2301 | ||
549ec0d9 HR |
2302 | if (bs->drv->bdrv_co_block_status) { |
2303 | ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset, | |
2304 | aligned_bytes, pnum, &local_map, | |
2305 | &local_file); | |
2306 | } else { | |
2307 | /* Default code for filters */ | |
2308 | ||
2309 | local_file = bdrv_filter_bs(bs); | |
2310 | assert(local_file); | |
2311 | ||
2312 | *pnum = aligned_bytes; | |
2313 | local_map = aligned_offset; | |
2314 | ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID; | |
2315 | } | |
636cb512 EB |
2316 | if (ret < 0) { |
2317 | *pnum = 0; | |
2318 | goto out; | |
efa6e2ed EB |
2319 | } |
2320 | ||
2e8bc787 | 2321 | /* |
636cb512 | 2322 | * The driver's result must be a non-zero multiple of request_alignment. |
efa6e2ed | 2323 | * Clamp pnum and adjust map to original request. |
2e8bc787 | 2324 | */ |
636cb512 EB |
2325 | assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) && |
2326 | align > offset - aligned_offset); | |
69f47505 VSO |
2327 | if (ret & BDRV_BLOCK_RECURSE) { |
2328 | assert(ret & BDRV_BLOCK_DATA); | |
2329 | assert(ret & BDRV_BLOCK_OFFSET_VALID); | |
2330 | assert(!(ret & BDRV_BLOCK_ZERO)); | |
2331 | } | |
2332 | ||
efa6e2ed EB |
2333 | *pnum -= offset - aligned_offset; |
2334 | if (*pnum > bytes) { | |
2335 | *pnum = bytes; | |
61007b31 | 2336 | } |
2e8bc787 | 2337 | if (ret & BDRV_BLOCK_OFFSET_VALID) { |
efa6e2ed | 2338 | local_map += offset - aligned_offset; |
2e8bc787 | 2339 | } |
61007b31 SH |
2340 | |
2341 | if (ret & BDRV_BLOCK_RAW) { | |
298a1665 | 2342 | assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file); |
2e8bc787 EB |
2343 | ret = bdrv_co_block_status(local_file, want_zero, local_map, |
2344 | *pnum, pnum, &local_map, &local_file); | |
99723548 | 2345 | goto out; |
61007b31 SH |
2346 | } |
2347 | ||
2348 | if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) { | |
2349 | ret |= BDRV_BLOCK_ALLOCATED; | |
d40f4a56 | 2350 | } else if (bs->drv->supports_backing) { |
cb850315 HR |
2351 | BlockDriverState *cow_bs = bdrv_cow_bs(bs); |
2352 | ||
d40f4a56 AG |
2353 | if (!cow_bs) { |
2354 | ret |= BDRV_BLOCK_ZERO; | |
2355 | } else if (want_zero) { | |
cb850315 | 2356 | int64_t size2 = bdrv_getlength(cow_bs); |
c9ce8c4d | 2357 | |
2e8bc787 | 2358 | if (size2 >= 0 && offset >= size2) { |
61007b31 SH |
2359 | ret |= BDRV_BLOCK_ZERO; |
2360 | } | |
2361 | } | |
2362 | } | |
2363 | ||
69f47505 VSO |
2364 | if (want_zero && ret & BDRV_BLOCK_RECURSE && |
2365 | local_file && local_file != bs && | |
61007b31 SH |
2366 | (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && |
2367 | (ret & BDRV_BLOCK_OFFSET_VALID)) { | |
2e8bc787 EB |
2368 | int64_t file_pnum; |
2369 | int ret2; | |
61007b31 | 2370 | |
2e8bc787 EB |
2371 | ret2 = bdrv_co_block_status(local_file, want_zero, local_map, |
2372 | *pnum, &file_pnum, NULL, NULL); | |
61007b31 SH |
2373 | if (ret2 >= 0) { |
2374 | /* Ignore errors. This is just providing extra information; it | |
2375 | * is useful but not necessary. | |
2376 | */ | |
c61e684e EB |
2377 | if (ret2 & BDRV_BLOCK_EOF && |
2378 | (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) { | |
2379 | /* | |
2380 | * It is valid for the format block driver to read | |
2381 | * beyond the end of the underlying file's current | |
2382 | * size; such areas read as zero. | |
2383 | */ | |
61007b31 SH |
2384 | ret |= BDRV_BLOCK_ZERO; |
2385 | } else { | |
2386 | /* Limit request to the range reported by the protocol driver */ | |
2387 | *pnum = file_pnum; | |
2388 | ret |= (ret2 & BDRV_BLOCK_ZERO); | |
2389 | } | |
2390 | } | |
2391 | } | |
2392 | ||
99723548 PB |
2393 | out: |
2394 | bdrv_dec_in_flight(bs); | |
2e8bc787 | 2395 | if (ret >= 0 && offset + *pnum == total_size) { |
fb0d8654 EB |
2396 | ret |= BDRV_BLOCK_EOF; |
2397 | } | |
298a1665 EB |
2398 | early_out: |
2399 | if (file) { | |
2400 | *file = local_file; | |
2401 | } | |
2e8bc787 EB |
2402 | if (map) { |
2403 | *map = local_map; | |
2404 | } | |
61007b31 SH |
2405 | return ret; |
2406 | } | |
2407 | ||
21c2283e | 2408 | int coroutine_fn |
f9e694cb VSO |
2409 | bdrv_co_common_block_status_above(BlockDriverState *bs, |
2410 | BlockDriverState *base, | |
3555a432 | 2411 | bool include_base, |
f9e694cb VSO |
2412 | bool want_zero, |
2413 | int64_t offset, | |
2414 | int64_t bytes, | |
2415 | int64_t *pnum, | |
2416 | int64_t *map, | |
a92b1b06 EB |
2417 | BlockDriverState **file, |
2418 | int *depth) | |
ba3f0e25 | 2419 | { |
67c095c8 | 2420 | int ret; |
ba3f0e25 | 2421 | BlockDriverState *p; |
67c095c8 | 2422 | int64_t eof = 0; |
a92b1b06 | 2423 | int dummy; |
ba3f0e25 | 2424 | |
3555a432 | 2425 | assert(!include_base || base); /* Can't include NULL base */ |
67c095c8 | 2426 | |
a92b1b06 EB |
2427 | if (!depth) { |
2428 | depth = &dummy; | |
2429 | } | |
2430 | *depth = 0; | |
2431 | ||
624f27bb VSO |
2432 | if (!include_base && bs == base) { |
2433 | *pnum = bytes; | |
2434 | return 0; | |
2435 | } | |
2436 | ||
67c095c8 | 2437 | ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file); |
a92b1b06 | 2438 | ++*depth; |
3555a432 | 2439 | if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) { |
67c095c8 VSO |
2440 | return ret; |
2441 | } | |
2442 | ||
2443 | if (ret & BDRV_BLOCK_EOF) { | |
2444 | eof = offset + *pnum; | |
2445 | } | |
2446 | ||
2447 | assert(*pnum <= bytes); | |
2448 | bytes = *pnum; | |
2449 | ||
3555a432 | 2450 | for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base; |
67c095c8 VSO |
2451 | p = bdrv_filter_or_cow_bs(p)) |
2452 | { | |
5b648c67 EB |
2453 | ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map, |
2454 | file); | |
a92b1b06 | 2455 | ++*depth; |
c61e684e | 2456 | if (ret < 0) { |
67c095c8 | 2457 | return ret; |
c61e684e | 2458 | } |
67c095c8 | 2459 | if (*pnum == 0) { |
c61e684e | 2460 | /* |
67c095c8 VSO |
2461 | * The top layer deferred to this layer, and because this layer is |
2462 | * short, any zeroes that we synthesize beyond EOF behave as if they | |
2463 | * were allocated at this layer. | |
2464 | * | |
2465 | * We don't include BDRV_BLOCK_EOF into ret, as upper layer may be | |
2466 | * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see | |
2467 | * below. | |
c61e684e | 2468 | */ |
67c095c8 | 2469 | assert(ret & BDRV_BLOCK_EOF); |
5b648c67 | 2470 | *pnum = bytes; |
67c095c8 VSO |
2471 | if (file) { |
2472 | *file = p; | |
2473 | } | |
2474 | ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED; | |
2475 | break; | |
c61e684e | 2476 | } |
67c095c8 VSO |
2477 | if (ret & BDRV_BLOCK_ALLOCATED) { |
2478 | /* | |
2479 | * We've found the node and the status, we must break. | |
2480 | * | |
2481 | * Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be | |
2482 | * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see | |
2483 | * below. | |
2484 | */ | |
2485 | ret &= ~BDRV_BLOCK_EOF; | |
ba3f0e25 FZ |
2486 | break; |
2487 | } | |
67c095c8 | 2488 | |
3555a432 VSO |
2489 | if (p == base) { |
2490 | assert(include_base); | |
2491 | break; | |
2492 | } | |
2493 | ||
67c095c8 VSO |
2494 | /* |
2495 | * OK, the [offset, offset + *pnum) region is unallocated on this layer, | |
2496 | * so continue diving down the backing chain. | |
2497 | */ | |
2498 | assert(*pnum <= bytes); | |
2499 | bytes = *pnum; | |
ba3f0e25 | 2500 | } |
67c095c8 VSO |
2501 | |
2502 | if (offset + *pnum == eof) { | |
2503 | ret |= BDRV_BLOCK_EOF; | |
2504 | } | |
2505 | ||
ba3f0e25 FZ |
2506 | return ret; |
2507 | } | |
2508 | ||
31826642 EB |
2509 | int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base, |
2510 | int64_t offset, int64_t bytes, int64_t *pnum, | |
2511 | int64_t *map, BlockDriverState **file) | |
c9ce8c4d | 2512 | { |
3555a432 | 2513 | return bdrv_common_block_status_above(bs, base, false, true, offset, bytes, |
a92b1b06 | 2514 | pnum, map, file, NULL); |
c9ce8c4d EB |
2515 | } |
2516 | ||
237d78f8 EB |
2517 | int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes, |
2518 | int64_t *pnum, int64_t *map, BlockDriverState **file) | |
ba3f0e25 | 2519 | { |
cb850315 | 2520 | return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs), |
31826642 | 2521 | offset, bytes, pnum, map, file); |
ba3f0e25 FZ |
2522 | } |
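/*
 * Sketch of the usage pattern implied by the comments above (hypothetical
 * helper, not a public API): walk an image with bdrv_block_status(); each
 * call reports a contiguous range in *pnum and the next call starts at
 * offset + *pnum, until the end of the image is reached.
 */
static int example_count_zero_bytes(BlockDriverState *bs, int64_t *zeroes)
{
    int64_t size = bdrv_getlength(bs);
    int64_t offset = 0;

    if (size < 0) {
        return size;
    }

    *zeroes = 0;
    while (offset < size) {
        int64_t pnum;
        int ret = bdrv_block_status(bs, offset, size - offset, &pnum,
                                    NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (!pnum) {
            break; /* end of file */
        }
        if (ret & BDRV_BLOCK_ZERO) {
            *zeroes += pnum;
        }
        offset += pnum;
    }
    return 0;
}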
2523 | ||
46cd1e8a AG |
2524 | /* |
2525 | * Check @bs (and its backing chain) to see if the range defined | |
2526 | * by @offset and @bytes is known to read as zeroes. | |
2527 | * Return 1 if that is the case, 0 otherwise and -errno on error. | |
2528 | * This test is meant to be fast rather than accurate so returning 0 | |
2529 | * does not guarantee non-zero data. | |
2530 | */ | |
2531 | int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, | |
2532 | int64_t bytes) | |
2533 | { | |
2534 | int ret; | |
2535 | int64_t pnum = bytes; | |
2536 | ||
2537 | if (!bytes) { | |
2538 | return 1; | |
2539 | } | |
2540 | ||
2541 | ret = bdrv_common_block_status_above(bs, NULL, false, false, offset, | |
a92b1b06 | 2542 | bytes, &pnum, NULL, NULL, NULL); |
46cd1e8a AG |
2543 | |
2544 | if (ret < 0) { | |
2545 | return ret; | |
2546 | } | |
2547 | ||
2548 | return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO); | |
2549 | } | |
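/*
 * Minimal caller sketch (hypothetical helper, not a public API): use
 * bdrv_co_is_zero_fast() as a cheap pre-check and skip the zero write when
 * the range is already known to read as zeroes.
 */
static int coroutine_fn example_make_zero(BdrvChild *child, int64_t offset,
                                          int bytes)
{
    int ret = bdrv_co_is_zero_fast(child->bs, offset, bytes);

    if (ret < 0) {
        return ret;
    }
    if (ret) {
        return 0; /* already reads as zeroes */
    }
    return bdrv_co_pwrite_zeroes(child, offset, bytes, BDRV_REQ_MAY_UNMAP);
}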
2550 | ||
d6a644bb EB |
2551 | int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset, |
2552 | int64_t bytes, int64_t *pnum) | |
61007b31 | 2553 | { |
7ddb99b9 EB |
2554 | int ret; |
2555 | int64_t dummy; | |
d6a644bb | 2556 | |
3555a432 VSO |
2557 | ret = bdrv_common_block_status_above(bs, bs, true, false, offset, |
2558 | bytes, pnum ? pnum : &dummy, NULL, | |
a92b1b06 | 2559 | NULL, NULL); |
61007b31 SH |
2560 | if (ret < 0) { |
2561 | return ret; | |
2562 | } | |
2563 | return !!(ret & BDRV_BLOCK_ALLOCATED); | |
2564 | } | |
2565 | ||
2566 | /* | |
2567 | * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP] | |
2568 | * | |
a92b1b06 EB |
2569 | * Return a positive depth if (a prefix of) the given range is allocated |
2570 | * in any image between BASE and TOP (BASE is only included if include_base | |
2571 | * is set). Depth 1 is TOP, 2 is the first backing layer, and so forth. | |
170d3bd3 AS |
2572 | * BASE can be NULL to check if the given offset is allocated in any |
2573 | * image of the chain. Return 0 otherwise, or negative errno on | |
2574 | * failure. | |
61007b31 | 2575 | * |
51b0a488 EB |
2576 | * 'pnum' is set to the number of bytes (including and immediately |
2577 | * following the specified offset) that are known to be in the same | |
2578 | * allocated/unallocated state. Note that a subsequent call starting | |
2579 | * at 'offset + *pnum' may return the same allocation status (in other | |
2580 | * words, the result is not necessarily the maximum possible range); | |
2581 | * but 'pnum' will only be 0 when end of file is reached. | |
61007b31 SH |
2582 | */ |
2583 | int bdrv_is_allocated_above(BlockDriverState *top, | |
2584 | BlockDriverState *base, | |
170d3bd3 AS |
2585 | bool include_base, int64_t offset, |
2586 | int64_t bytes, int64_t *pnum) | |
61007b31 | 2587 | { |
a92b1b06 | 2588 | int depth; |
7e7e5100 | 2589 | int ret = bdrv_common_block_status_above(top, base, include_base, false, |
a92b1b06 EB |
2590 | offset, bytes, pnum, NULL, NULL, |
2591 | &depth); | |
7e7e5100 VSO |
2592 | if (ret < 0) { |
2593 | return ret; | |
61007b31 SH |
2594 | } |
2595 | ||
a92b1b06 EB |
2596 | if (ret & BDRV_BLOCK_ALLOCATED) { |
2597 | return depth; | |
2598 | } | |
2599 | return 0; | |
61007b31 SH |
2600 | } |
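/*
 * Minimal caller sketch (hypothetical helper, not a public API): interpret
 * the depth returned by bdrv_is_allocated_above().  Depth 1 means the data
 * lives in @top itself; a larger depth means it comes from a backing layer.
 */
static int example_is_in_top_layer(BlockDriverState *top, int64_t offset,
                                   int64_t bytes, int64_t *pnum)
{
    int depth = bdrv_is_allocated_above(top, NULL, false, offset, bytes, pnum);

    if (depth < 0) {
        return depth;
    }
    return depth == 1;
}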
2601 | ||
21c2283e | 2602 | int coroutine_fn |
b33b354f | 2603 | bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) |
1a8ae822 KW |
2604 | { |
2605 | BlockDriver *drv = bs->drv; | |
c4db2e25 | 2606 | BlockDriverState *child_bs = bdrv_primary_bs(bs); |
dc88a467 SH |
2607 | int ret = -ENOTSUP; |
2608 | ||
b33b354f VSO |
2609 | if (!drv) { |
2610 | return -ENOMEDIUM; | |
2611 | } | |
2612 | ||
dc88a467 | 2613 | bdrv_inc_in_flight(bs); |
1a8ae822 | 2614 | |
b33b354f VSO |
2615 | if (drv->bdrv_load_vmstate) { |
2616 | ret = drv->bdrv_load_vmstate(bs, qiov, pos); | |
c4db2e25 | 2617 | } else if (child_bs) { |
b33b354f | 2618 | ret = bdrv_co_readv_vmstate(child_bs, qiov, pos); |
1a8ae822 KW |
2619 | } |
2620 | ||
dc88a467 | 2621 | bdrv_dec_in_flight(bs); |
b33b354f | 2622 | |
dc88a467 | 2623 | return ret; |
1a8ae822 KW |
2624 | } |
2625 | ||
b33b354f VSO |
2626 | int coroutine_fn |
2627 | bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) | |
61007b31 | 2628 | { |
b33b354f VSO |
2629 | BlockDriver *drv = bs->drv; |
2630 | BlockDriverState *child_bs = bdrv_primary_bs(bs); | |
2631 | int ret = -ENOTSUP; | |
61007b31 | 2632 | |
b33b354f VSO |
2633 | if (!drv) { |
2634 | return -ENOMEDIUM; | |
b433d942 KW |
2635 | } |
2636 | ||
b33b354f | 2637 | bdrv_inc_in_flight(bs); |
61007b31 | 2638 | |
b33b354f VSO |
2639 | if (drv->bdrv_save_vmstate) { |
2640 | ret = drv->bdrv_save_vmstate(bs, qiov, pos); | |
2641 | } else if (child_bs) { | |
2642 | ret = bdrv_co_writev_vmstate(child_bs, qiov, pos); | |
2643 | } | |
2644 | ||
2645 | bdrv_dec_in_flight(bs); | |
2646 | ||
2647 | return ret; | |
61007b31 SH |
2648 | } |
2649 | ||
b33b354f | 2650 | int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, |
61007b31 | 2651 | int64_t pos, int size) |
5ddda0b8 | 2652 | { |
0d93ed08 | 2653 | QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); |
b33b354f | 2654 | int ret = bdrv_writev_vmstate(bs, &qiov, pos); |
b433d942 | 2655 | |
b33b354f | 2656 | return ret < 0 ? ret : size; |
5ddda0b8 KW |
2657 | } |
2658 | ||
b33b354f VSO |
2659 | int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, |
2660 | int64_t pos, int size) | |
61007b31 | 2661 | { |
b33b354f VSO |
2662 | QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); |
2663 | int ret = bdrv_readv_vmstate(bs, &qiov, pos); | |
2664 | ||
2665 | return ret < 0 ? ret : size; | |
61007b31 SH |
2666 | } |
2667 | ||
2668 | /**************************************************************/ | |
2669 | /* async I/Os */ | |
2670 | ||
61007b31 SH |
2671 | void bdrv_aio_cancel(BlockAIOCB *acb) |
2672 | { | |
2673 | qemu_aio_ref(acb); | |
2674 | bdrv_aio_cancel_async(acb); | |
2675 | while (acb->refcnt > 1) { | |
2676 | if (acb->aiocb_info->get_aio_context) { | |
2677 | aio_poll(acb->aiocb_info->get_aio_context(acb), true); | |
2678 | } else if (acb->bs) { | |
2f47da5f PB |
2679 | /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so |
2680 | * assert that we're not using an I/O thread. Thread-safe | |
2681 | * code should use bdrv_aio_cancel_async exclusively. | |
2682 | */ | |
2683 | assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context()); | |
61007b31 SH |
2684 | aio_poll(bdrv_get_aio_context(acb->bs), true); |
2685 | } else { | |
2686 | abort(); | |
2687 | } | |
2688 | } | |
2689 | qemu_aio_unref(acb); | |
2690 | } | |
2691 | ||
2692 | /* Async version of aio cancel. The caller is not blocked if the acb implements | |
2693 | * cancel_async; otherwise we do nothing and let the request complete normally. | |
2694 | * In either case the completion callback must be called. */ | |
2695 | void bdrv_aio_cancel_async(BlockAIOCB *acb) | |
2696 | { | |
2697 | if (acb->aiocb_info->cancel_async) { | |
2698 | acb->aiocb_info->cancel_async(acb); | |
2699 | } | |
2700 | } | |
2701 | ||
61007b31 SH |
2702 | /**************************************************************/ |
2703 | /* Coroutine block device emulation */ | |
2704 | ||
61007b31 SH |
2705 | int coroutine_fn bdrv_co_flush(BlockDriverState *bs) |
2706 | { | |
883833e2 HR |
2707 | BdrvChild *primary_child = bdrv_primary_child(bs); |
2708 | BdrvChild *child; | |
49ca6259 FZ |
2709 | int current_gen; |
2710 | int ret = 0; | |
2711 | ||
2712 | bdrv_inc_in_flight(bs); | |
61007b31 | 2713 | |
e914404e | 2714 | if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) || |
1b6bc94d | 2715 | bdrv_is_sg(bs)) { |
49ca6259 | 2716 | goto early_exit; |
61007b31 SH |
2717 | } |
2718 | ||
3783fa3d | 2719 | qemu_co_mutex_lock(&bs->reqs_lock); |
d73415a3 | 2720 | current_gen = qatomic_read(&bs->write_gen); |
3ff2f67a EY |
2721 | |
2722 | /* Wait until any previous flushes are completed */ | |
99723548 | 2723 | while (bs->active_flush_req) { |
3783fa3d | 2724 | qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock); |
3ff2f67a EY |
2725 | } |
2726 | ||
3783fa3d | 2727 | /* Flushes reach this point in nondecreasing current_gen order. */ |
99723548 | 2728 | bs->active_flush_req = true; |
3783fa3d | 2729 | qemu_co_mutex_unlock(&bs->reqs_lock); |
3ff2f67a | 2730 | |
c32b82af PD |
2731 | /* Write back all layers by calling one driver function */ |
2732 | if (bs->drv->bdrv_co_flush) { | |
2733 | ret = bs->drv->bdrv_co_flush(bs); | |
2734 | goto out; | |
2735 | } | |
2736 | ||
61007b31 | 2737 | /* Write back cached data to the OS even with cache=unsafe */ |
883833e2 | 2738 | BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS); |
61007b31 SH |
2739 | if (bs->drv->bdrv_co_flush_to_os) { |
2740 | ret = bs->drv->bdrv_co_flush_to_os(bs); | |
2741 | if (ret < 0) { | |
cdb5e315 | 2742 | goto out; |
61007b31 SH |
2743 | } |
2744 | } | |
2745 | ||
2746 | /* But don't actually force it to the disk with cache=unsafe */ | |
2747 | if (bs->open_flags & BDRV_O_NO_FLUSH) { | |
883833e2 | 2748 | goto flush_children; |
61007b31 SH |
2749 | } |
2750 | ||
3ff2f67a EY |
2751 | /* Check if we really need to flush anything */ |
2752 | if (bs->flushed_gen == current_gen) { | |
883833e2 | 2753 | goto flush_children; |
3ff2f67a EY |
2754 | } |
2755 | ||
883833e2 | 2756 | BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK); |
d470ad42 HR |
2757 | if (!bs->drv) { |
2758 | /* bs->drv->bdrv_co_flush() might have ejected the BDS | |
2759 | * (even in case of apparent success) */ | |
2760 | ret = -ENOMEDIUM; | |
2761 | goto out; | |
2762 | } | |
61007b31 SH |
2763 | if (bs->drv->bdrv_co_flush_to_disk) { |
2764 | ret = bs->drv->bdrv_co_flush_to_disk(bs); | |
2765 | } else if (bs->drv->bdrv_aio_flush) { | |
2766 | BlockAIOCB *acb; | |
2767 | CoroutineIOCompletion co = { | |
2768 | .coroutine = qemu_coroutine_self(), | |
2769 | }; | |
2770 | ||
2771 | acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co); | |
2772 | if (acb == NULL) { | |
2773 | ret = -EIO; | |
2774 | } else { | |
2775 | qemu_coroutine_yield(); | |
2776 | ret = co.ret; | |
2777 | } | |
2778 | } else { | |
2779 | /* | |
2780 | * Some block drivers always operate in either writethrough or unsafe | |
2781 | * mode and therefore don't support bdrv_flush. Usually qemu doesn't | |
2782 | * know how the server works (because the behaviour is hardcoded or | |
2783 | * depends on server-side configuration), so we can't ensure that | |
2784 | * everything is safe on disk. Returning an error doesn't work because | |
2785 | * that would break guests even if the server operates in writethrough | |
2786 | * mode. | |
2787 | * | |
2788 | * Let's hope the user knows what they're doing. | |
2789 | */ | |
2790 | ret = 0; | |
2791 | } | |
3ff2f67a | 2792 | |
61007b31 | 2793 | if (ret < 0) { |
cdb5e315 | 2794 | goto out; |
61007b31 SH |
2795 | } |
2796 | ||
2797 | /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH | |
2798 | * in the case of cache=unsafe, so there are no useless flushes. | |
2799 | */ | |
883833e2 HR |
2800 | flush_children: |
2801 | ret = 0; | |
2802 | QLIST_FOREACH(child, &bs->children, next) { | |
2803 | if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) { | |
2804 | int this_child_ret = bdrv_co_flush(child->bs); | |
2805 | if (!ret) { | |
2806 | ret = this_child_ret; | |
2807 | } | |
2808 | } | |
2809 | } | |
2810 | ||
cdb5e315 | 2811 | out: |
3ff2f67a | 2812 | /* Notify any pending flushes that we have completed */ |
e6af1e08 KW |
2813 | if (ret == 0) { |
2814 | bs->flushed_gen = current_gen; | |
2815 | } | |
3783fa3d PB |
2816 | |
2817 | qemu_co_mutex_lock(&bs->reqs_lock); | |
99723548 | 2818 | bs->active_flush_req = false; |
156af3ac DL |
2819 | /* Return value is ignored - it's ok if wait queue is empty */ |
2820 | qemu_co_queue_next(&bs->flush_queue); | |
3783fa3d | 2821 | qemu_co_mutex_unlock(&bs->reqs_lock); |
3ff2f67a | 2822 | |
49ca6259 | 2823 | early_exit: |
99723548 | 2824 | bdrv_dec_in_flight(bs); |
cdb5e315 | 2825 | return ret; |
61007b31 SH |
2826 | } |
2827 | ||
d93e5726 VSO |
2828 | int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, |
2829 | int64_t bytes) | |
61007b31 | 2830 | { |
b1066c87 | 2831 | BdrvTrackedRequest req; |
9f1963b3 | 2832 | int max_pdiscard, ret; |
3482b9bc | 2833 | int head, tail, align; |
0b9fd3f4 | 2834 | BlockDriverState *bs = child->bs; |
61007b31 | 2835 | |
d93e5726 | 2836 | if (!bs || !bs->drv || !bdrv_is_inserted(bs)) { |
61007b31 SH |
2837 | return -ENOMEDIUM; |
2838 | } | |
2839 | ||
d6883bc9 VSO |
2840 | if (bdrv_has_readonly_bitmaps(bs)) { |
2841 | return -EPERM; | |
2842 | } | |
2843 | ||
8b117001 VSO |
2844 | ret = bdrv_check_request(offset, bytes); |
2845 | if (ret < 0) { | |
2846 | return ret; | |
61007b31 SH |
2847 | } |
2848 | ||
61007b31 SH |
2849 | /* Do nothing if disabled. */ |
2850 | if (!(bs->open_flags & BDRV_O_UNMAP)) { | |
2851 | return 0; | |
2852 | } | |
2853 | ||
02aefe43 | 2854 | if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) { |
61007b31 SH |
2855 | return 0; |
2856 | } | |
2857 | ||
3482b9bc EB |
2858 | /* Discard is advisory, but some devices track and coalesce |
2859 | * unaligned requests, so we must pass everything down rather than | |
2860 | * round here. Still, most devices will just silently ignore | |
2861 | * unaligned requests (by returning -ENOTSUP), so we must fragment | |
2862 | * the request accordingly. */ | |
02aefe43 | 2863 | align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment); |
b8d0a980 EB |
2864 | assert(align % bs->bl.request_alignment == 0); |
2865 | head = offset % align; | |
f5a5ca79 | 2866 | tail = (offset + bytes) % align; |
9f1963b3 | 2867 | |
99723548 | 2868 | bdrv_inc_in_flight(bs); |
f5a5ca79 | 2869 | tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD); |
50824995 | 2870 | |
00695c27 | 2871 | ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0); |
ec050f77 DL |
2872 | if (ret < 0) { |
2873 | goto out; | |
2874 | } | |
2875 | ||
9f1963b3 EB |
2876 | max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX), |
2877 | align); | |
3482b9bc | 2878 | assert(max_pdiscard >= bs->bl.request_alignment); |
61007b31 | 2879 | |
f5a5ca79 | 2880 | while (bytes > 0) { |
d93e5726 | 2881 | int64_t num = bytes; |
3482b9bc EB |
2882 | |
2883 | if (head) { | |
2884 | /* Make small requests to get to alignment boundaries. */ | |
f5a5ca79 | 2885 | num = MIN(bytes, align - head); |
3482b9bc EB |
2886 | if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) { |
2887 | num %= bs->bl.request_alignment; | |
2888 | } | |
2889 | head = (head + num) % align; | |
2890 | assert(num < max_pdiscard); | |
2891 | } else if (tail) { | |
2892 | if (num > align) { | |
2893 | /* Shorten the request to the last aligned cluster. */ | |
2894 | num -= tail; | |
2895 | } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) && | |
2896 | tail > bs->bl.request_alignment) { | |
2897 | tail %= bs->bl.request_alignment; | |
2898 | num -= tail; | |
2899 | } | |
2900 | } | |
2901 | /* limit request size */ | |
2902 | if (num > max_pdiscard) { | |
2903 | num = max_pdiscard; | |
2904 | } | |
61007b31 | 2905 | |
d470ad42 HR |
2906 | if (!bs->drv) { |
2907 | ret = -ENOMEDIUM; | |
2908 | goto out; | |
2909 | } | |
47a5486d EB |
2910 | if (bs->drv->bdrv_co_pdiscard) { |
2911 | ret = bs->drv->bdrv_co_pdiscard(bs, offset, num); | |
61007b31 SH |
2912 | } else { |
2913 | BlockAIOCB *acb; | |
2914 | CoroutineIOCompletion co = { | |
2915 | .coroutine = qemu_coroutine_self(), | |
2916 | }; | |
2917 | ||
4da444a0 EB |
2918 | acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num, |
2919 | bdrv_co_io_em_complete, &co); | |
61007b31 | 2920 | if (acb == NULL) { |
b1066c87 FZ |
2921 | ret = -EIO; |
2922 | goto out; | |
61007b31 SH |
2923 | } else { |
2924 | qemu_coroutine_yield(); | |
2925 | ret = co.ret; | |
2926 | } | |
2927 | } | |
2928 | if (ret && ret != -ENOTSUP) { | |
b1066c87 | 2929 | goto out; |
61007b31 SH |
2930 | } |
2931 | ||
9f1963b3 | 2932 | offset += num; |
f5a5ca79 | 2933 | bytes -= num; |
61007b31 | 2934 | } |
b1066c87 FZ |
2935 | ret = 0; |
2936 | out: | |
00695c27 | 2937 | bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret); |
b1066c87 | 2938 | tracked_request_end(&req); |
99723548 | 2939 | bdrv_dec_in_flight(bs); |
b1066c87 | 2940 | return ret; |
61007b31 SH |
2941 | } |
2942 | ||
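Editor's note: the loop above splits a misaligned discard into a small head piece, driver-limited aligned middle pieces, and a tail piece, as the comment before it explains. The following standalone sketch (not part of block/io.c; EX_ALIGN, EX_MAX_PDISCARD and example_fragment_discard are made-up names, and the sub-request_alignment handling is deliberately omitted) reproduces just that arithmetic so it can be compiled and tried on its own:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EX_ALIGN        65536        /* stand-in for pdiscard_alignment */
#define EX_MAX_PDISCARD (1 << 20)    /* stand-in for bs->bl.max_pdiscard */
#define EX_MIN(a, b)    ((a) < (b) ? (a) : (b))

/* Print the pieces a misaligned discard would be fragmented into. */
static void example_fragment_discard(int64_t offset, int64_t bytes)
{
    int64_t head = offset % EX_ALIGN;
    int64_t tail = (offset + bytes) % EX_ALIGN;

    while (bytes > 0) {
        int64_t num = bytes;

        if (head) {
            /* Small first piece, up to the next alignment boundary */
            num = EX_MIN(bytes, EX_ALIGN - head);
            head = (head + num) % EX_ALIGN;
        } else if (tail && num > EX_ALIGN) {
            /* Stop the middle piece at the last aligned boundary */
            num -= tail;
        }
        /* Honour the per-request limit */
        if (num > EX_MAX_PDISCARD) {
            num = EX_MAX_PDISCARD;
        }

        printf("piece: offset=%" PRId64 " bytes=%" PRId64 "\n", offset, num);
        offset += num;
        bytes -= num;
    }
}

int main(void)
{
    example_fragment_discard(1000, 200000);   /* head, middle and tail pieces */
    return 0;
}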
48af776a | 2943 | int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf) |
61007b31 SH |
2944 | { |
2945 | BlockDriver *drv = bs->drv; | |
5c5ae76a FZ |
2946 | CoroutineIOCompletion co = { |
2947 | .coroutine = qemu_coroutine_self(), | |
2948 | }; | |
2949 | BlockAIOCB *acb; | |
61007b31 | 2950 | |
99723548 | 2951 | bdrv_inc_in_flight(bs); |
16a389dc | 2952 | if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) { |
5c5ae76a FZ |
2953 | co.ret = -ENOTSUP; |
2954 | goto out; | |
2955 | } | |
2956 | ||
16a389dc KW |
2957 | if (drv->bdrv_co_ioctl) { |
2958 | co.ret = drv->bdrv_co_ioctl(bs, req, buf); | |
2959 | } else { | |
2960 | acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co); | |
2961 | if (!acb) { | |
2962 | co.ret = -ENOTSUP; | |
2963 | goto out; | |
2964 | } | |
2965 | qemu_coroutine_yield(); | |
5c5ae76a | 2966 | } |
5c5ae76a | 2967 | out: |
99723548 | 2968 | bdrv_dec_in_flight(bs); |
5c5ae76a FZ |
2969 | return co.ret; |
2970 | } | |
2971 | ||
61007b31 SH |
2972 | void *qemu_blockalign(BlockDriverState *bs, size_t size) |
2973 | { | |
2974 | return qemu_memalign(bdrv_opt_mem_align(bs), size); | |
2975 | } | |
2976 | ||
2977 | void *qemu_blockalign0(BlockDriverState *bs, size_t size) | |
2978 | { | |
2979 | return memset(qemu_blockalign(bs, size), 0, size); | |
2980 | } | |
2981 | ||
2982 | void *qemu_try_blockalign(BlockDriverState *bs, size_t size) | |
2983 | { | |
2984 | size_t align = bdrv_opt_mem_align(bs); | |
2985 | ||
2986 | /* Ensure that NULL is never returned on success */ | |
2987 | assert(align > 0); | |
2988 | if (size == 0) { | |
2989 | size = align; | |
2990 | } | |
2991 | ||
2992 | return qemu_try_memalign(align, size); | |
2993 | } | |
2994 | ||
2995 | void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) | |
2996 | { | |
2997 | void *mem = qemu_try_blockalign(bs, size); | |
2998 | ||
2999 | if (mem) { | |
3000 | memset(mem, 0, size); | |
3001 | } | |
3002 | ||
3003 | return mem; | |
3004 | } | |
3005 | ||
3006 | /* | |
3007 |  * Check if all memory in this vector meets the node's minimum memory alignment. | |
3008 | */ | |
3009 | bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) | |
3010 | { | |
3011 | int i; | |
4196d2f0 | 3012 | size_t alignment = bdrv_min_mem_align(bs); |
61007b31 SH |
3013 | |
3014 | for (i = 0; i < qiov->niov; i++) { | |
3015 | if ((uintptr_t) qiov->iov[i].iov_base % alignment) { | |
3016 | return false; | |
3017 | } | |
3018 | if (qiov->iov[i].iov_len % alignment) { | |
3019 | return false; | |
3020 | } | |
3021 | } | |
3022 | ||
3023 | return true; | |
3024 | } | |
3025 | ||
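Editor's note: callers typically pair bdrv_qiov_is_aligned() with the allocation helpers above; when the caller's vector does not meet the node's memory alignment, data is staged through a bounce buffer. A minimal sketch under that assumption (example_read_via_bounce is a hypothetical caller, not part of this file):

static int coroutine_fn example_read_via_bounce(BdrvChild *child,
                                                int64_t offset, int64_t bytes,
                                                QEMUIOVector *qiov)
{
    BlockDriverState *bs = child->bs;
    void *bounce;
    int ret;

    if (bdrv_qiov_is_aligned(bs, qiov)) {
        /* Vector already satisfies the memory alignment, read directly */
        return bdrv_co_preadv(child, offset, bytes, qiov, 0);
    }

    bounce = qemu_try_blockalign(bs, bytes);
    if (!bounce) {
        return -ENOMEM;
    }

    ret = bdrv_co_pread(child, offset, bytes, bounce, 0);
    if (ret >= 0) {
        /* Copy the staged data back into the caller's vector */
        qemu_iovec_from_buf(qiov, 0, bounce, bytes);
    }
    qemu_vfree(bounce);
    return ret;
}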
3026 | void bdrv_add_before_write_notifier(BlockDriverState *bs, | |
3027 | NotifierWithReturn *notifier) | |
3028 | { | |
3029 | notifier_with_return_list_add(&bs->before_write_notifiers, notifier); | |
3030 | } | |
3031 | ||
3032 | void bdrv_io_plug(BlockDriverState *bs) | |
3033 | { | |
6b98bd64 PB |
3034 | BdrvChild *child; |
3035 | ||
3036 | QLIST_FOREACH(child, &bs->children, next) { | |
3037 | bdrv_io_plug(child->bs); | |
3038 | } | |
3039 | ||
d73415a3 | 3040 | if (qatomic_fetch_inc(&bs->io_plugged) == 0) { |
6b98bd64 PB |
3041 | BlockDriver *drv = bs->drv; |
3042 | if (drv && drv->bdrv_io_plug) { | |
3043 | drv->bdrv_io_plug(bs); | |
3044 | } | |
61007b31 SH |
3045 | } |
3046 | } | |
3047 | ||
3048 | void bdrv_io_unplug(BlockDriverState *bs) | |
3049 | { | |
6b98bd64 PB |
3050 | BdrvChild *child; |
3051 | ||
3052 | assert(bs->io_plugged); | |
d73415a3 | 3053 | if (qatomic_fetch_dec(&bs->io_plugged) == 1) { |
6b98bd64 PB |
3054 | BlockDriver *drv = bs->drv; |
3055 | if (drv && drv->bdrv_io_unplug) { | |
3056 | drv->bdrv_io_unplug(bs); | |
3057 | } | |
3058 | } | |
3059 | ||
3060 | QLIST_FOREACH(child, &bs->children, next) { | |
3061 | bdrv_io_unplug(child->bs); | |
61007b31 SH |
3062 | } |
3063 | } | |
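Editor's note: plug/unplug bracket a burst of submissions so drivers that support batching can flush them in one go. A hedged sketch of a hypothetical caller (everything except bdrv_io_plug/bdrv_io_unplug below is an illustrative, made-up name):

typedef struct ExampleReq {
    int64_t offset;
    int64_t bytes;
} ExampleReq;

static void example_submit_one(BlockDriverState *bs, ExampleReq *req)
{
    /* In a real caller this would queue one request with the driver */
    (void)bs;
    (void)req;
}

static void example_submit_batch(BlockDriverState *bs, ExampleReq *reqs, int n)
{
    bdrv_io_plug(bs);                     /* start collecting requests */
    for (int i = 0; i < n; i++) {
        example_submit_one(bs, &reqs[i]);
    }
    bdrv_io_unplug(bs);                   /* driver submits the whole batch */
}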
23d0ba93 FZ |
3064 | |
3065 | void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size) | |
3066 | { | |
3067 | BdrvChild *child; | |
3068 | ||
3069 | if (bs->drv && bs->drv->bdrv_register_buf) { | |
3070 | bs->drv->bdrv_register_buf(bs, host, size); | |
3071 | } | |
3072 | QLIST_FOREACH(child, &bs->children, next) { | |
3073 | bdrv_register_buf(child->bs, host, size); | |
3074 | } | |
3075 | } | |
3076 | ||
3077 | void bdrv_unregister_buf(BlockDriverState *bs, void *host) | |
3078 | { | |
3079 | BdrvChild *child; | |
3080 | ||
3081 | if (bs->drv && bs->drv->bdrv_unregister_buf) { | |
3082 | bs->drv->bdrv_unregister_buf(bs, host); | |
3083 | } | |
3084 | QLIST_FOREACH(child, &bs->children, next) { | |
3085 | bdrv_unregister_buf(child->bs, host); | |
3086 | } | |
3087 | } | |
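Editor's note: these two walk the whole subtree so drivers that pin or map I/O memory can register a long-lived buffer once rather than per request. A minimal, hypothetical usage sketch (the example_* helpers are not part of this file):

/* Illustrative only: register a reusable aligned buffer with every node
 * below @bs for its lifetime, and release it again when done. */
static void *example_acquire_registered_buf(BlockDriverState *bs, size_t len)
{
    void *buf = qemu_blockalign(bs, len);

    bdrv_register_buf(bs, buf, len);
    return buf;
}

static void example_release_registered_buf(BlockDriverState *bs, void *buf)
{
    bdrv_unregister_buf(bs, buf);
    qemu_vfree(buf);
}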
fcc67678 | 3088 | |
67b51fb9 VSO |
3089 | static int coroutine_fn bdrv_co_copy_range_internal( |
3090 | BdrvChild *src, uint64_t src_offset, BdrvChild *dst, | |
3091 | uint64_t dst_offset, uint64_t bytes, | |
3092 | BdrvRequestFlags read_flags, BdrvRequestFlags write_flags, | |
3093 | bool recurse_src) | |
fcc67678 | 3094 | { |
999658a0 | 3095 | BdrvTrackedRequest req; |
fcc67678 FZ |
3096 | int ret; |
3097 | ||
fe0480d6 KW |
3098 | /* TODO We can support BDRV_REQ_NO_FALLBACK here */ |
3099 | assert(!(read_flags & BDRV_REQ_NO_FALLBACK)); | |
3100 | assert(!(write_flags & BDRV_REQ_NO_FALLBACK)); | |
3101 | ||
f4dad307 | 3102 | if (!dst || !dst->bs || !bdrv_is_inserted(dst->bs)) { |
fcc67678 FZ |
3103 | return -ENOMEDIUM; |
3104 | } | |
8b117001 | 3105 | ret = bdrv_check_request32(dst_offset, bytes); |
fcc67678 FZ |
3106 | if (ret) { |
3107 | return ret; | |
3108 | } | |
67b51fb9 VSO |
3109 | if (write_flags & BDRV_REQ_ZERO_WRITE) { |
3110 | return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags); | |
fcc67678 FZ |
3111 | } |
3112 | ||
f4dad307 | 3113 | if (!src || !src->bs || !bdrv_is_inserted(src->bs)) { |
d4d3e5a0 FZ |
3114 | return -ENOMEDIUM; |
3115 | } | |
8b117001 | 3116 | ret = bdrv_check_request32(src_offset, bytes); |
d4d3e5a0 FZ |
3117 | if (ret) { |
3118 | return ret; | |
3119 | } | |
3120 | ||
fcc67678 FZ |
3121 | if (!src->bs->drv->bdrv_co_copy_range_from |
3122 | || !dst->bs->drv->bdrv_co_copy_range_to | |
3123 | || src->bs->encrypted || dst->bs->encrypted) { | |
3124 | return -ENOTSUP; | |
3125 | } | |
37aec7d7 | 3126 | |
fcc67678 | 3127 | if (recurse_src) { |
999658a0 VSO |
3128 | bdrv_inc_in_flight(src->bs); |
3129 | tracked_request_begin(&req, src->bs, src_offset, bytes, | |
3130 | BDRV_TRACKED_READ); | |
3131 | ||
09d2f948 VSO |
3132 | /* BDRV_REQ_SERIALISING is only for write operation */ |
3133 | assert(!(read_flags & BDRV_REQ_SERIALISING)); | |
c53cb427 | 3134 | bdrv_wait_serialising_requests(&req); |
999658a0 | 3135 | |
37aec7d7 FZ |
3136 | ret = src->bs->drv->bdrv_co_copy_range_from(src->bs, |
3137 | src, src_offset, | |
3138 | dst, dst_offset, | |
67b51fb9 VSO |
3139 | bytes, |
3140 | read_flags, write_flags); | |
999658a0 VSO |
3141 | |
3142 | tracked_request_end(&req); | |
3143 | bdrv_dec_in_flight(src->bs); | |
fcc67678 | 3144 | } else { |
999658a0 VSO |
3145 | bdrv_inc_in_flight(dst->bs); |
3146 | tracked_request_begin(&req, dst->bs, dst_offset, bytes, | |
3147 | BDRV_TRACKED_WRITE); | |
0eb1e891 FZ |
3148 | ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req, |
3149 | write_flags); | |
3150 | if (!ret) { | |
3151 | ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs, | |
3152 | src, src_offset, | |
3153 | dst, dst_offset, | |
3154 | bytes, | |
3155 | read_flags, write_flags); | |
3156 | } | |
3157 | bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret); | |
999658a0 VSO |
3158 | tracked_request_end(&req); |
3159 | bdrv_dec_in_flight(dst->bs); | |
fcc67678 | 3160 | } |
999658a0 | 3161 | |
37aec7d7 | 3162 | return ret; |
fcc67678 FZ |
3163 | } |
3164 | ||
3165 | /* Copy range from @src to @dst. | |
3166 | * | |
3167 |  * See the comment on bdrv_co_copy_range for the parameter and return value | |
3168 | * semantics. */ | |
3169 | int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset, | |
3170 | BdrvChild *dst, uint64_t dst_offset, | |
67b51fb9 VSO |
3171 | uint64_t bytes, |
3172 | BdrvRequestFlags read_flags, | |
3173 | BdrvRequestFlags write_flags) | |
fcc67678 | 3174 | { |
ecc983a5 FZ |
3175 | trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes, |
3176 | read_flags, write_flags); | |
fcc67678 | 3177 | return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, |
67b51fb9 | 3178 | bytes, read_flags, write_flags, true); |
fcc67678 FZ |
3179 | } |
3180 | ||
3181 | /* Copy range from @src to @dst. | |
3182 | * | |
3183 |  * See the comment on bdrv_co_copy_range for the parameter and return value | |
3184 | * semantics. */ | |
3185 | int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset, | |
3186 | BdrvChild *dst, uint64_t dst_offset, | |
67b51fb9 VSO |
3187 | uint64_t bytes, |
3188 | BdrvRequestFlags read_flags, | |
3189 | BdrvRequestFlags write_flags) | |
fcc67678 | 3190 | { |
ecc983a5 FZ |
3191 | trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, |
3192 | read_flags, write_flags); | |
fcc67678 | 3193 | return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset, |
67b51fb9 | 3194 | bytes, read_flags, write_flags, false); |
fcc67678 FZ |
3195 | } |
3196 | ||
3197 | int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset, | |
3198 | BdrvChild *dst, uint64_t dst_offset, | |
67b51fb9 VSO |
3199 | uint64_t bytes, BdrvRequestFlags read_flags, |
3200 | BdrvRequestFlags write_flags) | |
fcc67678 | 3201 | { |
37aec7d7 FZ |
3202 | return bdrv_co_copy_range_from(src, src_offset, |
3203 | dst, dst_offset, | |
67b51fb9 | 3204 | bytes, read_flags, write_flags); |
fcc67678 | 3205 | } |
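Editor's note: a typical caller treats the offload above as an optimisation; -ENOTSUP (for example when one of the drivers lacks the copy-range hooks, as checked in bdrv_co_copy_range_internal) simply means falling back to an ordinary read/write copy. A sketch under that assumption; example_copy_with_fallback is a made-up name:

static int coroutine_fn example_copy_with_fallback(BdrvChild *src,
                                                   BdrvChild *dst,
                                                   uint64_t offset,
                                                   uint64_t bytes)
{
    void *buf;
    int ret;

    ret = bdrv_co_copy_range(src, offset, dst, offset, bytes, 0, 0);
    if (ret != -ENOTSUP) {
        return ret;                     /* offloaded, or a real error */
    }

    /* Fallback: bounce the data through a properly aligned buffer */
    buf = qemu_try_blockalign(src->bs, bytes);
    if (!buf) {
        return -ENOMEM;
    }
    ret = bdrv_co_pread(src, offset, bytes, buf, 0);
    if (ret >= 0) {
        ret = bdrv_co_pwrite(dst, offset, bytes, buf, 0);
    }
    qemu_vfree(buf);
    return ret;
}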
3d9f2d2a KW |
3206 | |
3207 | static void bdrv_parent_cb_resize(BlockDriverState *bs) | |
3208 | { | |
3209 | BdrvChild *c; | |
3210 | QLIST_FOREACH(c, &bs->parents, next_parent) { | |
bd86fb99 HR |
3211 | if (c->klass->resize) { |
3212 | c->klass->resize(c); | |
3d9f2d2a KW |
3213 | } |
3214 | } | |
3215 | } | |
3216 | ||
3217 | /** | |
3218 | * Truncate file to 'offset' bytes (needed only for file protocols) | |
c80d8b06 HR |
3219 | * |
3220 | * If 'exact' is true, the file must be resized to exactly the given | |
3221 | * 'offset'. Otherwise, it is sufficient for the node to be at least | |
3222 | * 'offset' bytes in length. | |
3d9f2d2a | 3223 | */ |
c80d8b06 | 3224 | int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact, |
7b8e4857 KW |
3225 | PreallocMode prealloc, BdrvRequestFlags flags, |
3226 | Error **errp) | |
3d9f2d2a KW |
3227 | { |
3228 | BlockDriverState *bs = child->bs; | |
23b93525 | 3229 | BdrvChild *filtered, *backing; |
3d9f2d2a | 3230 | BlockDriver *drv = bs->drv; |
1bc5f09f KW |
3231 | BdrvTrackedRequest req; |
3232 | int64_t old_size, new_bytes; | |
3d9f2d2a KW |
3233 | int ret; |
3234 | ||
3d9f2d2a KW |
3235 | |
3236 | /* if bs->drv == NULL, bs is closed, so there's nothing to do here */ | |
3237 | if (!drv) { | |
3238 | error_setg(errp, "No medium inserted"); | |
3239 | return -ENOMEDIUM; | |
3240 | } | |
3241 | if (offset < 0) { | |
3242 | error_setg(errp, "Image size cannot be negative"); | |
3243 | return -EINVAL; | |
3244 | } | |
3245 | ||
8b117001 VSO |
3246 | ret = bdrv_check_request(offset, 0); |
3247 | if (ret < 0) { | |
3248 |         error_setg(errp, "Image size is too large; it must not exceed " | |
3249 |                    "%" PRId64, BDRV_MAX_LENGTH); | |
3250 | return ret; | |
3251 | } | |
3252 | ||
1bc5f09f KW |
3253 | old_size = bdrv_getlength(bs); |
3254 | if (old_size < 0) { | |
3255 | error_setg_errno(errp, -old_size, "Failed to get old image size"); | |
3256 | return old_size; | |
3257 | } | |
3258 | ||
3259 | if (offset > old_size) { | |
3260 | new_bytes = offset - old_size; | |
3261 | } else { | |
3262 | new_bytes = 0; | |
3263 | } | |
3264 | ||
3d9f2d2a | 3265 | bdrv_inc_in_flight(bs); |
5416a11e FZ |
3266 | tracked_request_begin(&req, bs, offset - new_bytes, new_bytes, |
3267 | BDRV_TRACKED_TRUNCATE); | |
1bc5f09f KW |
3268 | |
3269 | /* If we are growing the image and potentially using preallocation for the | |
3270 | * new area, we need to make sure that no write requests are made to it | |
3271 | * concurrently or they might be overwritten by preallocation. */ | |
3272 | if (new_bytes) { | |
8ac5aab2 | 3273 | bdrv_make_request_serialising(&req, 1); |
cd47d792 FZ |
3274 | } |
3275 | if (bs->read_only) { | |
3276 | error_setg(errp, "Image is read-only"); | |
3277 | ret = -EACCES; | |
3278 | goto out; | |
3279 | } | |
3280 | ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req, | |
3281 | 0); | |
3282 | if (ret < 0) { | |
3283 | error_setg_errno(errp, -ret, | |
3284 | "Failed to prepare request for truncation"); | |
3285 | goto out; | |
1bc5f09f | 3286 | } |
3d9f2d2a | 3287 | |
93393e69 | 3288 | filtered = bdrv_filter_child(bs); |
23b93525 | 3289 | backing = bdrv_cow_child(bs); |
93393e69 | 3290 | |
955c7d66 KW |
3291 | /* |
3292 | * If the image has a backing file that is large enough that it would | |
3293 | * provide data for the new area, we cannot leave it unallocated because | |
3294 | * then the backing file content would become visible. Instead, zero-fill | |
3295 | * the new area. | |
3296 | * | |
3297 | * Note that if the image has a backing file, but was opened without the | |
3298 | * backing file, taking care of keeping things consistent with that backing | |
3299 | * file is the user's responsibility. | |
3300 | */ | |
23b93525 | 3301 | if (new_bytes && backing) { |
955c7d66 KW |
3302 | int64_t backing_len; |
3303 | ||
23b93525 | 3304 | backing_len = bdrv_getlength(backing->bs); |
955c7d66 KW |
3305 | if (backing_len < 0) { |
3306 | ret = backing_len; | |
3307 | error_setg_errno(errp, -ret, "Could not get backing file size"); | |
3308 | goto out; | |
3309 | } | |
3310 | ||
3311 | if (backing_len > old_size) { | |
3312 | flags |= BDRV_REQ_ZERO_WRITE; | |
3313 | } | |
3314 | } | |
3315 | ||
6b7e8f8b | 3316 | if (drv->bdrv_co_truncate) { |
92b92799 KW |
3317 | if (flags & ~bs->supported_truncate_flags) { |
3318 | error_setg(errp, "Block driver does not support requested flags"); | |
3319 | ret = -ENOTSUP; | |
3320 | goto out; | |
3321 | } | |
3322 | ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp); | |
93393e69 HR |
3323 | } else if (filtered) { |
3324 | ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp); | |
6b7e8f8b | 3325 | } else { |
3d9f2d2a KW |
3326 | error_setg(errp, "Image format driver does not support resize"); |
3327 | ret = -ENOTSUP; | |
3328 | goto out; | |
3329 | } | |
3d9f2d2a KW |
3330 | if (ret < 0) { |
3331 | goto out; | |
3332 | } | |
6b7e8f8b | 3333 | |
3d9f2d2a KW |
3334 | ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS); |
3335 | if (ret < 0) { | |
3336 | error_setg_errno(errp, -ret, "Could not refresh total sector count"); | |
3337 | } else { | |
3338 | offset = bs->total_sectors * BDRV_SECTOR_SIZE; | |
3339 | } | |
cd47d792 FZ |
3340 |     /* Truncation may have succeeded even though refresh_total_sectors | |
3341 |      * failed; the latter does not affect how we should finish the request. | |
3342 | * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */ | |
3343 | bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0); | |
3d9f2d2a KW |
3344 | |
3345 | out: | |
1bc5f09f | 3346 | tracked_request_end(&req); |
3d9f2d2a | 3347 | bdrv_dec_in_flight(bs); |
1bc5f09f | 3348 | |
3d9f2d2a KW |
3349 | return ret; |
3350 | } |
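Editor's note: a short usage sketch for the function above, growing a node while requesting full preallocation and allowing the result to be rounded up (exact=false). The caller name is hypothetical; errors are reported through the usual Error object:

static int coroutine_fn example_grow_node(BdrvChild *child, int64_t new_size,
                                          Error **errp)
{
    /* exact=false: a result slightly larger than @new_size is acceptable;
     * flags=0: the caller itself requests no extra BdrvRequestFlags. */
    return bdrv_co_truncate(child, new_size, false, PREALLOC_MODE_FULL, 0,
                            errp);
}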