/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags);

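/*
 * Quiesce or resume all parents of @bs: each parent that implements the
 * corresponding BdrvChildRole callback is told to stop (or restart)
 * submitting new requests to @bs.
 */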
void bdrv_parent_drained_begin(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}

void bdrv_parent_drained_end(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}

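/*
 * Merge a child's limits into its parent's.  Alignments and optimal sizes
 * combine with MAX() because the merged node must satisfy the stricter
 * value; hard caps such as max_transfer and max_iov combine with
 * MIN_NON_ZERO() because 0 there means "no limit".
 */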
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    atomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = atomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}

/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (atomic_read(&bs->in_flight)) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}

static bool bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child, *tmp;
    bool waited;

    waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }

    QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
        BlockDriverState *bs = child->bs;
        bool in_main_loop =
            qemu_get_current_aio_context() == qemu_get_aio_context();
        assert(bs->refcnt > 0);
        if (in_main_loop) {
            /* In case the recursive bdrv_drain_recurse processes a
             * block_job_defer_to_main_loop BH and modifies the graph,
             * let's hold a reference to bs until we are done.
             *
             * IOThread doesn't have such a BH, and it is not safe to call
             * bdrv_unref without BQL, so skip doing it there.
             */
            bdrv_ref(bs);
        }
        waited |= bdrv_drain_recurse(bs);
        if (in_main_loop) {
            bdrv_unref(bs);
        }
    }

    return waited;
}

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
} BdrvCoDrainData;

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    bdrv_dec_in_flight(bs);
    bdrv_drained_begin(bs);
    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
    };
    bdrv_inc_in_flight(bs);
    aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
                            bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

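/*
 * Begin a quiescent section.  The first bdrv_drained_begin() call disables
 * external AioContext clients and notifies the parents; nesting is tracked
 * in bs->quiesce_counter, so begin/end pairs can be stacked:
 *
 *     bdrv_drained_begin(bs);
 *     ... no new requests will be submitted to bs here ...
 *     bdrv_drained_end(bs);
 */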
void bdrv_drained_begin(BlockDriverState *bs)
{
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs);
        return;
    }

    if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs);
    }

    bdrv_drain_recurse(bs);
}

void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (atomic_fetch_dec(&bs->quiesce_counter) > 1) {
        return;
    }

    bdrv_parent_drained_end(bs);
    aio_enable_external(bdrv_get_aio_context(bs));
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend the block driver's internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts.  In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk; use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool waited = true;
    BlockDriverState *bs;
    BdrvNextIterator it;
    GSList *aio_ctxs = NULL, *ctx;

    block_job_pause_all();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_parent_drained_begin(bs);
        aio_disable_external(aio_context);
        aio_context_release(aio_context);

        if (!g_slist_find(aio_ctxs, aio_context)) {
            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
        }
    }

    /* Note that completion of an asynchronous I/O operation can trigger any
     * number of other I/O operations on other devices---for example a
     * coroutine can submit an I/O request to another device in response to
     * request completion.  Therefore we must keep looping until there was no
     * more activity rather than simply draining each device independently.
     */
    while (waited) {
        waited = false;

        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
            AioContext *aio_context = ctx->data;

            aio_context_acquire(aio_context);
            for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                if (aio_context == bdrv_get_aio_context(bs)) {
                    waited |= bdrv_drain_recurse(bs);
                }
            }
            aio_context_release(aio_context);
        }
    }

    g_slist_free(aio_ctxs);
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs;
    BdrvNextIterator it;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        aio_enable_external(aio_context);
        bdrv_parent_drained_end(bs);
        aio_context_release(aio_context);
    }

    block_job_resume_all();
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        atomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

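/*
 * Mark a request as serialising and widen its overlap window to @align
 * boundaries.  For example, a 512-byte request at offset 4608 with
 * align = 4096 gets overlap_offset = 4096 and overlap_bytes = 4096, so any
 * other request touching the same 4 KiB block counts as overlapping.
 */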
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        atomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}

/**
 * Round a region to cluster boundaries (sector-based)
 */
void bdrv_round_sectors_to_clusters(BlockDriverState *bs,
                                    int64_t sector_num, int nb_sectors,
                                    int64_t *cluster_sector_num,
                                    int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, unsigned int bytes,
                            int64_t *cluster_offset,
                            unsigned int *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}

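/*
 * Return the cluster size in bytes, or the request alignment if the driver
 * does not report cluster information.  Copy-on-read uses this value as its
 * serialisation granularity.
 */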
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

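/*
 * Check whether [offset, offset + bytes) intersects the request's overlap
 * window.  Two half-open ranges are disjoint exactly when one starts at or
 * after the end of the other: [0, 512) and [512, 1024) do not overlap,
 * while [0, 513) and [512, 1024) do.
 */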
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

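/*
 * bs->in_flight counts requests that have entered the block layer but not
 * yet completed.  bdrv_drain_recurse() polls until it drops to zero, and
 * bdrv_dec_in_flight() kicks any such poller through bdrv_wakeup().
 */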
void bdrv_inc_in_flight(BlockDriverState *bs)
{
    atomic_inc(&bs->in_flight);
}

static void dummy_bh_cb(void *opaque)
{
}

void bdrv_wakeup(BlockDriverState *bs)
{
    /* The barrier (or an atomic op) is in the caller.  */
    if (atomic_read(&bs->wakeup)) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
    }
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    atomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

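/*
 * Wait until no serialising request overlaps this one.  After every wait the
 * scan restarts from the head of the list, because the set of tracked
 * requests may have changed while we slept.  Returns true if we waited at
 * least once.
 */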
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!atomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    do {
        retry = false;
        qemu_co_mutex_lock(&bs->reqs_lock);
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
        qemu_co_mutex_unlock(&bs->reqs_lock);
    } while (retry);

    return waited;
}

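/*
 * Common sanity checks for byte-based requests: the size must stay below
 * the global request cap, the medium must be inserted, and the offset must
 * not be negative.
 */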
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}

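/*
 * The synchronous wrappers below run requests through the coroutine API:
 * the parameters are packed into an RwCo, the request runs in a coroutine,
 * and callers outside coroutine context poll the AioContext with
 * BDRV_POLL_WHILE() until the coroutine has stored its result.
 */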
typedef struct RwCo {
    BdrvChild *child;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .child = child,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
        bdrv_coroutine_enter(child->bs, co);
        BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
    }
    return rwco.ret;
}

/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BdrvChild *child, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BdrvChild *child, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = NULL,
        .iov_len = bytes,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(child, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    BlockDriverState *bs = child->bs;
    BlockDriverState *file;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
        if (nb_sectors <= 0) {
            return 0;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, sector_num << BDRV_SECTOR_BITS,
                                 n << BDRV_SECTOR_BITS, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}

int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_preadv(child, offset, &qiov);
}

int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(child, offset, &qiov);
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

808
08844473
KW
809typedef struct CoroutineIOCompletion {
810 Coroutine *coroutine;
811 int ret;
812} CoroutineIOCompletion;
813
814static void bdrv_co_io_em_complete(void *opaque, int ret)
815{
816 CoroutineIOCompletion *co = opaque;
817
818 co->ret = ret;
b9e413dd 819 aio_co_wake(co->coroutine);
08844473
KW
820}
821
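/*
 * Hand a read request to the driver, preferring the byte-based
 * .bdrv_co_preadv interface, then the sector-based .bdrv_co_readv, and
 * finally emulating the request on top of the legacy .bdrv_aio_readv
 * callback.
 */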
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_preadv) {
        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_readv) {
        return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    }
}

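/*
 * Hand a write request to the driver, trying the interfaces in the same
 * order as for reads.  Flags the driver does not support are stripped from
 * the call and emulated here; in particular an unsupported BDRV_REQ_FUA is
 * emulated by a full flush after the write completes.
 */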
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);

    if (drv->bdrv_co_writev_flags) {
        ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
    } else if (drv->bdrv_co_writev) {
        assert(!bs->supported_write_flags);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    } else {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    }

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    return ret;
}

static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                               uint64_t bytes, QEMUIOVector *qiov)
{
    BlockDriver *drv = bs->drv;

    if (!drv->bdrv_co_pwritev_compressed) {
        return -ENOTSUP;
    }

    return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
}

static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_offset;
    unsigned int cluster_bytes;
    size_t skip_bytes;
    int ret;

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    iov.iov_len = cluster_bytes;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = bdrv_driver_preadv(bs, cluster_offset, cluster_bytes,
                             &bounce_qiov, 0);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_pwrite_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        /* FIXME: Should we (perhaps conditionally) be setting
         * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
         * that still correctly reads as zero? */
        ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, cluster_bytes, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = bdrv_driver_pwritev(bs, cluster_offset, cluster_bytes,
                                  &bounce_qiov, 0);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = offset - cluster_offset;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, bytes);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers.  For now, there aren't any
     * passthrough flags.  */
    assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
        wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t start_sector = offset >> BDRV_SECTOR_BITS;
        int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
        unsigned int nb_sectors = end_sector - start_sector;
        int pnum;

        ret = bdrv_is_allocated(bs, start_sector, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
        goto out;
    }

    while (bytes_remaining) {
        int num;

        if (max_bytes) {
            QEMUIOVector local_qiov;

            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, &local_qiov, 0);
            max_bytes -= num;
            qemu_iovec_destroy(&local_qiov);
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
                                    bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}

/*
 * Handle a read request in coroutine context
 */
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (atomic_read(&bs->copy_on_read) && !(flags & BDRV_REQ_NO_SERIALISING)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_co_do_readv(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_preadv(child, sector_num << BDRV_SECTOR_BITS,
                          nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(child->bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(child, sector_num, nb_sectors, qiov, 0);
}

/* Maximum buffer for write zeroes fallback, in bytes */
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

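/*
 * Zero @bytes bytes at @offset, using the driver's .bdrv_co_pwrite_zeroes
 * where possible and falling back to writes from a zeroed bounce buffer.
 * The request is fragmented so that the bulk stays aligned: for instance,
 * with a 64 KiB zero alignment, a request for [60 KiB, 200 KiB) is issued
 * as a 4 KiB head, an aligned 128 KiB middle and an 8 KiB tail.
 */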
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;
    bool need_flush = false;
    int head = 0;
    int tail = 0;

    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
                        bs->bl.request_alignment);
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    MAX_WRITE_ZEROES_BOUNCE_BUFFER);

    assert(alignment % bs->bl.request_alignment == 0);
    head = offset % alignment;
    tail = (offset + bytes) % alignment;
    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
    assert(max_write_zeroes >= bs->bl.request_alignment);

    while (bytes > 0 && !ret) {
        int num = bytes;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned, and that unaligned requests do not cross cluster
         * boundaries.
         */
        if (head) {
            /* Make a small request up to the first aligned sector. For
             * convenience, limit this request to max_transfer even if
             * we don't need to fall back to writes.  */
            num = MIN(MIN(bytes, max_transfer), alignment - head);
            head = (head + num) % alignment;
            assert(num < max_write_zeroes);
        } else if (tail && num > alignment) {
            /* Shorten the request to the last aligned sector.  */
            num -= tail;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_pwrite_zeroes) {
            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
                                             flags & bs->supported_zero_flags);
            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
                need_flush = true;
            }
        } else {
            assert(!bs->supported_zero_flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;

            if ((flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* No need for bdrv_driver_pwrite() to do a fallback
                 * flush on each chunk; use just one at the end */
                write_flags &= ~BDRV_REQ_FUA;
                need_flush = true;
            }
            num = MIN(num, max_transfer);
            iov.iov_len = num;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_transfer) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        offset += num;
        bytes -= num;
    }

fail:
    if (ret == 0 && need_flush) {
        ret = bdrv_co_flush(bs);
    }
    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * Forwards an already correctly aligned write request to the BlockDriver,
 * after possibly fragmenting it.
 */
static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t start_sector = offset >> BDRV_SECTOR_BITS;
    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert(!qiov || bytes == qiov->size);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    assert(!(flags & ~BDRV_REQ_MASK));
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
    assert(child->perm & BLK_PERM_WRITE);
    assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
    } else if (bytes <= max_transfer) {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
    } else {
        bdrv_debug_event(bs, BLKDBG_PWRITEV);
        while (bytes_remaining) {
            int num = MIN(bytes_remaining, max_transfer);
            QEMUIOVector local_qiov;
            int local_flags = flags;

            assert(num);
            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
                /* If FUA is going to be emulated by flush, we only
                 * need to flush on the last iteration */
                local_flags &= ~BDRV_REQ_FUA;
            }
            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);

            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
                                      num, &local_qiov, local_flags);
            qemu_iovec_destroy(&local_qiov);
            if (ret < 0) {
                break;
            }
            bytes_remaining -= num;
        }
    }
    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);

    atomic_inc(&bs->write_gen);
    bdrv_set_dirty(bs, start_sector, end_sector - start_sector);

    stat64_max(&bs->wr_highest_offset, offset + bytes);

    if (ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, end_sector);
        ret = 0;
    }

    return ret;
}

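/*
 * Implement a zero write to an unaligned range as an RMW head, an aligned
 * middle written with BDRV_REQ_ZERO_WRITE, and an RMW tail: each partial
 * block is read into a bounce buffer, partially zeroed there and written
 * back as a full block.
 */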
static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
                                                int64_t offset,
                                                unsigned int bytes,
                                                BdrvRequestFlags flags,
                                                BdrvTrackedRequest *req)
{
    BlockDriverState *bs = child->bs;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    struct iovec iov;
    uint64_t align = bs->bl.request_alignment;
    unsigned int head_padding_bytes, tail_padding_bytes;
    int ret = 0;

    head_padding_bytes = offset & (align - 1);
    tail_padding_bytes = (align - (offset + bytes)) & (align - 1);

    assert(flags & BDRV_REQ_ZERO_WRITE);
    if (head_padding_bytes || tail_padding_bytes) {
        buf = qemu_blockalign(bs, align);
        iov = (struct iovec) {
            .iov_base   = buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&local_qiov, &iov, 1);
    }
    if (head_padding_bytes) {
        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);

        /* RMW the unaligned part before head. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(child, req, offset & ~(align - 1), align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        memset(buf + head_padding_bytes, 0, zero_bytes);
        ret = bdrv_aligned_pwritev(child, req, offset & ~(align - 1), align,
                                   align, &local_qiov,
                                   flags & ~BDRV_REQ_ZERO_WRITE);
        if (ret < 0) {
            goto fail;
        }
        offset += zero_bytes;
        bytes -= zero_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes >= align) {
        /* Write the aligned part in the middle. */
        uint64_t aligned_bytes = bytes & ~(align - 1);
        ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
                                   NULL, flags);
        if (ret < 0) {
            goto fail;
        }
        bytes -= aligned_bytes;
        offset += aligned_bytes;
    }

    assert(!bytes || (offset & (align - 1)) == 0);
    if (bytes) {
        assert(align == tail_padding_bytes + bytes);
        /* RMW the unaligned part after tail. */
        mark_request_serialising(req, align);
        wait_serialising_requests(req);
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(child, req, offset, align,
                                  align, &local_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        memset(buf, 0, bytes);
        ret = bdrv_aligned_pwritev(child, req, offset, align, align,
                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
    }
fail:
    qemu_vfree(buf);
    return ret;
}

/*
 * Handle a write request in coroutine context
 */
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    uint64_t align = bs->bl.request_alignment;
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EPERM;
    }
    assert(!(bs->open_flags & BDRV_O_INACTIVE));

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    if (!qiov) {
        ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
        goto out;
    }

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);

        /* We have read the tail already if the request is smaller
         * than one aligned block.
         */
        if (bytes < align) {
            qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
            bytes = align;
        }
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1),
                                  align, align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:
    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);
out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}

static int coroutine_fn bdrv_co_do_writev(BdrvChild *child,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return -EINVAL;
    }

    return bdrv_co_pwritev(child, sector_num << BDRV_SECTOR_BITS,
                           nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
                                int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(child->bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(child, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int bytes, BdrvRequestFlags flags)
{
    trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);

    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_pwritev(child, offset, bytes, NULL,
                           BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend
 * or not.
 */
int bdrv_flush_all(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs = NULL;
    int result = 0;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}

61007b31
SH
1698typedef struct BdrvCoGetBlockStatusData {
1699 BlockDriverState *bs;
1700 BlockDriverState *base;
67a0fd2a 1701 BlockDriverState **file;
61007b31
SH
1702 int64_t sector_num;
1703 int nb_sectors;
1704 int *pnum;
1705 int64_t ret;
1706 bool done;
1707} BdrvCoGetBlockStatusData;
1708
1709/*
1710 * Returns the allocation status of the specified sectors.
1711 * Drivers not implementing the functionality are assumed to not support
1712 * backing files, hence all their sectors are reported as allocated.
1713 *
fb0d8654
EB
1714 * If 'sector_num' is beyond the end of the disk image the return value is
1715 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
61007b31
SH
1716 *
1717 * 'pnum' is set to the number of sectors (including and immediately following
1718 * the specified sector) that are known to be in the same
1719 * allocated/unallocated state.
1720 *
1721 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
fb0d8654
EB
1722 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
1723 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
67a0fd2a
FZ
1724 *
1725 * If returned value is positive and BDRV_BLOCK_OFFSET_VALID bit is set, 'file'
1726 * points to the BDS which the sector range is allocated in.
61007b31
SH
1727 */
1728static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
1729 int64_t sector_num,
67a0fd2a
FZ
1730 int nb_sectors, int *pnum,
1731 BlockDriverState **file)
1732{
1733 int64_t total_sectors;
1734 int64_t n;
1735 int64_t ret, ret2;
1736
81c219ac 1737 *file = NULL;
1738 total_sectors = bdrv_nb_sectors(bs);
1739 if (total_sectors < 0) {
1740 return total_sectors;
1741 }
1742
1743 if (sector_num >= total_sectors) {
1744 *pnum = 0;
fb0d8654 1745 return BDRV_BLOCK_EOF;
1746 }
1747
1748 n = total_sectors - sector_num;
1749 if (n < nb_sectors) {
1750 nb_sectors = n;
1751 }
1752
1753 if (!bs->drv->bdrv_co_get_block_status) {
1754 *pnum = nb_sectors;
1755 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
1756 if (sector_num + nb_sectors == total_sectors) {
1757 ret |= BDRV_BLOCK_EOF;
1758 }
1759 if (bs->drv->protocol_name) {
1760 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
81c219ac 1761 *file = bs;
1762 }
1763 return ret;
1764 }
1765
99723548 1766 bdrv_inc_in_flight(bs);
1767 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
1768 file);
1769 if (ret < 0) {
1770 *pnum = 0;
99723548 1771 goto out;
1772 }
1773
1774 if (ret & BDRV_BLOCK_RAW) {
81c219ac 1775 assert(ret & BDRV_BLOCK_OFFSET_VALID && *file);
1776 ret = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
1777 *pnum, pnum, file);
99723548 1778 goto out;
1779 }
1780
1781 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
1782 ret |= BDRV_BLOCK_ALLOCATED;
a53f1a95 1783 } else {
1784 if (bdrv_unallocated_blocks_are_zero(bs)) {
1785 ret |= BDRV_BLOCK_ZERO;
1786 } else if (bs->backing) {
1787 BlockDriverState *bs2 = bs->backing->bs;
1788 int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
1789 if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
1790 ret |= BDRV_BLOCK_ZERO;
1791 }
1792 }
1793 }
1794
ac987b30 1795 if (*file && *file != bs &&
1796 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
1797 (ret & BDRV_BLOCK_OFFSET_VALID)) {
67a0fd2a 1798 BlockDriverState *file2;
1799 int file_pnum;
1800
ac987b30 1801 ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
67a0fd2a 1802 *pnum, &file_pnum, &file2);
1803 if (ret2 >= 0) {
 1804 /* Ignore errors. This is just providing extra information; it
 1805 * is useful but not necessary.
1806 */
1807 if (ret2 & BDRV_BLOCK_EOF &&
1808 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
1809 /*
1810 * It is valid for the format block driver to read
1811 * beyond the end of the underlying file's current
1812 * size; such areas read as zero.
1813 */
1814 ret |= BDRV_BLOCK_ZERO;
1815 } else {
1816 /* Limit request to the range reported by the protocol driver */
1817 *pnum = file_pnum;
1818 ret |= (ret2 & BDRV_BLOCK_ZERO);
1819 }
1820 }
1821 }
1822
1823out:
1824 bdrv_dec_in_flight(bs);
1825 if (ret >= 0 && sector_num + *pnum == total_sectors) {
1826 ret |= BDRV_BLOCK_EOF;
1827 }
1828 return ret;
1829}
1830
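/* Example (sketch; the values are illustrative): querying the first 16
 * sectors and decoding the result. When BDRV_BLOCK_OFFSET_VALID is set,
 * BDRV_BLOCK_OFFSET_MASK extracts the host offset and 'file' names the
 * BDS that offset refers to:
 *
 *     BlockDriverState *file;
 *     int pnum;
 *     int64_t ret = bdrv_get_block_status(bs, 0, 16, &pnum, &file);
 *     if (ret >= 0 && (ret & BDRV_BLOCK_OFFSET_VALID)) {
 *         int64_t host_off = ret & BDRV_BLOCK_OFFSET_MASK;
 *         ... data for the first 'pnum' sectors lives at 'host_off'
 *             within 'file' ...
 *     }
 */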
1831static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
1832 BlockDriverState *base,
1833 int64_t sector_num,
1834 int nb_sectors,
1835 int *pnum,
1836 BlockDriverState **file)
1837{
1838 BlockDriverState *p;
1839 int64_t ret = 0;
c61e684e 1840 bool first = true;
1841
1842 assert(bs != base);
760e0063 1843 for (p = bs; p != base; p = backing_bs(p)) {
67a0fd2a 1844 ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
1845 if (ret < 0) {
1846 break;
1847 }
1848 if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
1849 /*
1850 * Reading beyond the end of the file continues to read
1851 * zeroes, but we can only widen the result to the
1852 * unallocated length we learned from an earlier
1853 * iteration.
1854 */
1855 *pnum = nb_sectors;
1856 }
1857 if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
1858 break;
1859 }
1860 /* [sector_num, pnum] unallocated on this layer, which could be only
1861 * the first part of [sector_num, nb_sectors]. */
1862 nb_sectors = MIN(nb_sectors, *pnum);
c61e684e 1863 first = false;
1864 }
1865 return ret;
1866}
1867
1868/* Coroutine wrapper for bdrv_get_block_status_above() */
1869static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
1870{
1871 BdrvCoGetBlockStatusData *data = opaque;
61007b31 1872
1873 data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
1874 data->sector_num,
1875 data->nb_sectors,
1876 data->pnum,
1877 data->file);
1878 data->done = true;
1879}
1880
1881/*
ba3f0e25 1882 * Synchronous wrapper around bdrv_co_get_block_status_above().
61007b31 1883 *
ba3f0e25 1884 * See bdrv_co_get_block_status_above() for details.
61007b31 1885 */
1886int64_t bdrv_get_block_status_above(BlockDriverState *bs,
1887 BlockDriverState *base,
1888 int64_t sector_num,
1889 int nb_sectors, int *pnum,
1890 BlockDriverState **file)
1891{
1892 Coroutine *co;
1893 BdrvCoGetBlockStatusData data = {
1894 .bs = bs,
ba3f0e25 1895 .base = base,
67a0fd2a 1896 .file = file,
1897 .sector_num = sector_num,
1898 .nb_sectors = nb_sectors,
1899 .pnum = pnum,
1900 .done = false,
1901 };
1902
1903 if (qemu_in_coroutine()) {
1904 /* Fast-path if already in coroutine context */
ba3f0e25 1905 bdrv_get_block_status_above_co_entry(&data);
61007b31 1906 } else {
1907 co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
1908 &data);
e92f0e19 1909 bdrv_coroutine_enter(bs, co);
88b062c2 1910 BDRV_POLL_WHILE(bs, !data.done);
1911 }
1912 return data.ret;
1913}
1914
1915int64_t bdrv_get_block_status(BlockDriverState *bs,
1916 int64_t sector_num,
1917 int nb_sectors, int *pnum,
1918 BlockDriverState **file)
ba3f0e25 1919{
760e0063 1920 return bdrv_get_block_status_above(bs, backing_bs(bs),
67a0fd2a 1921 sector_num, nb_sectors, pnum, file);
1922}
1923
1924int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
1925 int nb_sectors, int *pnum)
1926{
1927 BlockDriverState *file;
1928 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
1929 &file);
1930 if (ret < 0) {
1931 return ret;
1932 }
1933 return !!(ret & BDRV_BLOCK_ALLOCATED);
1934}
1935
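/* Example (sketch; 'total_sectors' and the consumer of each extent are
 * assumed): walking an image's allocation map from coroutine context,
 * advancing by whatever 'pnum' reports for each extent:
 *
 *     int64_t sector = 0;
 *     while (sector < total_sectors) {
 *         int pnum;
 *         int ret = bdrv_is_allocated(bs, sector,
 *                                     MIN(total_sectors - sector,
 *                                         BDRV_REQUEST_MAX_SECTORS),
 *                                     &pnum);
 *         if (ret < 0) {
 *             break;
 *         }
 *         ... [sector, sector + pnum) is allocated iff ret != 0 ...
 *         sector += pnum;
 *     }
 */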
1936/*
1937 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
1938 *
1939 * Return true if the given sector is allocated in any image between
1940 * BASE and TOP (inclusive). BASE can be NULL to check if the given
1941 * sector is allocated in any image of the chain. Return false otherwise.
1942 *
1943 * 'pnum' is set to the number of sectors (including and immediately following
1944 * the specified sector) that are known to be in the same
1945 * allocated/unallocated state.
1946 *
1947 */
1948int bdrv_is_allocated_above(BlockDriverState *top,
1949 BlockDriverState *base,
1950 int64_t sector_num,
1951 int nb_sectors, int *pnum)
1952{
1953 BlockDriverState *intermediate;
1954 int ret, n = nb_sectors;
1955
1956 intermediate = top;
1957 while (intermediate && intermediate != base) {
1958 int pnum_inter;
1959 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
1960 &pnum_inter);
1961 if (ret < 0) {
1962 return ret;
1963 } else if (ret) {
1964 *pnum = pnum_inter;
1965 return 1;
1966 }
1967
1968 /*
1969 * [sector_num, nb_sectors] is unallocated on top but intermediate
1970 * might have
1971 *
 1972 * [sector_num+x, nb_sectors] allocated.
1973 */
1974 if (n > pnum_inter &&
1975 (intermediate == top ||
1976 sector_num + pnum_inter < intermediate->total_sectors)) {
1977 n = pnum_inter;
1978 }
1979
760e0063 1980 intermediate = backing_bs(intermediate);
1981 }
1982
1983 *pnum = n;
1984 return 0;
1985}
1986
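/* Example (sketch): block-stream-like code can use this to decide
 * whether a range still has to be copied up from below 'base':
 *
 *     ret = bdrv_is_allocated_above(top, base, sector_num, n, &pnum);
 *     if (ret == 1) {
 *         ... the first 'pnum' sectors are already allocated somewhere
 *             in the subchain above 'base', so they need no copying ...
 *     }
 */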
1987typedef struct BdrvVmstateCo {
1988 BlockDriverState *bs;
1989 QEMUIOVector *qiov;
1990 int64_t pos;
1991 bool is_read;
1992 int ret;
1993} BdrvVmstateCo;
1994
1995static int coroutine_fn
1996bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
1997 bool is_read)
1998{
1999 BlockDriver *drv = bs->drv;
2000 int ret = -ENOTSUP;
2001
2002 bdrv_inc_in_flight(bs);
2003
2004 if (!drv) {
dc88a467 2005 ret = -ENOMEDIUM;
1a8ae822 2006 } else if (drv->bdrv_load_vmstate) {
2007 if (is_read) {
2008 ret = drv->bdrv_load_vmstate(bs, qiov, pos);
2009 } else {
2010 ret = drv->bdrv_save_vmstate(bs, qiov, pos);
2011 }
1a8ae822 2012 } else if (bs->file) {
dc88a467 2013 ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
2014 }
2015
2016 bdrv_dec_in_flight(bs);
2017 return ret;
2018}
2019
2020static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
2021{
2022 BdrvVmstateCo *co = opaque;
2023 co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
2024}
2025
2026static inline int
2027bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
2028 bool is_read)
2029{
2030 if (qemu_in_coroutine()) {
2031 return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
2032 } else {
2033 BdrvVmstateCo data = {
2034 .bs = bs,
2035 .qiov = qiov,
2036 .pos = pos,
2037 .is_read = is_read,
2038 .ret = -EINPROGRESS,
2039 };
0b8b8753 2040 Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);
1a8ae822 2041
e92f0e19 2042 bdrv_coroutine_enter(bs, co);
ea17c9d2 2043 BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
2044 return data.ret;
2045 }
2046}
2047
2048int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2049 int64_t pos, int size)
2050{
2051 QEMUIOVector qiov;
2052 struct iovec iov = {
2053 .iov_base = (void *) buf,
2054 .iov_len = size,
2055 };
b433d942 2056 int ret;
2057
2058 qemu_iovec_init_external(&qiov, &iov, 1);
2059
2060 ret = bdrv_writev_vmstate(bs, &qiov, pos);
2061 if (ret < 0) {
2062 return ret;
2063 }
2064
2065 return size;
2066}
2067
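/* Example (sketch; 'buf', 'pos' and 'size' are illustrative): migration
 * code stores the VM state blob at a driver-defined location (a qcow2
 * snapshot area, for instance) rather than in guest-visible data; on
 * success the full 'size' is returned, on failure a negative errno:
 *
 *     ret = bdrv_save_vmstate(bs, buf, pos, size);
 *     if (ret < 0) {
 *         ... report the error ...
 *     }
 */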
2068int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2069{
1a8ae822 2070 return bdrv_rw_vmstate(bs, qiov, pos, false);
2071}
2072
2073int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2074 int64_t pos, int size)
2075{
2076 QEMUIOVector qiov;
2077 struct iovec iov = {
2078 .iov_base = buf,
2079 .iov_len = size,
2080 };
b433d942 2081 int ret;
2082
2083 qemu_iovec_init_external(&qiov, &iov, 1);
2084 ret = bdrv_readv_vmstate(bs, &qiov, pos);
2085 if (ret < 0) {
2086 return ret;
2087 }
2088
2089 return size;
2090}
2091
2092int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
61007b31 2093{
1a8ae822 2094 return bdrv_rw_vmstate(bs, qiov, pos, true);
2095}
2096
2097/**************************************************************/
2098/* async I/Os */
2099
2100void bdrv_aio_cancel(BlockAIOCB *acb)
2101{
2102 qemu_aio_ref(acb);
2103 bdrv_aio_cancel_async(acb);
2104 while (acb->refcnt > 1) {
2105 if (acb->aiocb_info->get_aio_context) {
2106 aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2107 } else if (acb->bs) {
2108 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
2109 * assert that we're not using an I/O thread. Thread-safe
2110 * code should use bdrv_aio_cancel_async exclusively.
2111 */
2112 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
2113 aio_poll(bdrv_get_aio_context(acb->bs), true);
2114 } else {
2115 abort();
2116 }
2117 }
2118 qemu_aio_unref(acb);
2119}
2120
2121/* Async version of aio cancel. The caller is not blocked if the acb implements
 2122 * cancel_async; otherwise we do nothing and let the request complete normally.
2123 * In either case the completion callback must be called. */
2124void bdrv_aio_cancel_async(BlockAIOCB *acb)
2125{
2126 if (acb->aiocb_info->cancel_async) {
2127 acb->aiocb_info->cancel_async(acb);
2128 }
2129}
2130
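/* Example (sketch): code that may run outside the main loop thread
 * should prefer the asynchronous variant and do its cleanup from the
 * completion callback, which still runs exactly once (on cancellation
 * typically with a negative status such as -ECANCELED, depending on
 * the AIO implementation):
 *
 *     bdrv_aio_cancel_async(acb);
 *     ... continue; the callback will fire later ...
 */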
2131/**************************************************************/
2132/* Coroutine block device emulation */
2133
2134typedef struct FlushCo {
2135 BlockDriverState *bs;
2136 int ret;
2137} FlushCo;
2138
2139
2140static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2141{
e293b7a3 2142 FlushCo *rwco = opaque;
2143
2144 rwco->ret = bdrv_co_flush(rwco->bs);
2145}
2146
2147int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2148{
2149 int current_gen;
2150 int ret = 0;
2151
2152 bdrv_inc_in_flight(bs);
61007b31 2153
e914404e 2154 if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
1b6bc94d 2155 bdrv_is_sg(bs)) {
49ca6259 2156 goto early_exit;
2157 }
2158
3783fa3d 2159 qemu_co_mutex_lock(&bs->reqs_lock);
47fec599 2160 current_gen = atomic_read(&bs->write_gen);
2161
2162 /* Wait until any previous flushes are completed */
99723548 2163 while (bs->active_flush_req) {
3783fa3d 2164 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
2165 }
2166
3783fa3d 2167 /* Flushes reach this point in nondecreasing current_gen order. */
99723548 2168 bs->active_flush_req = true;
3783fa3d 2169 qemu_co_mutex_unlock(&bs->reqs_lock);
3ff2f67a 2170
2171 /* Write back all layers by calling one driver function */
2172 if (bs->drv->bdrv_co_flush) {
2173 ret = bs->drv->bdrv_co_flush(bs);
2174 goto out;
2175 }
2176
2177 /* Write back cached data to the OS even with cache=unsafe */
2178 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
2179 if (bs->drv->bdrv_co_flush_to_os) {
2180 ret = bs->drv->bdrv_co_flush_to_os(bs);
2181 if (ret < 0) {
cdb5e315 2182 goto out;
2183 }
2184 }
2185
2186 /* But don't actually force it to the disk with cache=unsafe */
2187 if (bs->open_flags & BDRV_O_NO_FLUSH) {
2188 goto flush_parent;
2189 }
2190
2191 /* Check if we really need to flush anything */
2192 if (bs->flushed_gen == current_gen) {
2193 goto flush_parent;
2194 }
2195
2196 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
2197 if (bs->drv->bdrv_co_flush_to_disk) {
2198 ret = bs->drv->bdrv_co_flush_to_disk(bs);
2199 } else if (bs->drv->bdrv_aio_flush) {
2200 BlockAIOCB *acb;
2201 CoroutineIOCompletion co = {
2202 .coroutine = qemu_coroutine_self(),
2203 };
2204
2205 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2206 if (acb == NULL) {
2207 ret = -EIO;
2208 } else {
2209 qemu_coroutine_yield();
2210 ret = co.ret;
2211 }
2212 } else {
2213 /*
2214 * Some block drivers always operate in either writethrough or unsafe
 2215 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2216 * know how the server works (because the behaviour is hardcoded or
2217 * depends on server-side configuration), so we can't ensure that
2218 * everything is safe on disk. Returning an error doesn't work because
2219 * that would break guests even if the server operates in writethrough
2220 * mode.
2221 *
2222 * Let's hope the user knows what he's doing.
2223 */
2224 ret = 0;
2225 }
3ff2f67a 2226
61007b31 2227 if (ret < 0) {
cdb5e315 2228 goto out;
2229 }
2230
2231 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
2232 * in the case of cache=unsafe, so there are no useless flushes.
2233 */
2234flush_parent:
2235 ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2236out:
3ff2f67a 2237 /* Notify any pending flushes that we have completed */
2238 if (ret == 0) {
2239 bs->flushed_gen = current_gen;
2240 }
2241
2242 qemu_co_mutex_lock(&bs->reqs_lock);
99723548 2243 bs->active_flush_req = false;
2244 /* Return value is ignored - it's ok if wait queue is empty */
2245 qemu_co_queue_next(&bs->flush_queue);
3783fa3d 2246 qemu_co_mutex_unlock(&bs->reqs_lock);
3ff2f67a 2247
49ca6259 2248early_exit:
99723548 2249 bdrv_dec_in_flight(bs);
cdb5e315 2250 return ret;
2251}
2252
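/* Sketch of the generation optimisation above (illustrative timeline):
 * back-to-back flushes with no intervening write share one disk flush:
 *
 *     write completes       -> bs->write_gen becomes N
 *     bdrv_co_flush(bs)     -> flushes to disk, bs->flushed_gen = N
 *     bdrv_co_flush(bs)     -> flushed_gen == current_gen, so only the
 *                              parent (bs->file) chain is flushed again
 */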
2253int bdrv_flush(BlockDriverState *bs)
2254{
2255 Coroutine *co;
e293b7a3 2256 FlushCo flush_co = {
2257 .bs = bs,
2258 .ret = NOT_DONE,
2259 };
2260
2261 if (qemu_in_coroutine()) {
2262 /* Fast-path if already in coroutine context */
e293b7a3 2263 bdrv_flush_co_entry(&flush_co);
61007b31 2264 } else {
0b8b8753 2265 co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
e92f0e19 2266 bdrv_coroutine_enter(bs, co);
88b062c2 2267 BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
2268 }
2269
e293b7a3 2270 return flush_co.ret;
2271}
2272
2273typedef struct DiscardCo {
2274 BlockDriverState *bs;
0c51a893 2275 int64_t offset;
f5a5ca79 2276 int bytes;
2277 int ret;
2278} DiscardCo;
0c51a893 2279static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
2280{
2281 DiscardCo *rwco = opaque;
2282
f5a5ca79 2283 rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->bytes);
2284}
2285
9f1963b3 2286int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
f5a5ca79 2287 int bytes)
61007b31 2288{
b1066c87 2289 BdrvTrackedRequest req;
9f1963b3 2290 int max_pdiscard, ret;
3482b9bc 2291 int head, tail, align;
2292
2293 if (!bs->drv) {
2294 return -ENOMEDIUM;
2295 }
2296
f5a5ca79 2297 ret = bdrv_check_byte_request(bs, offset, bytes);
2298 if (ret < 0) {
2299 return ret;
2300 } else if (bs->read_only) {
eaf5fe2d 2301 return -EPERM;
61007b31 2302 }
04c01a5c 2303 assert(!(bs->open_flags & BDRV_O_INACTIVE));
61007b31 2304
2305 /* Do nothing if disabled. */
2306 if (!(bs->open_flags & BDRV_O_UNMAP)) {
2307 return 0;
2308 }
2309
02aefe43 2310 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
2311 return 0;
2312 }
2313
2314 /* Discard is advisory, but some devices track and coalesce
2315 * unaligned requests, so we must pass everything down rather than
2316 * round here. Still, most devices will just silently ignore
2317 * unaligned requests (by returning -ENOTSUP), so we must fragment
2318 * the request accordingly. */
02aefe43 2319 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
2320 assert(align % bs->bl.request_alignment == 0);
2321 head = offset % align;
f5a5ca79 2322 tail = (offset + bytes) % align;
9f1963b3 2323
99723548 2324 bdrv_inc_in_flight(bs);
f5a5ca79 2325 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
50824995 2326
2327 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
2328 if (ret < 0) {
2329 goto out;
2330 }
2331
2332 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
2333 align);
3482b9bc 2334 assert(max_pdiscard >= bs->bl.request_alignment);
61007b31 2335
f5a5ca79 2336 while (bytes > 0) {
9f1963b3 2337 int ret;
f5a5ca79 2338 int num = bytes;
2339
2340 if (head) {
2341 /* Make small requests to get to alignment boundaries. */
f5a5ca79 2342 num = MIN(bytes, align - head);
2343 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
2344 num %= bs->bl.request_alignment;
2345 }
2346 head = (head + num) % align;
2347 assert(num < max_pdiscard);
2348 } else if (tail) {
2349 if (num > align) {
2350 /* Shorten the request to the last aligned cluster. */
2351 num -= tail;
2352 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
2353 tail > bs->bl.request_alignment) {
2354 tail %= bs->bl.request_alignment;
2355 num -= tail;
2356 }
2357 }
2358 /* limit request size */
2359 if (num > max_pdiscard) {
2360 num = max_pdiscard;
2361 }
61007b31 2362
2363 if (bs->drv->bdrv_co_pdiscard) {
2364 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
2365 } else {
2366 BlockAIOCB *acb;
2367 CoroutineIOCompletion co = {
2368 .coroutine = qemu_coroutine_self(),
2369 };
2370
2371 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
2372 bdrv_co_io_em_complete, &co);
61007b31 2373 if (acb == NULL) {
2374 ret = -EIO;
2375 goto out;
2376 } else {
2377 qemu_coroutine_yield();
2378 ret = co.ret;
2379 }
2380 }
2381 if (ret && ret != -ENOTSUP) {
b1066c87 2382 goto out;
2383 }
2384
9f1963b3 2385 offset += num;
f5a5ca79 2386 bytes -= num;
61007b31 2387 }
2388 ret = 0;
2389out:
47fec599 2390 atomic_inc(&bs->write_gen);
2391 bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS,
2392 req.bytes >> BDRV_SECTOR_BITS);
b1066c87 2393 tracked_request_end(&req);
99723548 2394 bdrv_dec_in_flight(bs);
b1066c87 2395 return ret;
2396}
2397
f5a5ca79 2398int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
2399{
2400 Coroutine *co;
2401 DiscardCo rwco = {
2402 .bs = bs,
0c51a893 2403 .offset = offset,
f5a5ca79 2404 .bytes = bytes,
2405 .ret = NOT_DONE,
2406 };
2407
2408 if (qemu_in_coroutine()) {
2409 /* Fast-path if already in coroutine context */
0c51a893 2410 bdrv_pdiscard_co_entry(&rwco);
61007b31 2411 } else {
0c51a893 2412 co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
e92f0e19 2413 bdrv_coroutine_enter(bs, co);
88b062c2 2414 BDRV_POLL_WHILE(bs, rwco.ret == NOT_DONE);
2415 }
2416
2417 return rwco.ret;
2418}
2419
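/* Example (sketch): discarding the first 64 KiB of an image opened with
 * BDRV_O_UNMAP. Alignment is handled internally by fragmenting at
 * 'align' boundaries, and a driver returning -ENOTSUP is treated as
 * success because discard is only advisory:
 *
 *     ret = bdrv_pdiscard(bs, 0, 64 * 1024);
 *     if (ret < 0) {
 *         ... a real error, not mere lack of driver support ...
 *     }
 */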
48af776a 2420int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
2421{
2422 BlockDriver *drv = bs->drv;
2423 CoroutineIOCompletion co = {
2424 .coroutine = qemu_coroutine_self(),
2425 };
2426 BlockAIOCB *acb;
61007b31 2427
99723548 2428 bdrv_inc_in_flight(bs);
16a389dc 2429 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
2430 co.ret = -ENOTSUP;
2431 goto out;
2432 }
2433
2434 if (drv->bdrv_co_ioctl) {
2435 co.ret = drv->bdrv_co_ioctl(bs, req, buf);
2436 } else {
2437 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
2438 if (!acb) {
2439 co.ret = -ENOTSUP;
2440 goto out;
2441 }
2442 qemu_coroutine_yield();
5c5ae76a 2443 }
5c5ae76a 2444out:
99723548 2445 bdrv_dec_in_flight(bs);
2446 return co.ret;
2447}
2448
2449void *qemu_blockalign(BlockDriverState *bs, size_t size)
2450{
2451 return qemu_memalign(bdrv_opt_mem_align(bs), size);
2452}
2453
2454void *qemu_blockalign0(BlockDriverState *bs, size_t size)
2455{
2456 return memset(qemu_blockalign(bs, size), 0, size);
2457}
2458
2459void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
2460{
2461 size_t align = bdrv_opt_mem_align(bs);
2462
2463 /* Ensure that NULL is never returned on success */
2464 assert(align > 0);
2465 if (size == 0) {
2466 size = align;
2467 }
2468
2469 return qemu_try_memalign(align, size);
2470}
2471
2472void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
2473{
2474 void *mem = qemu_try_blockalign(bs, size);
2475
2476 if (mem) {
2477 memset(mem, 0, size);
2478 }
2479
2480 return mem;
2481}
2482
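/* Example (sketch): allocating a zeroed bounce buffer that satisfies
 * the device's memory-alignment constraints; buffers from these
 * helpers are released with qemu_vfree():
 *
 *     void *bounce = qemu_try_blockalign0(bs, len);
 *     if (bounce == NULL) {
 *         return -ENOMEM;
 *     }
 *     ...
 *     qemu_vfree(bounce);
 */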
2483/*
2484 * Check if all memory in this vector is sector aligned.
2485 */
2486bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
2487{
2488 int i;
4196d2f0 2489 size_t alignment = bdrv_min_mem_align(bs);
2490
2491 for (i = 0; i < qiov->niov; i++) {
2492 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
2493 return false;
2494 }
2495 if (qiov->iov[i].iov_len % alignment) {
2496 return false;
2497 }
2498 }
2499
2500 return true;
2501}
2502
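/* Example (sketch): a driver with O_DIRECT-like restrictions checks
 * the vector first and falls back to a bounce buffer when necessary:
 *
 *     if (!bdrv_qiov_is_aligned(bs, qiov)) {
 *         ... copy through a buffer from qemu_blockalign(bs, size) ...
 *     }
 */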
2503void bdrv_add_before_write_notifier(BlockDriverState *bs,
2504 NotifierWithReturn *notifier)
2505{
2506 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
2507}
2508
2509void bdrv_io_plug(BlockDriverState *bs)
2510{
2511 BdrvChild *child;
2512
2513 QLIST_FOREACH(child, &bs->children, next) {
2514 bdrv_io_plug(child->bs);
2515 }
2516
850d54a2 2517 if (atomic_fetch_inc(&bs->io_plugged) == 0) {
2518 BlockDriver *drv = bs->drv;
2519 if (drv && drv->bdrv_io_plug) {
2520 drv->bdrv_io_plug(bs);
2521 }
2522 }
2523}
2524
2525void bdrv_io_unplug(BlockDriverState *bs)
2526{
2527 BdrvChild *child;
2528
2529 assert(bs->io_plugged);
850d54a2 2530 if (atomic_fetch_dec(&bs->io_plugged) == 1) {
2531 BlockDriver *drv = bs->drv;
2532 if (drv && drv->bdrv_io_unplug) {
2533 drv->bdrv_io_unplug(bs);
2534 }
2535 }
2536
2537 QLIST_FOREACH(child, &bs->children, next) {
2538 bdrv_io_unplug(child->bs);
2539 }
2540}
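
/* Example (sketch): plug/unplug brackets a burst of submissions so
 * that drivers which support batching can issue them in one go; the
 * calls nest, and only the outermost unplug reaches the driver:
 *
 *     bdrv_io_plug(bs);
 *     ... queue several asynchronous requests ...
 *     bdrv_io_unplug(bs);
 */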