1 /*
2  * Block layer I/O functions
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24
25 #include "qemu/osdep.h"
26 #include "trace.h"
27 #include "sysemu/block-backend.h"
28 #include "block/aio-wait.h"
29 #include "block/blockjob.h"
30 #include "block/blockjob_int.h"
31 #include "block/block_int.h"
32 #include "qemu/cutils.h"
33 #include "qapi/error.h"
34 #include "qemu/error-report.h"
35
36 #define NOT_DONE 0x7fffffff /* used while an emulated sync operation is in progress */
37
38 /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
39 #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
40
41 static void bdrv_parent_cb_resize(BlockDriverState *bs);
42 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
43     int64_t offset, int bytes, BdrvRequestFlags flags);
44
45 void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
46                                bool ignore_bds_parents)
47 {
48     BdrvChild *c, *next;
49
50     QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
51         if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
52             continue;
53         }
54         bdrv_parent_drained_begin_single(c, false);
55     }
56 }
57
58 void bdrv_parent_drained_end_single(BdrvChild *c)
59 {
60     assert(c->parent_quiesce_counter > 0);
61     c->parent_quiesce_counter--;
62     if (c->role->drained_end) {
63         c->role->drained_end(c);
64     }
65 }
66
67 void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
68                              bool ignore_bds_parents)
69 {
70     BdrvChild *c, *next;
71
72     QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
73         if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
74             continue;
75         }
76         bdrv_parent_drained_end_single(c);
77     }
78 }
79
80 static bool bdrv_parent_drained_poll_single(BdrvChild *c)
81 {
82     if (c->role->drained_poll) {
83         return c->role->drained_poll(c);
84     }
85     return false;
86 }
87
88 static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
89                                      bool ignore_bds_parents)
90 {
91     BdrvChild *c, *next;
92     bool busy = false;
93
94     QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
95         if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
96             continue;
97         }
98         busy |= bdrv_parent_drained_poll_single(c);
99     }
100
101     return busy;
102 }
103
104 void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
105 {
106     c->parent_quiesce_counter++;
107     if (c->role->drained_begin) {
108         c->role->drained_begin(c);
109     }
110     if (poll) {
111         BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
112     }
113 }
114
115 static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
116 {
117     dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
118     dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
119     dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
120                                  src->opt_mem_alignment);
121     dst->min_mem_alignment = MAX(dst->min_mem_alignment,
122                                  src->min_mem_alignment);
123     dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
124 }
125
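/*
 * For illustration (hypothetical values, not from any particular driver):
 * MIN_NON_ZERO() treats 0 as "no limit", so a child that imposes no limit
 * never tightens the merged result, while MAX() keeps the strictest
 * alignment requirement:
 *
 *     dst->max_transfer = 0 (unlimited), src->max_transfer = 65536
 *         => merged max_transfer = 65536
 *     dst->opt_mem_alignment = 512, src->opt_mem_alignment = 4096
 *         => merged opt_mem_alignment = 4096
 */
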
126 void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
127 {
128     BlockDriver *drv = bs->drv;
129     Error *local_err = NULL;
130
131     memset(&bs->bl, 0, sizeof(bs->bl));
132
133     if (!drv) {
134         return;
135     }
136
137     /* Default alignment based on whether driver has byte interface */
138     bs->bl.request_alignment = (drv->bdrv_co_preadv ||
139                                 drv->bdrv_aio_preadv) ? 1 : 512;
140
141     /* Take some limits from the children as a default */
142     if (bs->file) {
143         bdrv_refresh_limits(bs->file->bs, &local_err);
144         if (local_err) {
145             error_propagate(errp, local_err);
146             return;
147         }
148         bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
149     } else {
150         bs->bl.min_mem_alignment = 512;
151         bs->bl.opt_mem_alignment = getpagesize();
152
153         /* Safe default since most protocols use readv()/writev()/etc */
154         bs->bl.max_iov = IOV_MAX;
155     }
156
157     if (bs->backing) {
158         bdrv_refresh_limits(bs->backing->bs, &local_err);
159         if (local_err) {
160             error_propagate(errp, local_err);
161             return;
162         }
163         bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
164     }
165
166     /* Then let the driver override it */
167     if (drv->bdrv_refresh_limits) {
168         drv->bdrv_refresh_limits(bs, errp);
169     }
170 }
171
172 /**
173  * The copy-on-read flag is actually a reference count so multiple users may
174  * use the feature without worrying about clobbering its previous state.
175  * Copy-on-read stays enabled until every user has disabled it again.
176  */
177 void bdrv_enable_copy_on_read(BlockDriverState *bs)
178 {
179     atomic_inc(&bs->copy_on_read);
180 }
181
182 void bdrv_disable_copy_on_read(BlockDriverState *bs)
183 {
184     int old = atomic_fetch_dec(&bs->copy_on_read);
185     assert(old >= 1);
186 }
187
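/*
 * For illustration, a hypothetical sequence showing the reference-count
 * semantics described above (not taken from an actual caller):
 *
 *     bdrv_enable_copy_on_read(bs);    (* copy_on_read: 0 -> 1, COR active *)
 *     bdrv_enable_copy_on_read(bs);    (* copy_on_read: 1 -> 2, still active *)
 *     bdrv_disable_copy_on_read(bs);   (* copy_on_read: 2 -> 1, still active *)
 *     bdrv_disable_copy_on_read(bs);   (* copy_on_read: 1 -> 0, COR disabled *)
 */
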
188 typedef struct {
189     Coroutine *co;
190     BlockDriverState *bs;
191     bool done;
192     bool begin;
193     bool recursive;
194     bool poll;
195     BdrvChild *parent;
196     bool ignore_bds_parents;
197     int *drained_end_counter;
198 } BdrvCoDrainData;
199
200 static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
201 {
202     BdrvCoDrainData *data = opaque;
203     BlockDriverState *bs = data->bs;
204
205     if (data->begin) {
206         bs->drv->bdrv_co_drain_begin(bs);
207     } else {
208         bs->drv->bdrv_co_drain_end(bs);
209     }
210
211     /* Set data->done before reading bs->wakeup.  */
212     atomic_mb_set(&data->done, true);
213     bdrv_dec_in_flight(bs);
214
215     if (data->drained_end_counter) {
216         atomic_dec(data->drained_end_counter);
217     }
218
219     if (data->begin || data->drained_end_counter) {
220         g_free(data);
221     }
222 }
223
224 /* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
225 static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
226                               int *drained_end_counter)
227 {
228     BdrvCoDrainData *data;
229
230     if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
231             (!begin && !bs->drv->bdrv_co_drain_end)) {
232         return;
233     }
234
235     data = g_new(BdrvCoDrainData, 1);
236     *data = (BdrvCoDrainData) {
237         .bs = bs,
238         .done = false,
239         .begin = begin,
240         .drained_end_counter = drained_end_counter,
241     };
242
243     if (!begin && drained_end_counter) {
244         atomic_inc(drained_end_counter);
245     }
246
247     /* Make sure the driver callback completes during the polling phase for
248      * drain_begin. */
249     bdrv_inc_in_flight(bs);
250     data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
251     aio_co_schedule(bdrv_get_aio_context(bs), data->co);
252
253     /*
254      * TODO: Drop this and make callers pass @drained_end_counter and poll
255      * themselves
256      */
257     if (!begin && !drained_end_counter) {
258         BDRV_POLL_WHILE(bs, !data->done);
259         g_free(data);
260     }
261 }
262
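/*
 * The TODO above describes the intended calling convention: instead of
 * letting bdrv_drain_invoke() poll on its own when ending a drain, the
 * caller passes a counter that is incremented here and decremented by
 * bdrv_drain_invoke_entry() once the driver callback has run, and then
 * polls until the counter drops to zero. A minimal sketch of that pattern
 * (illustrative only, not an existing caller in this file):
 *
 *     int drained_end_counter = 0;
 *
 *     bdrv_drain_invoke(bs, false, &drained_end_counter);
 *     (* ... end-drain further nodes, adding to the same counter ... *)
 *     BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0);
 */
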
263 /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
264 bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
265                      BdrvChild *ignore_parent, bool ignore_bds_parents)
266 {
267     BdrvChild *child, *next;
268
269     if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
270         return true;
271     }
272
273     if (atomic_read(&bs->in_flight)) {
274         return true;
275     }
276
277     if (recursive) {
278         assert(!ignore_bds_parents);
279         QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
280             if (bdrv_drain_poll(child->bs, recursive, child, false)) {
281                 return true;
282             }
283         }
284     }
285
286     return false;
287 }
288
289 static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
290                                       BdrvChild *ignore_parent)
291 {
292     return bdrv_drain_poll(bs, recursive, ignore_parent, false);
293 }
294
295 static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
296                                   BdrvChild *parent, bool ignore_bds_parents,
297                                   bool poll);
298 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
299                                 BdrvChild *parent, bool ignore_bds_parents,
300                                 int *drained_end_counter);
301
302 static void bdrv_co_drain_bh_cb(void *opaque)
303 {
304     BdrvCoDrainData *data = opaque;
305     Coroutine *co = data->co;
306     BlockDriverState *bs = data->bs;
307
308     if (bs) {
309         AioContext *ctx = bdrv_get_aio_context(bs);
310         AioContext *co_ctx = qemu_coroutine_get_aio_context(co);
311
312         /*
313          * When the coroutine yielded, the lock for its home context was
314          * released, so we need to re-acquire it here. If it explicitly
315          * acquired a different context, the lock is still held and we don't
316          * want to lock it a second time (or AIO_WAIT_WHILE() would hang).
317          */
318         if (ctx == co_ctx) {
319             aio_context_acquire(ctx);
320         }
321         bdrv_dec_in_flight(bs);
322         if (data->begin) {
323             bdrv_do_drained_begin(bs, data->recursive, data->parent,
324                                   data->ignore_bds_parents, data->poll);
325         } else {
326             bdrv_do_drained_end(bs, data->recursive, data->parent,
327                                 data->ignore_bds_parents,
328                                 data->drained_end_counter);
329         }
330         if (ctx == co_ctx) {
331             aio_context_release(ctx);
332         }
333     } else {
334         assert(data->begin);
335         bdrv_drain_all_begin();
336     }
337
338     data->done = true;
339     aio_co_wake(co);
340 }
341
342 static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
343                                                 bool begin, bool recursive,
344                                                 BdrvChild *parent,
345                                                 bool ignore_bds_parents,
346                                                 bool poll,
347                                                 int *drained_end_counter)
348 {
349     BdrvCoDrainData data;
350
351     /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
352      * other coroutines run if they were queued by aio_co_enter(). */
353
354     assert(qemu_in_coroutine());
355     data = (BdrvCoDrainData) {
356         .co = qemu_coroutine_self(),
357         .bs = bs,
358         .done = false,
359         .begin = begin,
360         .recursive = recursive,
361         .parent = parent,
362         .ignore_bds_parents = ignore_bds_parents,
363         .poll = poll,
364         .drained_end_counter = drained_end_counter,
365     };
366
367     if (bs) {
368         bdrv_inc_in_flight(bs);
369     }
370     aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
371                             bdrv_co_drain_bh_cb, &data);
372
373     qemu_coroutine_yield();
374     /* If we are resumed from some other event (such as an aio completion or a
375      * timer callback), it is a bug in the caller that should be fixed. */
376     assert(data.done);
377 }
378
379 void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
380                                    BdrvChild *parent, bool ignore_bds_parents)
381 {
382     assert(!qemu_in_coroutine());
383
384     /* Stop things in parent-to-child order */
385     if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
386         aio_disable_external(bdrv_get_aio_context(bs));
387     }
388
389     bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
390     bdrv_drain_invoke(bs, true, NULL);
391 }
392
393 static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
394                                   BdrvChild *parent, bool ignore_bds_parents,
395                                   bool poll)
396 {
397     BdrvChild *child, *next;
398
399     if (qemu_in_coroutine()) {
400         bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
401                                poll, NULL);
402         return;
403     }
404
405     bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);
406
407     if (recursive) {
408         assert(!ignore_bds_parents);
409         bs->recursive_quiesce_counter++;
410         QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
411             bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
412                                   false);
413         }
414     }
415
416     /*
417      * Wait for drained requests to finish.
418      *
419      * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
420      * call is needed so things in this AioContext can make progress even
421      * though we don't return to the main AioContext loop - this automatically
422      * includes other nodes in the same AioContext and therefore all child
423      * nodes.
424      */
425     if (poll) {
426         assert(!ignore_bds_parents);
427         BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
428     }
429 }
430
431 void bdrv_drained_begin(BlockDriverState *bs)
432 {
433     bdrv_do_drained_begin(bs, false, NULL, false, true);
434 }
435
436 void bdrv_subtree_drained_begin(BlockDriverState *bs)
437 {
438     bdrv_do_drained_begin(bs, true, NULL, false, true);
439 }
440
441 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
442                                 BdrvChild *parent, bool ignore_bds_parents,
443                                 int *drained_end_counter)
444 {
445     BdrvChild *child, *next;
446     int old_quiesce_counter;
447
448     if (qemu_in_coroutine()) {
449         bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
450                                false, drained_end_counter);
451         return;
452     }
453     assert(bs->quiesce_counter > 0);
454
455     /* Re-enable things in child-to-parent order */
456     bdrv_drain_invoke(bs, false, drained_end_counter);
457     bdrv_parent_drained_end(bs, parent, ignore_bds_parents);
458
459     old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);
460     if (old_quiesce_counter == 1) {
461         aio_enable_external(bdrv_get_aio_context(bs));
462     }
463
464     if (recursive) {
465         assert(!ignore_bds_parents);
466         bs->recursive_quiesce_counter--;
467         QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
468             bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
469                                 drained_end_counter);
470         }
471     }
472 }
473
474 void bdrv_drained_end(BlockDriverState *bs)
475 {
476     bdrv_do_drained_end(bs, false, NULL, false, NULL);
477 }
478
479 void bdrv_subtree_drained_end(BlockDriverState *bs)
480 {
481     bdrv_do_drained_end(bs, true, NULL, false, NULL);
482 }
483
484 void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
485 {
486     int i;
487
488     for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
489         bdrv_do_drained_begin(child->bs, true, child, false, true);
490     }
491 }
492
493 void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
494 {
495     int i;
496
497     for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
498         bdrv_do_drained_end(child->bs, true, child, false, NULL);
499     }
500 }
501
502 /*
503  * Wait for pending requests to complete on a single BlockDriverState subtree,
504  * and suspend the block driver's internal I/O until the next request arrives.
505  *
506  * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
507  * AioContext.
508  */
509 void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
510 {
511     assert(qemu_in_coroutine());
512     bdrv_drained_begin(bs);
513     bdrv_drained_end(bs);
514 }
515
516 void bdrv_drain(BlockDriverState *bs)
517 {
518     bdrv_drained_begin(bs);
519     bdrv_drained_end(bs);
520 }
521
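/*
 * A drained section is the usual way a caller gets exclusive access to a
 * node. A minimal sketch of the begin/end pairing (hypothetical caller,
 * assuming the node's AioContext is already held):
 *
 *     bdrv_drained_begin(bs);     (* quiesce parents, wait for requests *)
 *     (* ... safely reconfigure bs or its children here ... *)
 *     bdrv_drained_end(bs);       (* resume parents and driver I/O *)
 */
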
522 static void bdrv_drain_assert_idle(BlockDriverState *bs)
523 {
524     BdrvChild *child, *next;
525
526     assert(atomic_read(&bs->in_flight) == 0);
527     QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
528         bdrv_drain_assert_idle(child->bs);
529     }
530 }
531
532 unsigned int bdrv_drain_all_count = 0;
533
534 static bool bdrv_drain_all_poll(void)
535 {
536     BlockDriverState *bs = NULL;
537     bool result = false;
538
539     /* bdrv_drain_poll() can't make changes to the graph and we are holding the
540      * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
541     while ((bs = bdrv_next_all_states(bs))) {
542         AioContext *aio_context = bdrv_get_aio_context(bs);
543         aio_context_acquire(aio_context);
544         result |= bdrv_drain_poll(bs, false, NULL, true);
545         aio_context_release(aio_context);
546     }
547
548     return result;
549 }
550
551 /*
552  * Wait for pending requests to complete across all BlockDriverStates
553  *
554  * This function does not flush data to disk, use bdrv_flush_all() for that
555  * after calling this function.
556  *
557  * This pauses all block jobs and disables external clients. It must
558  * be paired with bdrv_drain_all_end().
559  *
560  * NOTE: no new block jobs or BlockDriverStates can be created between
561  * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
562  */
563 void bdrv_drain_all_begin(void)
564 {
565     BlockDriverState *bs = NULL;
566
567     if (qemu_in_coroutine()) {
568         bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
569         return;
570     }
571
572     /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
573      * loop AioContext, so make sure we're in the main context. */
574     assert(qemu_get_current_aio_context() == qemu_get_aio_context());
575     assert(bdrv_drain_all_count < INT_MAX);
576     bdrv_drain_all_count++;
577
578     /* Quiesce all nodes, without polling in-flight requests yet. The graph
579      * cannot change during this loop. */
580     while ((bs = bdrv_next_all_states(bs))) {
581         AioContext *aio_context = bdrv_get_aio_context(bs);
582
583         aio_context_acquire(aio_context);
584         bdrv_do_drained_begin(bs, false, NULL, true, false);
585         aio_context_release(aio_context);
586     }
587
588     /* Now poll the in-flight requests */
589     AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());
590
591     while ((bs = bdrv_next_all_states(bs))) {
592         bdrv_drain_assert_idle(bs);
593     }
594 }
595
596 void bdrv_drain_all_end(void)
597 {
598     BlockDriverState *bs = NULL;
599
600     while ((bs = bdrv_next_all_states(bs))) {
601         AioContext *aio_context = bdrv_get_aio_context(bs);
602
603         aio_context_acquire(aio_context);
604         bdrv_do_drained_end(bs, false, NULL, true, NULL);
605         aio_context_release(aio_context);
606     }
607
608     assert(bdrv_drain_all_count > 0);
609     bdrv_drain_all_count--;
610 }
611
612 void bdrv_drain_all(void)
613 {
614     bdrv_drain_all_begin();
615     bdrv_drain_all_end();
616 }
617
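/*
 * As noted above, bdrv_drain_all_begin() must be paired with
 * bdrv_drain_all_end(), and no new nodes or block jobs may be created in
 * between. A sketch of that pattern (hypothetical caller in the main loop):
 *
 *     bdrv_drain_all_begin();
 *     (* ... operate on the quiesced graph ... *)
 *     bdrv_drain_all_end();
 *
 * bdrv_drain_all() above is simply this pair with nothing in between.
 */
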
618 /**
619  * Remove an active request from the tracked requests list
620  *
621  * This function should be called when a tracked request is completing.
622  */
623 static void tracked_request_end(BdrvTrackedRequest *req)
624 {
625     if (req->serialising) {
626         atomic_dec(&req->bs->serialising_in_flight);
627     }
628
629     qemu_co_mutex_lock(&req->bs->reqs_lock);
630     QLIST_REMOVE(req, list);
631     qemu_co_queue_restart_all(&req->wait_queue);
632     qemu_co_mutex_unlock(&req->bs->reqs_lock);
633 }
634
635 /**
636  * Add an active request to the tracked requests list
637  */
638 static void tracked_request_begin(BdrvTrackedRequest *req,
639                                   BlockDriverState *bs,
640                                   int64_t offset,
641                                   uint64_t bytes,
642                                   enum BdrvTrackedRequestType type)
643 {
644     assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes);
645
646     *req = (BdrvTrackedRequest){
647         .bs = bs,
648         .offset         = offset,
649         .bytes          = bytes,
650         .type           = type,
651         .co             = qemu_coroutine_self(),
652         .serialising    = false,
653         .overlap_offset = offset,
654         .overlap_bytes  = bytes,
655     };
656
657     qemu_co_queue_init(&req->wait_queue);
658
659     qemu_co_mutex_lock(&bs->reqs_lock);
660     QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
661     qemu_co_mutex_unlock(&bs->reqs_lock);
662 }
663
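/*
 * Tracked requests always follow the same begin/end pattern; bdrv_co_preadv()
 * below is one real user. A condensed sketch of the pattern (illustrative
 * only):
 *
 *     BdrvTrackedRequest req;
 *
 *     bdrv_inc_in_flight(bs);
 *     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
 *     (* ... perform the actual I/O, possibly waiting for overlaps ... *)
 *     tracked_request_end(&req);
 *     bdrv_dec_in_flight(bs);
 */
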
664 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
665 {
666     int64_t overlap_offset = req->offset & ~(align - 1);
667     uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
668                                - overlap_offset;
669
670     if (!req->serialising) {
671         atomic_inc(&req->bs->serialising_in_flight);
672         req->serialising = true;
673     }
674
675     req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
676     req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
677 }
678
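/*
 * A worked example of the rounding above (hypothetical numbers): with
 * req->offset = 70000, req->bytes = 1000 and align = 65536,
 *
 *     overlap_offset = 70000 & ~(65536 - 1)            = 65536
 *     overlap_bytes  = ROUND_UP(71000, 65536) - 65536  = 65536
 *
 * so the request is considered to overlap the whole 64 KiB cluster it
 * touches, which is what the copy-on-read serialisation relies on.
 */
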
679 static bool is_request_serialising_and_aligned(BdrvTrackedRequest *req)
680 {
681     /*
682      * If the request is serialising, overlap_offset and overlap_bytes are set,
683      * so we can check if the request is aligned. Otherwise, we don't care
684      * and simply return false.
685      */
686
687     return req->serialising && (req->offset == req->overlap_offset) &&
688            (req->bytes == req->overlap_bytes);
689 }
690
691 /**
692  * Round a region to cluster boundaries
693  */
694 void bdrv_round_to_clusters(BlockDriverState *bs,
695                             int64_t offset, int64_t bytes,
696                             int64_t *cluster_offset,
697                             int64_t *cluster_bytes)
698 {
699     BlockDriverInfo bdi;
700
701     if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
702         *cluster_offset = offset;
703         *cluster_bytes = bytes;
704     } else {
705         int64_t c = bdi.cluster_size;
706         *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
707         *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
708     }
709 }
710
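/*
 * For example (hypothetical values), with a cluster size of 65536 bytes,
 * offset = 66000 and bytes = 1000 are widened to:
 *
 *     *cluster_offset = QEMU_ALIGN_DOWN(66000, 65536)              = 65536
 *     *cluster_bytes  = QEMU_ALIGN_UP(66000 - 65536 + 1000, 65536) = 65536
 *
 * i.e. the smallest cluster-aligned region that contains [66000, 67000).
 */
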
711 static int bdrv_get_cluster_size(BlockDriverState *bs)
712 {
713     BlockDriverInfo bdi;
714     int ret;
715
716     ret = bdrv_get_info(bs, &bdi);
717     if (ret < 0 || bdi.cluster_size == 0) {
718         return bs->bl.request_alignment;
719     } else {
720         return bdi.cluster_size;
721     }
722 }
723
724 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
725                                      int64_t offset, uint64_t bytes)
726 {
727     /*        aaaa   bbbb */
728     if (offset >= req->overlap_offset + req->overlap_bytes) {
729         return false;
730     }
731     /* bbbb   aaaa        */
732     if (req->overlap_offset >= offset + bytes) {
733         return false;
734     }
735     return true;
736 }
737
738 void bdrv_inc_in_flight(BlockDriverState *bs)
739 {
740     atomic_inc(&bs->in_flight);
741 }
742
743 void bdrv_wakeup(BlockDriverState *bs)
744 {
745     aio_wait_kick();
746 }
747
748 void bdrv_dec_in_flight(BlockDriverState *bs)
749 {
750     atomic_dec(&bs->in_flight);
751     bdrv_wakeup(bs);
752 }
753
754 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
755 {
756     BlockDriverState *bs = self->bs;
757     BdrvTrackedRequest *req;
758     bool retry;
759     bool waited = false;
760
761     if (!atomic_read(&bs->serialising_in_flight)) {
762         return false;
763     }
764
765     do {
766         retry = false;
767         qemu_co_mutex_lock(&bs->reqs_lock);
768         QLIST_FOREACH(req, &bs->tracked_requests, list) {
769             if (req == self || (!req->serialising && !self->serialising)) {
770                 continue;
771             }
772             if (tracked_request_overlaps(req, self->overlap_offset,
773                                          self->overlap_bytes))
774             {
775                 /* Hitting this means there was a reentrant request, for
776                  * example, a block driver issuing nested requests.  This must
777                  * never happen since it means deadlock.
778                  */
779                 assert(qemu_coroutine_self() != req->co);
780
781                 /* If the request is already (indirectly) waiting for us, or
782                  * will wait for us as soon as it wakes up, then just go on
783                  * (instead of producing a deadlock in the former case). */
784                 if (!req->waiting_for) {
785                     self->waiting_for = req;
786                     qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
787                     self->waiting_for = NULL;
788                     retry = true;
789                     waited = true;
790                     break;
791                 }
792             }
793         }
794         qemu_co_mutex_unlock(&bs->reqs_lock);
795     } while (retry);
796
797     return waited;
798 }
799
800 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
801                                    size_t size)
802 {
803     if (size > BDRV_REQUEST_MAX_BYTES) {
804         return -EIO;
805     }
806
807     if (!bdrv_is_inserted(bs)) {
808         return -ENOMEDIUM;
809     }
810
811     if (offset < 0) {
812         return -EIO;
813     }
814
815     return 0;
816 }
817
818 typedef struct RwCo {
819     BdrvChild *child;
820     int64_t offset;
821     QEMUIOVector *qiov;
822     bool is_write;
823     int ret;
824     BdrvRequestFlags flags;
825 } RwCo;
826
827 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
828 {
829     RwCo *rwco = opaque;
830
831     if (!rwco->is_write) {
832         rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
833                                    rwco->qiov->size, rwco->qiov,
834                                    rwco->flags);
835     } else {
836         rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
837                                     rwco->qiov->size, rwco->qiov,
838                                     rwco->flags);
839     }
840     aio_wait_kick();
841 }
842
843 /*
844  * Process a vectored synchronous request using coroutines
845  */
846 static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
847                         QEMUIOVector *qiov, bool is_write,
848                         BdrvRequestFlags flags)
849 {
850     Coroutine *co;
851     RwCo rwco = {
852         .child = child,
853         .offset = offset,
854         .qiov = qiov,
855         .is_write = is_write,
856         .ret = NOT_DONE,
857         .flags = flags,
858     };
859
860     if (qemu_in_coroutine()) {
861         /* Fast-path if already in coroutine context */
862         bdrv_rw_co_entry(&rwco);
863     } else {
864         co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
865         bdrv_coroutine_enter(child->bs, co);
866         BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
867     }
868     return rwco.ret;
869 }
870
871 int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
872                        int bytes, BdrvRequestFlags flags)
873 {
874     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);
875
876     return bdrv_prwv_co(child, offset, &qiov, true,
877                         BDRV_REQ_ZERO_WRITE | flags);
878 }
879
880 /*
881  * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
882  * The operation is sped up by checking the block status and only writing
883  * zeroes to the device if they currently do not return zeroes. Optional
884  * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
885  * BDRV_REQ_FUA).
886  *
887  * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
888  */
889 int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
890 {
891     int ret;
892     int64_t target_size, bytes, offset = 0;
893     BlockDriverState *bs = child->bs;
894
895     target_size = bdrv_getlength(bs);
896     if (target_size < 0) {
897         return target_size;
898     }
899
900     for (;;) {
901         bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
902         if (bytes <= 0) {
903             return 0;
904         }
905         ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
906         if (ret < 0) {
907             return ret;
908         }
909         if (ret & BDRV_BLOCK_ZERO) {
910             offset += bytes;
911             continue;
912         }
913         ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
914         if (ret < 0) {
915             return ret;
916         }
917         offset += bytes;
918     }
919 }
920
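/*
 * A typical (hypothetical) use is zeroing out a whole device while allowing
 * the driver to unmap instead of writing literal zeroes:
 *
 *     int ret = bdrv_make_zero(child, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         (* handle error; see bdrv_pwrite_zeroes() above for error codes *)
 *     }
 *
 * where child stands for whatever BdrvChild the caller owns.
 */
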
921 int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
922 {
923     int ret;
924
925     ret = bdrv_prwv_co(child, offset, qiov, false, 0);
926     if (ret < 0) {
927         return ret;
928     }
929
930     return qiov->size;
931 }
932
933 /* See bdrv_pwrite() for the return codes */
934 int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
935 {
936     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
937
938     if (bytes < 0) {
939         return -EINVAL;
940     }
941
942     return bdrv_preadv(child, offset, &qiov);
943 }
944
945 int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
946 {
947     int ret;
948
949     ret = bdrv_prwv_co(child, offset, qiov, true, 0);
950     if (ret < 0) {
951         return ret;
952     }
953
954     return qiov->size;
955 }
956
957 /* Return no. of bytes on success or < 0 on error. Important errors are:
958   -EIO         generic I/O error (may happen for all errors)
959   -ENOMEDIUM   No media inserted.
960   -EINVAL      Invalid offset or number of bytes
961   -EACCES      Trying to write a read-only device
962 */
963 int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
964 {
965     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
966
967     if (bytes < 0) {
968         return -EINVAL;
969     }
970
971     return bdrv_pwritev(child, offset, &qiov);
972 }
973
974 /*
975  * Writes to the file and ensures that no writes are reordered across this
976  * request (acts as a barrier)
977  *
978  * Returns 0 on success, -errno in error cases.
979  */
980 int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
981                      const void *buf, int count)
982 {
983     int ret;
984
985     ret = bdrv_pwrite(child, offset, buf, count);
986     if (ret < 0) {
987         return ret;
988     }
989
990     ret = bdrv_flush(child->bs);
991     if (ret < 0) {
992         return ret;
993     }
994
995     return 0;
996 }
997
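/*
 * bdrv_pwrite_sync() is typically used for small metadata updates that must
 * reach the disk before the caller continues. A minimal sketch (hypothetical
 * caller, header and header_offset are placeholders):
 *
 *     ret = bdrv_pwrite_sync(child, header_offset, &header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;   (* neither the write nor the flush may be assumed *)
 *     }
 */
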
998 typedef struct CoroutineIOCompletion {
999     Coroutine *coroutine;
1000     int ret;
1001 } CoroutineIOCompletion;
1002
1003 static void bdrv_co_io_em_complete(void *opaque, int ret)
1004 {
1005     CoroutineIOCompletion *co = opaque;
1006
1007     co->ret = ret;
1008     aio_co_wake(co->coroutine);
1009 }
1010
1011 static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
1012                                            uint64_t offset, uint64_t bytes,
1013                                            QEMUIOVector *qiov, int flags)
1014 {
1015     BlockDriver *drv = bs->drv;
1016     int64_t sector_num;
1017     unsigned int nb_sectors;
1018
1019     assert(!(flags & ~BDRV_REQ_MASK));
1020     assert(!(flags & BDRV_REQ_NO_FALLBACK));
1021
1022     if (!drv) {
1023         return -ENOMEDIUM;
1024     }
1025
1026     if (drv->bdrv_co_preadv) {
1027         return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
1028     }
1029
1030     if (drv->bdrv_aio_preadv) {
1031         BlockAIOCB *acb;
1032         CoroutineIOCompletion co = {
1033             .coroutine = qemu_coroutine_self(),
1034         };
1035
1036         acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
1037                                    bdrv_co_io_em_complete, &co);
1038         if (acb == NULL) {
1039             return -EIO;
1040         } else {
1041             qemu_coroutine_yield();
1042             return co.ret;
1043         }
1044     }
1045
1046     sector_num = offset >> BDRV_SECTOR_BITS;
1047     nb_sectors = bytes >> BDRV_SECTOR_BITS;
1048
1049     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
1050     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
1051     assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1052     assert(drv->bdrv_co_readv);
1053
1054     return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
1055 }
1056
1057 static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
1058                                             uint64_t offset, uint64_t bytes,
1059                                             QEMUIOVector *qiov, int flags)
1060 {
1061     BlockDriver *drv = bs->drv;
1062     int64_t sector_num;
1063     unsigned int nb_sectors;
1064     int ret;
1065
1066     assert(!(flags & ~BDRV_REQ_MASK));
1067     assert(!(flags & BDRV_REQ_NO_FALLBACK));
1068
1069     if (!drv) {
1070         return -ENOMEDIUM;
1071     }
1072
1073     if (drv->bdrv_co_pwritev) {
1074         ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
1075                                    flags & bs->supported_write_flags);
1076         flags &= ~bs->supported_write_flags;
1077         goto emulate_flags;
1078     }
1079
1080     if (drv->bdrv_aio_pwritev) {
1081         BlockAIOCB *acb;
1082         CoroutineIOCompletion co = {
1083             .coroutine = qemu_coroutine_self(),
1084         };
1085
1086         acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
1087                                     flags & bs->supported_write_flags,
1088                                     bdrv_co_io_em_complete, &co);
1089         flags &= ~bs->supported_write_flags;
1090         if (acb == NULL) {
1091             ret = -EIO;
1092         } else {
1093             qemu_coroutine_yield();
1094             ret = co.ret;
1095         }
1096         goto emulate_flags;
1097     }
1098
1099     sector_num = offset >> BDRV_SECTOR_BITS;
1100     nb_sectors = bytes >> BDRV_SECTOR_BITS;
1101
1102     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
1103     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
1104     assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1105
1106     assert(drv->bdrv_co_writev);
1107     ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
1108                               flags & bs->supported_write_flags);
1109     flags &= ~bs->supported_write_flags;
1110
1111 emulate_flags:
1112     if (ret == 0 && (flags & BDRV_REQ_FUA)) {
1113         ret = bdrv_co_flush(bs);
1114     }
1115
1116     return ret;
1117 }
1118
1119 static int coroutine_fn
1120 bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
1121                                uint64_t bytes, QEMUIOVector *qiov)
1122 {
1123     BlockDriver *drv = bs->drv;
1124
1125     if (!drv) {
1126         return -ENOMEDIUM;
1127     }
1128
1129     if (!drv->bdrv_co_pwritev_compressed) {
1130         return -ENOTSUP;
1131     }
1132
1133     return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
1134 }
1135
1136 static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
1137         int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
1138 {
1139     BlockDriverState *bs = child->bs;
1140
1141     /* Perform I/O through a temporary buffer so that users who scribble over
1142      * their read buffer while the operation is in progress do not end up
1143      * modifying the image file.  This is critical for zero-copy guest I/O
1144      * where anything might happen inside guest memory.
1145      */
1146     void *bounce_buffer;
1147
1148     BlockDriver *drv = bs->drv;
1149     QEMUIOVector local_qiov;
1150     int64_t cluster_offset;
1151     int64_t cluster_bytes;
1152     size_t skip_bytes;
1153     int ret;
1154     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
1155                                     BDRV_REQUEST_MAX_BYTES);
1156     unsigned int progress = 0;
1157
1158     if (!drv) {
1159         return -ENOMEDIUM;
1160     }
1161
1162     /* FIXME We cannot require callers to have write permissions when all they
1163      * are doing is a read request. If we did things right, write permissions
1164      * would be obtained anyway, but internally by the copy-on-read code. As
1165      * long as it is implemented here rather than in a separate filter driver,
1166      * the copy-on-read code doesn't have its own BdrvChild, however, for which
1167      * it could request permissions. Therefore we have to bypass the permission
1168      * system for the moment. */
1169     // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1170
1171     /* Cover the entire cluster so no additional backing file I/O is required when
1172      * allocating a cluster in the image file.  Note that this value may exceed
1173      * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
1174      * is one reason we loop rather than doing it all at once.
1175      */
1176     bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
1177     skip_bytes = offset - cluster_offset;
1178
1179     trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
1180                                    cluster_offset, cluster_bytes);
1181
1182     bounce_buffer = qemu_try_blockalign(bs,
1183                                         MIN(MIN(max_transfer, cluster_bytes),
1184                                             MAX_BOUNCE_BUFFER));
1185     if (bounce_buffer == NULL) {
1186         ret = -ENOMEM;
1187         goto err;
1188     }
1189
1190     while (cluster_bytes) {
1191         int64_t pnum;
1192
1193         ret = bdrv_is_allocated(bs, cluster_offset,
1194                                 MIN(cluster_bytes, max_transfer), &pnum);
1195         if (ret < 0) {
1196             /* Safe to treat errors in querying allocation as if
1197              * unallocated; we'll probably fail again soon on the
1198              * read, but at least that will set a decent errno.
1199              */
1200             pnum = MIN(cluster_bytes, max_transfer);
1201         }
1202
1203         /* Stop at EOF if the image ends in the middle of the cluster */
1204         if (ret == 0 && pnum == 0) {
1205             assert(progress >= bytes);
1206             break;
1207         }
1208
1209         assert(skip_bytes < pnum);
1210
1211         if (ret <= 0) {
1212             /* Must copy-on-read; use the bounce buffer */
1213             pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
1214             qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);
1215
1216             ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
1217                                      &local_qiov, 0);
1218             if (ret < 0) {
1219                 goto err;
1220             }
1221
1222             bdrv_debug_event(bs, BLKDBG_COR_WRITE);
1223             if (drv->bdrv_co_pwrite_zeroes &&
1224                 buffer_is_zero(bounce_buffer, pnum)) {
1225                 /* FIXME: Should we (perhaps conditionally) be setting
1226                  * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1227                  * that still correctly reads as zero? */
1228                 ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
1229                                                BDRV_REQ_WRITE_UNCHANGED);
1230             } else {
1231                 /* This does not change the data on the disk, it is not
1232                  * necessary to flush even in cache=writethrough mode.
1233                  */
1234                 ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
1235                                           &local_qiov,
1236                                           BDRV_REQ_WRITE_UNCHANGED);
1237             }
1238
1239             if (ret < 0) {
1240                 /* It might be okay to ignore write errors for guest
1241                  * requests.  If this is a deliberate copy-on-read
1242                  * then we don't want to ignore the error.  Simply
1243                  * report it in all cases.
1244                  */
1245                 goto err;
1246             }
1247
1248             qemu_iovec_from_buf(qiov, progress, bounce_buffer + skip_bytes,
1249                                 pnum - skip_bytes);
1250         } else {
1251             /* Read directly into the destination */
1252             qemu_iovec_init(&local_qiov, qiov->niov);
1253             qemu_iovec_concat(&local_qiov, qiov, progress, pnum - skip_bytes);
1254             ret = bdrv_driver_preadv(bs, offset + progress, local_qiov.size,
1255                                      &local_qiov, 0);
1256             qemu_iovec_destroy(&local_qiov);
1257             if (ret < 0) {
1258                 goto err;
1259             }
1260         }
1261
1262         cluster_offset += pnum;
1263         cluster_bytes -= pnum;
1264         progress += pnum - skip_bytes;
1265         skip_bytes = 0;
1266     }
1267     ret = 0;
1268
1269 err:
1270     qemu_vfree(bounce_buffer);
1271     return ret;
1272 }
1273
1274 /*
1275  * Forwards an already correctly aligned request to the BlockDriver. This
1276  * handles copy on read, zeroing after EOF, and fragmentation of large
1277  * reads; any other features must be implemented by the caller.
1278  */
1279 static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
1280     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1281     int64_t align, QEMUIOVector *qiov, int flags)
1282 {
1283     BlockDriverState *bs = child->bs;
1284     int64_t total_bytes, max_bytes;
1285     int ret = 0;
1286     uint64_t bytes_remaining = bytes;
1287     int max_transfer;
1288
1289     assert(is_power_of_2(align));
1290     assert((offset & (align - 1)) == 0);
1291     assert((bytes & (align - 1)) == 0);
1292     assert(!qiov || bytes == qiov->size);
1293     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1294     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1295                                    align);
1296
1297     /* TODO: We would need a per-BDS .supported_read_flags and
1298      * potential fallback support, if we ever implement any read flags
1299      * to pass through to drivers.  For now, there aren't any
1300      * passthrough flags.  */
1301     assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));
1302
1303     /* Handle Copy on Read and associated serialisation */
1304     if (flags & BDRV_REQ_COPY_ON_READ) {
1305         /* If we touch the same cluster it counts as an overlap.  This
1306          * guarantees that allocating writes will be serialized and not race
1307          * with each other for the same cluster.  For example, in copy-on-read
1308          * it ensures that the CoR read and write operations are atomic and
1309          * guest writes cannot interleave between them. */
1310         mark_request_serialising(req, bdrv_get_cluster_size(bs));
1311     }
1312
1313     /* BDRV_REQ_SERIALISING is only for write operation */
1314     assert(!(flags & BDRV_REQ_SERIALISING));
1315
1316     if (!(flags & BDRV_REQ_NO_SERIALISING)) {
1317         wait_serialising_requests(req);
1318     }
1319
1320     if (flags & BDRV_REQ_COPY_ON_READ) {
1321         int64_t pnum;
1322
1323         ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
1324         if (ret < 0) {
1325             goto out;
1326         }
1327
1328         if (!ret || pnum != bytes) {
1329             ret = bdrv_co_do_copy_on_readv(child, offset, bytes, qiov);
1330             goto out;
1331         }
1332     }
1333
1334     /* Forward the request to the BlockDriver, possibly fragmenting it */
1335     total_bytes = bdrv_getlength(bs);
1336     if (total_bytes < 0) {
1337         ret = total_bytes;
1338         goto out;
1339     }
1340
1341     max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
1342     if (bytes <= max_bytes && bytes <= max_transfer) {
1343         ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
1344         goto out;
1345     }
1346
1347     while (bytes_remaining) {
1348         int num;
1349
1350         if (max_bytes) {
1351             QEMUIOVector local_qiov;
1352
1353             num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
1354             assert(num);
1355             qemu_iovec_init(&local_qiov, qiov->niov);
1356             qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
1357
1358             ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
1359                                      num, &local_qiov, 0);
1360             max_bytes -= num;
1361             qemu_iovec_destroy(&local_qiov);
1362         } else {
1363             num = bytes_remaining;
1364             ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
1365                                     bytes_remaining);
1366         }
1367         if (ret < 0) {
1368             goto out;
1369         }
1370         bytes_remaining -= num;
1371     }
1372
1373 out:
1374     return ret < 0 ? ret : 0;
1375 }
1376
1377 /*
1378  * Handle a read request in coroutine context
1379  */
1380 int coroutine_fn bdrv_co_preadv(BdrvChild *child,
1381     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1382     BdrvRequestFlags flags)
1383 {
1384     BlockDriverState *bs = child->bs;
1385     BlockDriver *drv = bs->drv;
1386     BdrvTrackedRequest req;
1387
1388     uint64_t align = bs->bl.request_alignment;
1389     uint8_t *head_buf = NULL;
1390     uint8_t *tail_buf = NULL;
1391     QEMUIOVector local_qiov;
1392     bool use_local_qiov = false;
1393     int ret;
1394
1395     trace_bdrv_co_preadv(child->bs, offset, bytes, flags);
1396
1397     if (!drv) {
1398         return -ENOMEDIUM;
1399     }
1400
1401     ret = bdrv_check_byte_request(bs, offset, bytes);
1402     if (ret < 0) {
1403         return ret;
1404     }
1405
1406     bdrv_inc_in_flight(bs);
1407
1408     /* Don't do copy-on-read if we read data before write operation */
1409     if (atomic_read(&bs->copy_on_read) && !(flags & BDRV_REQ_NO_SERIALISING)) {
1410         flags |= BDRV_REQ_COPY_ON_READ;
1411     }
1412
1413     /* Align read if necessary by padding qiov */
1414     if (offset & (align - 1)) {
1415         head_buf = qemu_blockalign(bs, align);
1416         qemu_iovec_init(&local_qiov, qiov->niov + 2);
1417         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1418         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1419         use_local_qiov = true;
1420
1421         bytes += offset & (align - 1);
1422         offset = offset & ~(align - 1);
1423     }
1424
1425     if ((offset + bytes) & (align - 1)) {
1426         if (!use_local_qiov) {
1427             qemu_iovec_init(&local_qiov, qiov->niov + 1);
1428             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1429             use_local_qiov = true;
1430         }
1431         tail_buf = qemu_blockalign(bs, align);
1432         qemu_iovec_add(&local_qiov, tail_buf,
1433                        align - ((offset + bytes) & (align - 1)));
1434
1435         bytes = ROUND_UP(bytes, align);
1436     }
1437
1438     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
1439     ret = bdrv_aligned_preadv(child, &req, offset, bytes, align,
1440                               use_local_qiov ? &local_qiov : qiov,
1441                               flags);
1442     tracked_request_end(&req);
1443     bdrv_dec_in_flight(bs);
1444
1445     if (use_local_qiov) {
1446         qemu_iovec_destroy(&local_qiov);
1447         qemu_vfree(head_buf);
1448         qemu_vfree(tail_buf);
1449     }
1450
1451     return ret;
1452 }
1453
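/*
 * A worked example of the padding above (hypothetical request): with
 * request_alignment = 512, offset = 1000 and bytes = 2000, the head padding
 * adds 1000 & 511 = 488 bytes in front, so the request becomes offset = 512,
 * bytes = 2488, and the tail padding then rounds bytes up to 2560 with a
 * 72-byte tail buffer. The caller's qiov still only receives the original
 * 2000 bytes; head_buf and tail_buf are discarded afterwards.
 */
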
1454 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
1455     int64_t offset, int bytes, BdrvRequestFlags flags)
1456 {
1457     BlockDriver *drv = bs->drv;
1458     QEMUIOVector qiov;
1459     void *buf = NULL;
1460     int ret = 0;
1461     bool need_flush = false;
1462     int head = 0;
1463     int tail = 0;
1464
1465     int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
1466     int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1467                         bs->bl.request_alignment);
1468     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
1469
1470     if (!drv) {
1471         return -ENOMEDIUM;
1472     }
1473
1474     if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
1475         return -ENOTSUP;
1476     }
1477
1478     assert(alignment % bs->bl.request_alignment == 0);
1479     head = offset % alignment;
1480     tail = (offset + bytes) % alignment;
1481     max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1482     assert(max_write_zeroes >= bs->bl.request_alignment);
1483
1484     while (bytes > 0 && !ret) {
1485         int num = bytes;
1486
1487         /* Align request.  Block drivers can expect the "bulk" of the request
1488          * to be aligned, and that unaligned requests do not cross cluster
1489          * boundaries.
1490          */
1491         if (head) {
1492             /* Make a small request up to the first aligned sector. For
1493              * convenience, limit this request to max_transfer even if
1494              * we don't need to fall back to writes.  */
1495             num = MIN(MIN(bytes, max_transfer), alignment - head);
1496             head = (head + num) % alignment;
1497             assert(num < max_write_zeroes);
1498         } else if (tail && num > alignment) {
1499             /* Shorten the request to the last aligned sector.  */
1500             num -= tail;
1501         }
1502
1503         /* limit request size */
1504         if (num > max_write_zeroes) {
1505             num = max_write_zeroes;
1506         }
1507
1508         ret = -ENOTSUP;
1509         /* First try the efficient write zeroes operation */
1510         if (drv->bdrv_co_pwrite_zeroes) {
1511             ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1512                                              flags & bs->supported_zero_flags);
1513             if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1514                 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1515                 need_flush = true;
1516             }
1517         } else {
1518             assert(!bs->supported_zero_flags);
1519         }
1520
1521         if (ret < 0 && !(flags & BDRV_REQ_NO_FALLBACK)) {
1522             /* Fall back to bounce buffer if write zeroes is unsupported */
1523             BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1524
1525             if ((flags & BDRV_REQ_FUA) &&
1526                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1527                 /* No need for bdrv_driver_pwritev() to do a fallback
1528                  * flush on each chunk; use just one at the end */
1529                 write_flags &= ~BDRV_REQ_FUA;
1530                 need_flush = true;
1531             }
1532             num = MIN(num, max_transfer);
1533             if (buf == NULL) {
1534                 buf = qemu_try_blockalign0(bs, num);
1535                 if (buf == NULL) {
1536                     ret = -ENOMEM;
1537                     goto fail;
1538                 }
1539             }
1540             qemu_iovec_init_buf(&qiov, buf, num);
1541
1542             ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);
1543
1544             /* Keep the bounce buffer around if it is big enough for
1545              * all future requests.
1546              */
1547             if (num < max_transfer) {
1548                 qemu_vfree(buf);
1549                 buf = NULL;
1550             }
1551         }
1552
1553         offset += num;
1554         bytes -= num;
1555     }
1556
1557 fail:
1558     if (ret == 0 && need_flush) {
1559         ret = bdrv_co_flush(bs);
1560     }
1561     qemu_vfree(buf);
1562     return ret;
1563 }
1564
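/*
 * A worked example of the head/tail splitting above (hypothetical request):
 * with alignment = 4096, offset = 5000 and bytes = 20000, head = 904 and
 * tail = 424, so the loop issues
 *
 *     1) an unaligned request of up to 4096 - 904 = 3192 bytes to reach the
 *        first aligned offset (8192),
 *     2) aligned zeroing for the bulk, shortened so it ends at the last
 *        aligned offset (24576), and
 *     3) a final unaligned request for the remaining 424-byte tail,
 *
 * with each piece additionally limited by max_write_zeroes/max_transfer.
 */
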
1565 static inline int coroutine_fn
1566 bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
1567                           BdrvTrackedRequest *req, int flags)
1568 {
1569     BlockDriverState *bs = child->bs;
1570     bool waited;
1571     int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1572
1573     if (bs->read_only) {
1574         return -EPERM;
1575     }
1576
1577     /* BDRV_REQ_NO_SERIALISING is only for read operation */
1578     assert(!(flags & BDRV_REQ_NO_SERIALISING));
1579     assert(!(bs->open_flags & BDRV_O_INACTIVE));
1580     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1581     assert(!(flags & ~BDRV_REQ_MASK));
1582
1583     if (flags & BDRV_REQ_SERIALISING) {
1584         mark_request_serialising(req, bdrv_get_cluster_size(bs));
1585     }
1586
1587     waited = wait_serialising_requests(req);
1588
1589     assert(!waited || !req->serialising ||
1590            is_request_serialising_and_aligned(req));
1591     assert(req->overlap_offset <= offset);
1592     assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1593     assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);
1594
1595     switch (req->type) {
1596     case BDRV_TRACKED_WRITE:
1597     case BDRV_TRACKED_DISCARD:
1598         if (flags & BDRV_REQ_WRITE_UNCHANGED) {
1599             assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1600         } else {
1601             assert(child->perm & BLK_PERM_WRITE);
1602         }
1603         return notifier_with_return_list_notify(&bs->before_write_notifiers,
1604                                                 req);
1605     case BDRV_TRACKED_TRUNCATE:
1606         assert(child->perm & BLK_PERM_RESIZE);
1607         return 0;
1608     default:
1609         abort();
1610     }
1611 }
1612
1613 static inline void coroutine_fn
1614 bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes,
1615                          BdrvTrackedRequest *req, int ret)
1616 {
1617     int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1618     BlockDriverState *bs = child->bs;
1619
1620     atomic_inc(&bs->write_gen);
1621
1622     /*
1623      * Discard cannot extend the image, but in error handling cases, such as
1624      * when reverting a qcow2 cluster allocation, the discarded range can extend
1625      * past the end of the image file, so we cannot assert about BDRV_TRACKED_DISCARD
1626      * here. Instead, just skip it, since semantically a discard request
1627      * beyond EOF cannot expand the image anyway.
1628      */
1629     if (ret == 0 &&
1630         (req->type == BDRV_TRACKED_TRUNCATE ||
1631          end_sector > bs->total_sectors) &&
1632         req->type != BDRV_TRACKED_DISCARD) {
1633         bs->total_sectors = end_sector;
1634         bdrv_parent_cb_resize(bs);
1635         bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
1636     }
1637     if (req->bytes) {
1638         switch (req->type) {
1639         case BDRV_TRACKED_WRITE:
1640             stat64_max(&bs->wr_highest_offset, offset + bytes);
1641             /* fall through, to set dirty bits */
1642         case BDRV_TRACKED_DISCARD:
1643             bdrv_set_dirty(bs, offset, bytes);
1644             break;
1645         default:
1646             break;
1647         }
1648     }
1649 }
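
/*
 * Illustrative sketch, not part of the upstream file: the bracket that this
 * file wraps around every write-like request.  A request is accounted with
 * tracked_request_begin(), vetted by bdrv_co_write_req_prepare(), executed,
 * and then completed with bdrv_co_write_req_finish() so that the write
 * generation, dirty bitmaps and resize notifications stay consistent.  The
 * helper name is an assumption; the sketch also assumes an already aligned
 * request.
 */
static int coroutine_fn
bdrv_write_req_bracket_sketch(BdrvChild *child, int64_t offset,
                              uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    int ret;

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, flags);
    if (ret >= 0) {
        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
    }
    bdrv_co_write_req_finish(child, offset, bytes, &req, ret);

    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}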
1650
1651 /*
1652  * Forwards an already correctly aligned write request to the BlockDriver,
1653  * after possibly fragmenting it.
1654  */
1655 static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
1656     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1657     int64_t align, QEMUIOVector *qiov, int flags)
1658 {
1659     BlockDriverState *bs = child->bs;
1660     BlockDriver *drv = bs->drv;
1661     int ret;
1662
1663     uint64_t bytes_remaining = bytes;
1664     int max_transfer;
1665
1666     if (!drv) {
1667         return -ENOMEDIUM;
1668     }
1669
1670     if (bdrv_has_readonly_bitmaps(bs)) {
1671         return -EPERM;
1672     }
1673
1674     assert(is_power_of_2(align));
1675     assert((offset & (align - 1)) == 0);
1676     assert((bytes & (align - 1)) == 0);
1677     assert(!qiov || bytes == qiov->size);
1678     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1679                                    align);
1680
1681     ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
1682
1683     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1684         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
1685         qemu_iovec_is_zero(qiov)) {
1686         flags |= BDRV_REQ_ZERO_WRITE;
1687         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
1688             flags |= BDRV_REQ_MAY_UNMAP;
1689         }
1690     }
1691
1692     if (ret < 0) {
1693         /* Do nothing, write notifier decided to fail this request */
1694     } else if (flags & BDRV_REQ_ZERO_WRITE) {
1695         bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
1696         ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
1697     } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
1698         ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
1699     } else if (bytes <= max_transfer) {
1700         bdrv_debug_event(bs, BLKDBG_PWRITEV);
1701         ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
1702     } else {
1703         bdrv_debug_event(bs, BLKDBG_PWRITEV);
1704         while (bytes_remaining) {
1705             int num = MIN(bytes_remaining, max_transfer);
1706             QEMUIOVector local_qiov;
1707             int local_flags = flags;
1708
1709             assert(num);
1710             if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
1711                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1712                 /* If FUA is going to be emulated by flush, we only
1713                  * need to flush on the last iteration */
1714                 local_flags &= ~BDRV_REQ_FUA;
1715             }
1716             qemu_iovec_init(&local_qiov, qiov->niov);
1717             qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
1718
1719             ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
1720                                       num, &local_qiov, local_flags);
1721             qemu_iovec_destroy(&local_qiov);
1722             if (ret < 0) {
1723                 break;
1724             }
1725             bytes_remaining -= num;
1726         }
1727     }
1728     bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
1729
1730     if (ret >= 0) {
1731         ret = 0;
1732     }
1733     bdrv_co_write_req_finish(child, offset, bytes, req, ret);
1734
1735     return ret;
1736 }
1737
1738 static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
1739                                                 int64_t offset,
1740                                                 unsigned int bytes,
1741                                                 BdrvRequestFlags flags,
1742                                                 BdrvTrackedRequest *req)
1743 {
1744     BlockDriverState *bs = child->bs;
1745     uint8_t *buf = NULL;
1746     QEMUIOVector local_qiov;
1747     uint64_t align = bs->bl.request_alignment;
1748     unsigned int head_padding_bytes, tail_padding_bytes;
1749     int ret = 0;
1750
1751     head_padding_bytes = offset & (align - 1);
1752     tail_padding_bytes = (align - (offset + bytes)) & (align - 1);
1753
1754
1755     assert(flags & BDRV_REQ_ZERO_WRITE);
1756     if (head_padding_bytes || tail_padding_bytes) {
1757         buf = qemu_blockalign(bs, align);
1758         qemu_iovec_init_buf(&local_qiov, buf, align);
1759     }
1760     if (head_padding_bytes) {
1761         uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
1762
1763         /* RMW the unaligned part before head. */
1764         mark_request_serialising(req, align);
1765         wait_serialising_requests(req);
1766         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1767         ret = bdrv_aligned_preadv(child, req, offset & ~(align - 1), align,
1768                                   align, &local_qiov, 0);
1769         if (ret < 0) {
1770             goto fail;
1771         }
1772         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1773
1774         memset(buf + head_padding_bytes, 0, zero_bytes);
1775         ret = bdrv_aligned_pwritev(child, req, offset & ~(align - 1), align,
1776                                    align, &local_qiov,
1777                                    flags & ~BDRV_REQ_ZERO_WRITE);
1778         if (ret < 0) {
1779             goto fail;
1780         }
1781         offset += zero_bytes;
1782         bytes -= zero_bytes;
1783     }
1784
1785     assert(!bytes || (offset & (align - 1)) == 0);
1786     if (bytes >= align) {
1787         /* Write the aligned part in the middle. */
1788         uint64_t aligned_bytes = bytes & ~(align - 1);
1789         ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
1790                                    NULL, flags);
1791         if (ret < 0) {
1792             goto fail;
1793         }
1794         bytes -= aligned_bytes;
1795         offset += aligned_bytes;
1796     }
1797
1798     assert(!bytes || (offset & (align - 1)) == 0);
1799     if (bytes) {
1800         assert(align == tail_padding_bytes + bytes);
1801         /* RMW the unaligned part after tail. */
1802         mark_request_serialising(req, align);
1803         wait_serialising_requests(req);
1804         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1805         ret = bdrv_aligned_preadv(child, req, offset, align,
1806                                   align, &local_qiov, 0);
1807         if (ret < 0) {
1808             goto fail;
1809         }
1810         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1811
1812         memset(buf, 0, bytes);
1813         ret = bdrv_aligned_pwritev(child, req, offset, align, align,
1814                                    &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
1815     }
1816 fail:
1817     qemu_vfree(buf);
1818     return ret;
1819
1820 }
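
/*
 * Worked example (illustrative, with assumed numbers): with a request
 * alignment of 4096 bytes, a zero write at offset 1000 of 10000 bytes is
 * handled above as three pieces:
 *   head:   RMW of [0, 4096), zeroing bytes [1000, 4096)  -> 3096 bytes
 *   middle: aligned zero write of [4096, 8192)            -> 4096 bytes
 *   tail:   RMW of [8192, 12288), zeroing [8192, 11000)   -> 2808 bytes
 * 3096 + 4096 + 2808 = 10000, and only the head and tail pieces need the
 * bounce buffer and request serialisation.
 */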
1821
1822 /*
1823  * Handle a write request in coroutine context
1824  */
1825 int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
1826     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1827     BdrvRequestFlags flags)
1828 {
1829     BlockDriverState *bs = child->bs;
1830     BdrvTrackedRequest req;
1831     uint64_t align = bs->bl.request_alignment;
1832     uint8_t *head_buf = NULL;
1833     uint8_t *tail_buf = NULL;
1834     QEMUIOVector local_qiov;
1835     bool use_local_qiov = false;
1836     int ret;
1837
1838     trace_bdrv_co_pwritev(child->bs, offset, bytes, flags);
1839
1840     if (!bs->drv) {
1841         return -ENOMEDIUM;
1842     }
1843
1844     ret = bdrv_check_byte_request(bs, offset, bytes);
1845     if (ret < 0) {
1846         return ret;
1847     }
1848
1849     bdrv_inc_in_flight(bs);
1850     /*
1851      * Align write if necessary by performing a read-modify-write cycle.
1852      * Pad qiov with the read parts and be sure to have a tracked request not
1853      * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
1854      */
1855     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
1856
1857     if (flags & BDRV_REQ_ZERO_WRITE) {
1858         ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
1859         goto out;
1860     }
1861
1862     if (offset & (align - 1)) {
1863         QEMUIOVector head_qiov;
1864
1865         mark_request_serialising(&req, align);
1866         wait_serialising_requests(&req);
1867
1868         head_buf = qemu_blockalign(bs, align);
1869         qemu_iovec_init_buf(&head_qiov, head_buf, align);
1870
1871         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1872         ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align,
1873                                   align, &head_qiov, 0);
1874         if (ret < 0) {
1875             goto fail;
1876         }
1877         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1878
1879         qemu_iovec_init(&local_qiov, qiov->niov + 2);
1880         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1881         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1882         use_local_qiov = true;
1883
1884         bytes += offset & (align - 1);
1885         offset = offset & ~(align - 1);
1886
1887         /* We have read the tail already if the request is smaller
1888          * than one aligned block.
1889          */
1890         if (bytes < align) {
1891             qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
1892             bytes = align;
1893         }
1894     }
1895
1896     if ((offset + bytes) & (align - 1)) {
1897         QEMUIOVector tail_qiov;
1898         size_t tail_bytes;
1899         bool waited;
1900
1901         mark_request_serialising(&req, align);
1902         waited = wait_serialising_requests(&req);
1903         assert(!waited || !use_local_qiov);
1904
1905         tail_buf = qemu_blockalign(bs, align);
1906         qemu_iovec_init_buf(&tail_qiov, tail_buf, align);
1907
1908         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1909         ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1),
1910                                   align, align, &tail_qiov, 0);
1911         if (ret < 0) {
1912             goto fail;
1913         }
1914         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1915
1916         if (!use_local_qiov) {
1917             qemu_iovec_init(&local_qiov, qiov->niov + 1);
1918             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1919             use_local_qiov = true;
1920         }
1921
1922         tail_bytes = (offset + bytes) & (align - 1);
1923         qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
1924
1925         bytes = ROUND_UP(bytes, align);
1926     }
1927
1928     ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
1929                                use_local_qiov ? &local_qiov : qiov,
1930                                flags);
1931
1932 fail:
1933
1934     if (use_local_qiov) {
1935         qemu_iovec_destroy(&local_qiov);
1936     }
1937     qemu_vfree(head_buf);
1938     qemu_vfree(tail_buf);
1939 out:
1940     tracked_request_end(&req);
1941     bdrv_dec_in_flight(bs);
1942     return ret;
1943 }
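
/*
 * Illustrative sketch, not part of the upstream file: a minimal coroutine
 * caller of bdrv_co_pwritev().  The alignment handling above pads the
 * request with read-modify-write cycles as needed, so the caller may pass
 * any byte offset and length.  The helper name is an assumption made for
 * this example only.
 */
static int coroutine_fn bdrv_co_pwrite_buf_sketch(BdrvChild *child,
                                                  int64_t offset,
                                                  const void *data,
                                                  unsigned int bytes)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, data, bytes);

    return bdrv_co_pwritev(child, offset, bytes, &qiov, 0);
}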
1944
1945 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
1946                                        int bytes, BdrvRequestFlags flags)
1947 {
1948     trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
1949
1950     if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
1951         flags &= ~BDRV_REQ_MAY_UNMAP;
1952     }
1953
1954     return bdrv_co_pwritev(child, offset, bytes, NULL,
1955                            BDRV_REQ_ZERO_WRITE | flags);
1956 }
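
/*
 * Illustrative sketch, not part of the upstream file: zeroing a range while
 * allowing the driver to deallocate it.  BDRV_REQ_MAY_UNMAP is dropped above
 * when the node was opened without BDRV_O_UNMAP, so the range still reads as
 * zeroes either way.  The helper name is an assumption for this example.
 */
static int coroutine_fn bdrv_zero_range_sketch(BdrvChild *child,
                                               int64_t offset, int bytes)
{
    return bdrv_co_pwrite_zeroes(child, offset, bytes, BDRV_REQ_MAY_UNMAP);
}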
1957
1958 /*
1959  * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend.
1960  */
1961 int bdrv_flush_all(void)
1962 {
1963     BdrvNextIterator it;
1964     BlockDriverState *bs = NULL;
1965     int result = 0;
1966
1967     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
1968         AioContext *aio_context = bdrv_get_aio_context(bs);
1969         int ret;
1970
1971         aio_context_acquire(aio_context);
1972         ret = bdrv_flush(bs);
1973         if (ret < 0 && !result) {
1974             result = ret;
1975         }
1976         aio_context_release(aio_context);
1977     }
1978
1979     return result;
1980 }
1981
1982
1983 typedef struct BdrvCoBlockStatusData {
1984     BlockDriverState *bs;
1985     BlockDriverState *base;
1986     bool want_zero;
1987     int64_t offset;
1988     int64_t bytes;
1989     int64_t *pnum;
1990     int64_t *map;
1991     BlockDriverState **file;
1992     int ret;
1993     bool done;
1994 } BdrvCoBlockStatusData;
1995
1996 int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
1997                                                 bool want_zero,
1998                                                 int64_t offset,
1999                                                 int64_t bytes,
2000                                                 int64_t *pnum,
2001                                                 int64_t *map,
2002                                                 BlockDriverState **file)
2003 {
2004     assert(bs->file && bs->file->bs);
2005     *pnum = bytes;
2006     *map = offset;
2007     *file = bs->file->bs;
2008     return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2009 }
2010
2011 int coroutine_fn bdrv_co_block_status_from_backing(BlockDriverState *bs,
2012                                                    bool want_zero,
2013                                                    int64_t offset,
2014                                                    int64_t bytes,
2015                                                    int64_t *pnum,
2016                                                    int64_t *map,
2017                                                    BlockDriverState **file)
2018 {
2019     assert(bs->backing && bs->backing->bs);
2020     *pnum = bytes;
2021     *map = offset;
2022     *file = bs->backing->bs;
2023     return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2024 }
2025
2026 /*
2027  * Returns the allocation status of the specified sectors.
2028  * Drivers not implementing the functionality are assumed to not support
2029  * backing files, hence all their sectors are reported as allocated.
2030  *
2031  * If 'want_zero' is true, the caller is querying for mapping
2032  * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2033  * _ZERO where possible; otherwise, the result favors larger 'pnum',
2034  * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2035  *
2036  * If 'offset' is beyond the end of the disk image the return value is
2037  * BDRV_BLOCK_EOF and 'pnum' is set to 0.
2038  *
2039  * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
2040  * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2041  * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
2042  *
2043  * 'pnum' is set to the number of bytes (including and immediately
2044  * following the specified offset) that are easily known to be in the
2045  * same allocated/unallocated state.  Note that a second call starting
2046  * at the original offset plus returned pnum may have the same status.
2047  * The returned value is non-zero on success except at end-of-file.
2048  *
2049  * Returns negative errno on failure.  Otherwise, if the
2050  * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2051  * set to the host mapping and BDS corresponding to the guest offset.
2052  */
2053 static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
2054                                              bool want_zero,
2055                                              int64_t offset, int64_t bytes,
2056                                              int64_t *pnum, int64_t *map,
2057                                              BlockDriverState **file)
2058 {
2059     int64_t total_size;
2060     int64_t n; /* bytes */
2061     int ret;
2062     int64_t local_map = 0;
2063     BlockDriverState *local_file = NULL;
2064     int64_t aligned_offset, aligned_bytes;
2065     uint32_t align;
2066
2067     assert(pnum);
2068     *pnum = 0;
2069     total_size = bdrv_getlength(bs);
2070     if (total_size < 0) {
2071         ret = total_size;
2072         goto early_out;
2073     }
2074
2075     if (offset >= total_size) {
2076         ret = BDRV_BLOCK_EOF;
2077         goto early_out;
2078     }
2079     if (!bytes) {
2080         ret = 0;
2081         goto early_out;
2082     }
2083
2084     n = total_size - offset;
2085     if (n < bytes) {
2086         bytes = n;
2087     }
2088
2089     /* Must be non-NULL or bdrv_getlength() would have failed */
2090     assert(bs->drv);
2091     if (!bs->drv->bdrv_co_block_status) {
2092         *pnum = bytes;
2093         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
2094         if (offset + bytes == total_size) {
2095             ret |= BDRV_BLOCK_EOF;
2096         }
2097         if (bs->drv->protocol_name) {
2098             ret |= BDRV_BLOCK_OFFSET_VALID;
2099             local_map = offset;
2100             local_file = bs;
2101         }
2102         goto early_out;
2103     }
2104
2105     bdrv_inc_in_flight(bs);
2106
2107     /* Round out to request_alignment boundaries */
2108     align = bs->bl.request_alignment;
2109     aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2110     aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2111
2112     ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
2113                                         aligned_bytes, pnum, &local_map,
2114                                         &local_file);
2115     if (ret < 0) {
2116         *pnum = 0;
2117         goto out;
2118     }
2119
2120     /*
2121      * The driver's result must be a non-zero multiple of request_alignment.
2122      * Clamp pnum and adjust map to original request.
2123      */
2124     assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2125            align > offset - aligned_offset);
2126     if (ret & BDRV_BLOCK_RECURSE) {
2127         assert(ret & BDRV_BLOCK_DATA);
2128         assert(ret & BDRV_BLOCK_OFFSET_VALID);
2129         assert(!(ret & BDRV_BLOCK_ZERO));
2130     }
2131
2132     *pnum -= offset - aligned_offset;
2133     if (*pnum > bytes) {
2134         *pnum = bytes;
2135     }
2136     if (ret & BDRV_BLOCK_OFFSET_VALID) {
2137         local_map += offset - aligned_offset;
2138     }
2139
2140     if (ret & BDRV_BLOCK_RAW) {
2141         assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2142         ret = bdrv_co_block_status(local_file, want_zero, local_map,
2143                                    *pnum, pnum, &local_map, &local_file);
2144         goto out;
2145     }
2146
2147     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
2148         ret |= BDRV_BLOCK_ALLOCATED;
2149     } else if (want_zero) {
2150         if (bdrv_unallocated_blocks_are_zero(bs)) {
2151             ret |= BDRV_BLOCK_ZERO;
2152         } else if (bs->backing) {
2153             BlockDriverState *bs2 = bs->backing->bs;
2154             int64_t size2 = bdrv_getlength(bs2);
2155
2156             if (size2 >= 0 && offset >= size2) {
2157                 ret |= BDRV_BLOCK_ZERO;
2158             }
2159         }
2160     }
2161
2162     if (want_zero && ret & BDRV_BLOCK_RECURSE &&
2163         local_file && local_file != bs &&
2164         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
2165         (ret & BDRV_BLOCK_OFFSET_VALID)) {
2166         int64_t file_pnum;
2167         int ret2;
2168
2169         ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
2170                                     *pnum, &file_pnum, NULL, NULL);
2171         if (ret2 >= 0) {
2172             /* Ignore errors.  This is just providing extra information; it
2173              * is useful but not necessary.
2174              */
2175             if (ret2 & BDRV_BLOCK_EOF &&
2176                 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2177                 /*
2178                  * It is valid for the format block driver to read
2179                  * beyond the end of the underlying file's current
2180                  * size; such areas read as zero.
2181                  */
2182                 ret |= BDRV_BLOCK_ZERO;
2183             } else {
2184                 /* Limit request to the range reported by the protocol driver */
2185                 *pnum = file_pnum;
2186                 ret |= (ret2 & BDRV_BLOCK_ZERO);
2187             }
2188         }
2189     }
2190
2191 out:
2192     bdrv_dec_in_flight(bs);
2193     if (ret >= 0 && offset + *pnum == total_size) {
2194         ret |= BDRV_BLOCK_EOF;
2195     }
2196 early_out:
2197     if (file) {
2198         *file = local_file;
2199     }
2200     if (map) {
2201         *map = local_map;
2202     }
2203     return ret;
2204 }
2205
2206 static int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
2207                                                    BlockDriverState *base,
2208                                                    bool want_zero,
2209                                                    int64_t offset,
2210                                                    int64_t bytes,
2211                                                    int64_t *pnum,
2212                                                    int64_t *map,
2213                                                    BlockDriverState **file)
2214 {
2215     BlockDriverState *p;
2216     int ret = 0;
2217     bool first = true;
2218
2219     assert(bs != base);
2220     for (p = bs; p != base; p = backing_bs(p)) {
2221         ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
2222                                    file);
2223         if (ret < 0) {
2224             break;
2225         }
2226         if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
2227             /*
2228              * Reading beyond the end of the file continues to read
2229              * zeroes, but we can only widen the result to the
2230              * unallocated length we learned from an earlier
2231              * iteration.
2232              */
2233             *pnum = bytes;
2234         }
2235         if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
2236             break;
2237         }
2238         /* [offset, offset + *pnum) is unallocated on this layer; it may cover
2239          * only the first part of [offset, offset + bytes).  */
2240         bytes = MIN(bytes, *pnum);
2241         first = false;
2242     }
2243     return ret;
2244 }
2245
2246 /* Coroutine wrapper for bdrv_block_status_above() */
2247 static void coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
2248 {
2249     BdrvCoBlockStatusData *data = opaque;
2250
2251     data->ret = bdrv_co_block_status_above(data->bs, data->base,
2252                                            data->want_zero,
2253                                            data->offset, data->bytes,
2254                                            data->pnum, data->map, data->file);
2255     data->done = true;
2256     aio_wait_kick();
2257 }
2258
2259 /*
2260  * Synchronous wrapper around bdrv_co_block_status_above().
2261  *
2262  * See bdrv_co_block_status_above() for details.
2263  */
2264 static int bdrv_common_block_status_above(BlockDriverState *bs,
2265                                           BlockDriverState *base,
2266                                           bool want_zero, int64_t offset,
2267                                           int64_t bytes, int64_t *pnum,
2268                                           int64_t *map,
2269                                           BlockDriverState **file)
2270 {
2271     Coroutine *co;
2272     BdrvCoBlockStatusData data = {
2273         .bs = bs,
2274         .base = base,
2275         .want_zero = want_zero,
2276         .offset = offset,
2277         .bytes = bytes,
2278         .pnum = pnum,
2279         .map = map,
2280         .file = file,
2281         .done = false,
2282     };
2283
2284     if (qemu_in_coroutine()) {
2285         /* Fast-path if already in coroutine context */
2286         bdrv_block_status_above_co_entry(&data);
2287     } else {
2288         co = qemu_coroutine_create(bdrv_block_status_above_co_entry, &data);
2289         bdrv_coroutine_enter(bs, co);
2290         BDRV_POLL_WHILE(bs, !data.done);
2291     }
2292     return data.ret;
2293 }
2294
2295 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
2296                             int64_t offset, int64_t bytes, int64_t *pnum,
2297                             int64_t *map, BlockDriverState **file)
2298 {
2299     return bdrv_common_block_status_above(bs, base, true, offset, bytes,
2300                                           pnum, map, file);
2301 }
2302
2303 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
2304                       int64_t *pnum, int64_t *map, BlockDriverState **file)
2305 {
2306     return bdrv_block_status_above(bs, backing_bs(bs),
2307                                    offset, bytes, pnum, map, file);
2308 }
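
/*
 * Illustrative sketch, not part of the upstream file: walking an image with
 * bdrv_block_status().  Each call describes one contiguous range in *pnum,
 * so advancing by *pnum visits the whole image.  The helper name is an
 * assumption made for this example only.
 */
static int64_t bdrv_count_allocated_bytes_sketch(BlockDriverState *bs)
{
    int64_t size = bdrv_getlength(bs);
    int64_t offset = 0;
    int64_t allocated = 0;

    if (size < 0) {
        return size;
    }
    while (offset < size) {
        int64_t pnum;
        int ret = bdrv_block_status(bs, offset, size - offset, &pnum,
                                    NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ALLOCATED) {
            allocated += pnum;
        }
        offset += pnum;
    }
    return allocated;
}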
2309
2310 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
2311                                    int64_t bytes, int64_t *pnum)
2312 {
2313     int ret;
2314     int64_t dummy;
2315
2316     ret = bdrv_common_block_status_above(bs, backing_bs(bs), false, offset,
2317                                          bytes, pnum ? pnum : &dummy, NULL,
2318                                          NULL);
2319     if (ret < 0) {
2320         return ret;
2321     }
2322     return !!(ret & BDRV_BLOCK_ALLOCATED);
2323 }
2324
2325 /*
2326  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2327  *
2328  * Return 1 if (a prefix of) the given range is allocated in any image
2329  * between BASE and TOP (BASE is only included if include_base is set).
2330  * BASE can be NULL to check if the given offset is allocated in any
2331  * image of the chain.  Return 0 otherwise, or negative errno on
2332  * failure.
2333  *
2334  * 'pnum' is set to the number of bytes (including and immediately
2335  * following the specified offset) that are known to be in the same
2336  * allocated/unallocated state.  Note that a subsequent call starting
2337  * at 'offset + *pnum' may return the same allocation status (in other
2338  * words, the result is not necessarily the maximum possible range);
2339  * but 'pnum' will only be 0 when end of file is reached.
2340  *
2341  */
2342 int bdrv_is_allocated_above(BlockDriverState *top,
2343                             BlockDriverState *base,
2344                             bool include_base, int64_t offset,
2345                             int64_t bytes, int64_t *pnum)
2346 {
2347     BlockDriverState *intermediate;
2348     int ret;
2349     int64_t n = bytes;
2350
2351     assert(base || !include_base);
2352
2353     intermediate = top;
2354     while (include_base || intermediate != base) {
2355         int64_t pnum_inter;
2356         int64_t size_inter;
2357
2358         assert(intermediate);
2359         ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
2360         if (ret < 0) {
2361             return ret;
2362         }
2363         if (ret) {
2364             *pnum = pnum_inter;
2365             return 1;
2366         }
2367
2368         size_inter = bdrv_getlength(intermediate);
2369         if (size_inter < 0) {
2370             return size_inter;
2371         }
2372         if (n > pnum_inter &&
2373             (intermediate == top || offset + pnum_inter < size_inter)) {
2374             n = pnum_inter;
2375         }
2376
2377         if (intermediate == base) {
2378             break;
2379         }
2380
2381         intermediate = backing_bs(intermediate);
2382     }
2383
2384     *pnum = n;
2385     return 0;
2386 }
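
/*
 * Illustrative sketch, not part of the upstream file: checking a whole range
 * rather than just a prefix.  A single bdrv_is_allocated_above() call only
 * reports the first *pnum bytes, so a caller that needs a yes/no answer for
 * the complete range loops until the range is exhausted.  The helper name is
 * an assumption for this example.
 */
static int bdrv_range_allocated_above_sketch(BlockDriverState *top,
                                             BlockDriverState *base,
                                             int64_t offset, int64_t bytes)
{
    while (bytes > 0) {
        int64_t pnum;
        int ret = bdrv_is_allocated_above(top, base, false, offset, bytes,
                                          &pnum);
        if (ret != 0) {
            /* allocated above base (1) or error (negative errno) */
            return ret;
        }
        if (pnum == 0) {
            /* end of file reached */
            break;
        }
        offset += pnum;
        bytes -= pnum;
    }
    return 0;
}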
2387
2388 typedef struct BdrvVmstateCo {
2389     BlockDriverState    *bs;
2390     QEMUIOVector        *qiov;
2391     int64_t             pos;
2392     bool                is_read;
2393     int                 ret;
2394 } BdrvVmstateCo;
2395
2396 static int coroutine_fn
2397 bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
2398                    bool is_read)
2399 {
2400     BlockDriver *drv = bs->drv;
2401     int ret = -ENOTSUP;
2402
2403     bdrv_inc_in_flight(bs);
2404
2405     if (!drv) {
2406         ret = -ENOMEDIUM;
2407     } else if (drv->bdrv_load_vmstate) {
2408         if (is_read) {
2409             ret = drv->bdrv_load_vmstate(bs, qiov, pos);
2410         } else {
2411             ret = drv->bdrv_save_vmstate(bs, qiov, pos);
2412         }
2413     } else if (bs->file) {
2414         ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
2415     }
2416
2417     bdrv_dec_in_flight(bs);
2418     return ret;
2419 }
2420
2421 static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
2422 {
2423     BdrvVmstateCo *co = opaque;
2424     co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
2425     aio_wait_kick();
2426 }
2427
2428 static inline int
2429 bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
2430                 bool is_read)
2431 {
2432     if (qemu_in_coroutine()) {
2433         return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
2434     } else {
2435         BdrvVmstateCo data = {
2436             .bs         = bs,
2437             .qiov       = qiov,
2438             .pos        = pos,
2439             .is_read    = is_read,
2440             .ret        = -EINPROGRESS,
2441         };
2442         Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);
2443
2444         bdrv_coroutine_enter(bs, co);
2445         BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
2446         return data.ret;
2447     }
2448 }
2449
2450 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2451                       int64_t pos, int size)
2452 {
2453     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2454     int ret;
2455
2456     ret = bdrv_writev_vmstate(bs, &qiov, pos);
2457     if (ret < 0) {
2458         return ret;
2459     }
2460
2461     return size;
2462 }
2463
2464 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2465 {
2466     return bdrv_rw_vmstate(bs, qiov, pos, false);
2467 }
2468
2469 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2470                       int64_t pos, int size)
2471 {
2472     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2473     int ret;
2474
2475     ret = bdrv_readv_vmstate(bs, &qiov, pos);
2476     if (ret < 0) {
2477         return ret;
2478     }
2479
2480     return size;
2481 }
2482
2483 int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2484 {
2485     return bdrv_rw_vmstate(bs, qiov, pos, true);
2486 }
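
/*
 * Illustrative sketch, not part of the upstream file: round-tripping a small
 * blob through the vmstate area.  bdrv_save_vmstate() and bdrv_load_vmstate()
 * return the number of bytes transferred on success and a negative errno on
 * failure.  The helper name and the fixed offset are assumptions made for
 * this example only.
 */
static int bdrv_vmstate_roundtrip_sketch(BlockDriverState *bs)
{
    uint8_t out[16] = { 0x42 };
    uint8_t in[16] = { 0 };
    int ret;

    ret = bdrv_save_vmstate(bs, out, 0, sizeof(out));
    if (ret < 0) {
        return ret;
    }
    ret = bdrv_load_vmstate(bs, in, 0, sizeof(in));
    if (ret < 0) {
        return ret;
    }
    return memcmp(in, out, sizeof(out)) == 0 ? 0 : -EIO;
}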
2487
2488 /**************************************************************/
2489 /* async I/Os */
2490
2491 void bdrv_aio_cancel(BlockAIOCB *acb)
2492 {
2493     qemu_aio_ref(acb);
2494     bdrv_aio_cancel_async(acb);
2495     while (acb->refcnt > 1) {
2496         if (acb->aiocb_info->get_aio_context) {
2497             aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2498         } else if (acb->bs) {
2499             /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
2500              * assert that we're not using an I/O thread.  Thread-safe
2501              * code should use bdrv_aio_cancel_async exclusively.
2502              */
2503             assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
2504             aio_poll(bdrv_get_aio_context(acb->bs), true);
2505         } else {
2506             abort();
2507         }
2508     }
2509     qemu_aio_unref(acb);
2510 }
2511
2512 /* Async version of aio cancel. The caller is not blocked if the acb implements
2513  * cancel_async; otherwise we do nothing and let the request complete normally.
2514  * In either case the completion callback must be called. */
2515 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2516 {
2517     if (acb->aiocb_info->cancel_async) {
2518         acb->aiocb_info->cancel_async(acb);
2519     }
2520 }
2521
2522 /**************************************************************/
2523 /* Coroutine block device emulation */
2524
2525 typedef struct FlushCo {
2526     BlockDriverState *bs;
2527     int ret;
2528 } FlushCo;
2529
2530
2531 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2532 {
2533     FlushCo *rwco = opaque;
2534
2535     rwco->ret = bdrv_co_flush(rwco->bs);
2536     aio_wait_kick();
2537 }
2538
2539 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2540 {
2541     int current_gen;
2542     int ret = 0;
2543
2544     bdrv_inc_in_flight(bs);
2545
2546     if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
2547         bdrv_is_sg(bs)) {
2548         goto early_exit;
2549     }
2550
2551     qemu_co_mutex_lock(&bs->reqs_lock);
2552     current_gen = atomic_read(&bs->write_gen);
2553
2554     /* Wait until any previous flushes are completed */
2555     while (bs->active_flush_req) {
2556         qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
2557     }
2558
2559     /* Flushes reach this point in nondecreasing current_gen order.  */
2560     bs->active_flush_req = true;
2561     qemu_co_mutex_unlock(&bs->reqs_lock);
2562
2563     /* Write back all layers by calling one driver function */
2564     if (bs->drv->bdrv_co_flush) {
2565         ret = bs->drv->bdrv_co_flush(bs);
2566         goto out;
2567     }
2568
2569     /* Write back cached data to the OS even with cache=unsafe */
2570     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
2571     if (bs->drv->bdrv_co_flush_to_os) {
2572         ret = bs->drv->bdrv_co_flush_to_os(bs);
2573         if (ret < 0) {
2574             goto out;
2575         }
2576     }
2577
2578     /* But don't actually force it to the disk with cache=unsafe */
2579     if (bs->open_flags & BDRV_O_NO_FLUSH) {
2580         goto flush_parent;
2581     }
2582
2583     /* Check if we really need to flush anything */
2584     if (bs->flushed_gen == current_gen) {
2585         goto flush_parent;
2586     }
2587
2588     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
2589     if (!bs->drv) {
2590         /* bs->drv->bdrv_co_flush() might have ejected the BDS
2591          * (even in case of apparent success) */
2592         ret = -ENOMEDIUM;
2593         goto out;
2594     }
2595     if (bs->drv->bdrv_co_flush_to_disk) {
2596         ret = bs->drv->bdrv_co_flush_to_disk(bs);
2597     } else if (bs->drv->bdrv_aio_flush) {
2598         BlockAIOCB *acb;
2599         CoroutineIOCompletion co = {
2600             .coroutine = qemu_coroutine_self(),
2601         };
2602
2603         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2604         if (acb == NULL) {
2605             ret = -EIO;
2606         } else {
2607             qemu_coroutine_yield();
2608             ret = co.ret;
2609         }
2610     } else {
2611         /*
2612          * Some block drivers always operate in either writethrough or unsafe
2613          * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2614          * know how the server works (because the behaviour is hardcoded or
2615          * depends on server-side configuration), so we can't ensure that
2616          * everything is safe on disk. Returning an error doesn't work because
2617          * that would break guests even if the server operates in writethrough
2618          * mode.
2619          *
2620          * Let's hope the user knows what he's doing.
2621          */
2622         ret = 0;
2623     }
2624
2625     if (ret < 0) {
2626         goto out;
2627     }
2628
2629     /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
2630      * in the case of cache=unsafe, so there are no useless flushes.
2631      */
2632 flush_parent:
2633     ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2634 out:
2635     /* Notify any pending flushes that we have completed */
2636     if (ret == 0) {
2637         bs->flushed_gen = current_gen;
2638     }
2639
2640     qemu_co_mutex_lock(&bs->reqs_lock);
2641     bs->active_flush_req = false;
2642     /* Return value is ignored - it's ok if wait queue is empty */
2643     qemu_co_queue_next(&bs->flush_queue);
2644     qemu_co_mutex_unlock(&bs->reqs_lock);
2645
2646 early_exit:
2647     bdrv_dec_in_flight(bs);
2648     return ret;
2649 }
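
/*
 * Worked example (illustrative): every completed write bumps bs->write_gen,
 * and a successful flush records the generation it covered in
 * bs->flushed_gen.  If two writes raise write_gen to N and a flush then
 * stores flushed_gen = N, a second flush issued with no intervening writes
 * sees flushed_gen == current_gen above and skips the flush-to-disk step,
 * only forwarding the flush to bs->file.
 */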
2650
2651 int bdrv_flush(BlockDriverState *bs)
2652 {
2653     Coroutine *co;
2654     FlushCo flush_co = {
2655         .bs = bs,
2656         .ret = NOT_DONE,
2657     };
2658
2659     if (qemu_in_coroutine()) {
2660         /* Fast-path if already in coroutine context */
2661         bdrv_flush_co_entry(&flush_co);
2662     } else {
2663         co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
2664         bdrv_coroutine_enter(bs, co);
2665         BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
2666     }
2667
2668     return flush_co.ret;
2669 }
2670
2671 typedef struct DiscardCo {
2672     BdrvChild *child;
2673     int64_t offset;
2674     int64_t bytes;
2675     int ret;
2676 } DiscardCo;
2677 static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
2678 {
2679     DiscardCo *rwco = opaque;
2680
2681     rwco->ret = bdrv_co_pdiscard(rwco->child, rwco->offset, rwco->bytes);
2682     aio_wait_kick();
2683 }
2684
2685 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
2686                                   int64_t bytes)
2687 {
2688     BdrvTrackedRequest req;
2689     int max_pdiscard, ret;
2690     int head, tail, align;
2691     BlockDriverState *bs = child->bs;
2692
2693     if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
2694         return -ENOMEDIUM;
2695     }
2696
2697     if (bdrv_has_readonly_bitmaps(bs)) {
2698         return -EPERM;
2699     }
2700
2701     if (offset < 0 || bytes < 0 || bytes > INT64_MAX - offset) {
2702         return -EIO;
2703     }
2704
2705     /* Do nothing if disabled.  */
2706     if (!(bs->open_flags & BDRV_O_UNMAP)) {
2707         return 0;
2708     }
2709
2710     if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
2711         return 0;
2712     }
2713
2714     /* Discard is advisory, but some devices track and coalesce
2715      * unaligned requests, so we must pass everything down rather than
2716      * round here.  Still, most devices will just silently ignore
2717      * unaligned requests (by returning -ENOTSUP), so we must fragment
2718      * the request accordingly.  */
2719     align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
2720     assert(align % bs->bl.request_alignment == 0);
2721     head = offset % align;
2722     tail = (offset + bytes) % align;
2723
2724     bdrv_inc_in_flight(bs);
2725     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
2726
2727     ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
2728     if (ret < 0) {
2729         goto out;
2730     }
2731
2732     max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
2733                                    align);
2734     assert(max_pdiscard >= bs->bl.request_alignment);
2735
2736     while (bytes > 0) {
2737         int64_t num = bytes;
2738
2739         if (head) {
2740             /* Make small requests to get to alignment boundaries. */
2741             num = MIN(bytes, align - head);
2742             if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
2743                 num %= bs->bl.request_alignment;
2744             }
2745             head = (head + num) % align;
2746             assert(num < max_pdiscard);
2747         } else if (tail) {
2748             if (num > align) {
2749                 /* Shorten the request to the last aligned cluster.  */
2750                 num -= tail;
2751             } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
2752                        tail > bs->bl.request_alignment) {
2753                 tail %= bs->bl.request_alignment;
2754                 num -= tail;
2755             }
2756         }
2757         /* limit request size */
2758         if (num > max_pdiscard) {
2759             num = max_pdiscard;
2760         }
2761
2762         if (!bs->drv) {
2763             ret = -ENOMEDIUM;
2764             goto out;
2765         }
2766         if (bs->drv->bdrv_co_pdiscard) {
2767             ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
2768         } else {
2769             BlockAIOCB *acb;
2770             CoroutineIOCompletion co = {
2771                 .coroutine = qemu_coroutine_self(),
2772             };
2773
2774             acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
2775                                              bdrv_co_io_em_complete, &co);
2776             if (acb == NULL) {
2777                 ret = -EIO;
2778                 goto out;
2779             } else {
2780                 qemu_coroutine_yield();
2781                 ret = co.ret;
2782             }
2783         }
2784         if (ret && ret != -ENOTSUP) {
2785             goto out;
2786         }
2787
2788         offset += num;
2789         bytes -= num;
2790     }
2791     ret = 0;
2792 out:
2793     bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
2794     tracked_request_end(&req);
2795     bdrv_dec_in_flight(bs);
2796     return ret;
2797 }
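
/*
 * Worked example (illustrative, with assumed limits): with
 * request_alignment = 512, pdiscard_alignment = 64 KiB and
 * max_pdiscard >= 128 KiB, a discard of 200 KiB starting at offset 4 KiB is
 * issued above as three pieces:
 *   head:   60 KiB  (offset 4 KiB up to the 64 KiB boundary)
 *   middle: 128 KiB (aligned to 64 KiB)
 *   tail:   12 KiB  (the remainder past the last 64 KiB boundary)
 * 60 + 128 + 12 = 200 KiB; only whole-alignment chunks are candidates for
 * efficient unmapping by the driver.
 */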
2798
2799 int bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes)
2800 {
2801     Coroutine *co;
2802     DiscardCo rwco = {
2803         .child = child,
2804         .offset = offset,
2805         .bytes = bytes,
2806         .ret = NOT_DONE,
2807     };
2808
2809     if (qemu_in_coroutine()) {
2810         /* Fast-path if already in coroutine context */
2811         bdrv_pdiscard_co_entry(&rwco);
2812     } else {
2813         co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
2814         bdrv_coroutine_enter(child->bs, co);
2815         BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
2816     }
2817
2818     return rwco.ret;
2819 }
2820
2821 int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
2822 {
2823     BlockDriver *drv = bs->drv;
2824     CoroutineIOCompletion co = {
2825         .coroutine = qemu_coroutine_self(),
2826     };
2827     BlockAIOCB *acb;
2828
2829     bdrv_inc_in_flight(bs);
2830     if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
2831         co.ret = -ENOTSUP;
2832         goto out;
2833     }
2834
2835     if (drv->bdrv_co_ioctl) {
2836         co.ret = drv->bdrv_co_ioctl(bs, req, buf);
2837     } else {
2838         acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
2839         if (!acb) {
2840             co.ret = -ENOTSUP;
2841             goto out;
2842         }
2843         qemu_coroutine_yield();
2844     }
2845 out:
2846     bdrv_dec_in_flight(bs);
2847     return co.ret;
2848 }
2849
2850 void *qemu_blockalign(BlockDriverState *bs, size_t size)
2851 {
2852     return qemu_memalign(bdrv_opt_mem_align(bs), size);
2853 }
2854
2855 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
2856 {
2857     return memset(qemu_blockalign(bs, size), 0, size);
2858 }
2859
2860 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
2861 {
2862     size_t align = bdrv_opt_mem_align(bs);
2863
2864     /* Ensure that NULL is never returned on success */
2865     assert(align > 0);
2866     if (size == 0) {
2867         size = align;
2868     }
2869
2870     return qemu_try_memalign(align, size);
2871 }
2872
2873 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
2874 {
2875     void *mem = qemu_try_blockalign(bs, size);
2876
2877     if (mem) {
2878         memset(mem, 0, size);
2879     }
2880
2881     return mem;
2882 }
2883
2884 /*
2885  * Check if all memory in this vector is aligned to the minimum memory alignment.
2886  */
2887 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
2888 {
2889     int i;
2890     size_t alignment = bdrv_min_mem_align(bs);
2891
2892     for (i = 0; i < qiov->niov; i++) {
2893         if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
2894             return false;
2895         }
2896         if (qiov->iov[i].iov_len % alignment) {
2897             return false;
2898         }
2899     }
2900
2901     return true;
2902 }
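
/*
 * Illustrative sketch, not part of the upstream file: probing whether a
 * bounce buffer of a given size satisfies bdrv_qiov_is_aligned().  Both the
 * base address (guaranteed by qemu_try_blockalign0()) and the length must be
 * multiples of the node's minimum memory alignment.  The helper name is an
 * assumption for this example.
 */
static bool bdrv_bounce_buffer_is_aligned_sketch(BlockDriverState *bs,
                                                 size_t size)
{
    void *buf = qemu_try_blockalign0(bs, size);
    QEMUIOVector qiov;
    bool aligned;

    if (!buf) {
        return false;
    }
    qemu_iovec_init_buf(&qiov, buf, size);
    aligned = bdrv_qiov_is_aligned(bs, &qiov);
    qemu_vfree(buf);
    return aligned;
}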
2903
2904 void bdrv_add_before_write_notifier(BlockDriverState *bs,
2905                                     NotifierWithReturn *notifier)
2906 {
2907     notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
2908 }
2909
2910 void bdrv_io_plug(BlockDriverState *bs)
2911 {
2912     BdrvChild *child;
2913
2914     QLIST_FOREACH(child, &bs->children, next) {
2915         bdrv_io_plug(child->bs);
2916     }
2917
2918     if (atomic_fetch_inc(&bs->io_plugged) == 0) {
2919         BlockDriver *drv = bs->drv;
2920         if (drv && drv->bdrv_io_plug) {
2921             drv->bdrv_io_plug(bs);
2922         }
2923     }
2924 }
2925
2926 void bdrv_io_unplug(BlockDriverState *bs)
2927 {
2928     BdrvChild *child;
2929
2930     assert(bs->io_plugged);
2931     if (atomic_fetch_dec(&bs->io_plugged) == 1) {
2932         BlockDriver *drv = bs->drv;
2933         if (drv && drv->bdrv_io_unplug) {
2934             drv->bdrv_io_unplug(bs);
2935         }
2936     }
2937
2938     QLIST_FOREACH(child, &bs->children, next) {
2939         bdrv_io_unplug(child->bs);
2940     }
2941 }
2942
2943 void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
2944 {
2945     BdrvChild *child;
2946
2947     if (bs->drv && bs->drv->bdrv_register_buf) {
2948         bs->drv->bdrv_register_buf(bs, host, size);
2949     }
2950     QLIST_FOREACH(child, &bs->children, next) {
2951         bdrv_register_buf(child->bs, host, size);
2952     }
2953 }
2954
2955 void bdrv_unregister_buf(BlockDriverState *bs, void *host)
2956 {
2957     BdrvChild *child;
2958
2959     if (bs->drv && bs->drv->bdrv_unregister_buf) {
2960         bs->drv->bdrv_unregister_buf(bs, host);
2961     }
2962     QLIST_FOREACH(child, &bs->children, next) {
2963         bdrv_unregister_buf(child->bs, host);
2964     }
2965 }
2966
2967 static int coroutine_fn bdrv_co_copy_range_internal(
2968         BdrvChild *src, uint64_t src_offset, BdrvChild *dst,
2969         uint64_t dst_offset, uint64_t bytes,
2970         BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
2971         bool recurse_src)
2972 {
2973     BdrvTrackedRequest req;
2974     int ret;
2975
2976     /* TODO We can support BDRV_REQ_NO_FALLBACK here */
2977     assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
2978     assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
2979
2980     if (!dst || !dst->bs) {
2981         return -ENOMEDIUM;
2982     }
2983     ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes);
2984     if (ret) {
2985         return ret;
2986     }
2987     if (write_flags & BDRV_REQ_ZERO_WRITE) {
2988         return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
2989     }
2990
2991     if (!src || !src->bs) {
2992         return -ENOMEDIUM;
2993     }
2994     ret = bdrv_check_byte_request(src->bs, src_offset, bytes);
2995     if (ret) {
2996         return ret;
2997     }
2998
2999     if (!src->bs->drv->bdrv_co_copy_range_from
3000         || !dst->bs->drv->bdrv_co_copy_range_to
3001         || src->bs->encrypted || dst->bs->encrypted) {
3002         return -ENOTSUP;
3003     }
3004
3005     if (recurse_src) {
3006         bdrv_inc_in_flight(src->bs);
3007         tracked_request_begin(&req, src->bs, src_offset, bytes,
3008                               BDRV_TRACKED_READ);
3009
3010         /* BDRV_REQ_SERIALISING is only valid for write operations */
3011         assert(!(read_flags & BDRV_REQ_SERIALISING));
3012         if (!(read_flags & BDRV_REQ_NO_SERIALISING)) {
3013             wait_serialising_requests(&req);
3014         }
3015
3016         ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3017                                                     src, src_offset,
3018                                                     dst, dst_offset,
3019                                                     bytes,
3020                                                     read_flags, write_flags);
3021
3022         tracked_request_end(&req);
3023         bdrv_dec_in_flight(src->bs);
3024     } else {
3025         bdrv_inc_in_flight(dst->bs);
3026         tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3027                               BDRV_TRACKED_WRITE);
3028         ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
3029                                         write_flags);
3030         if (!ret) {
3031             ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3032                                                       src, src_offset,
3033                                                       dst, dst_offset,
3034                                                       bytes,
3035                                                       read_flags, write_flags);
3036         }
3037         bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
3038         tracked_request_end(&req);
3039         bdrv_dec_in_flight(dst->bs);
3040     }
3041
3042     return ret;
3043 }
3044
3045 /* Copy range from @src to @dst.
3046  *
3047  * See the comment of bdrv_co_copy_range for the parameter and return value
3048  * semantics. */
3049 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
3050                                          BdrvChild *dst, uint64_t dst_offset,
3051                                          uint64_t bytes,
3052                                          BdrvRequestFlags read_flags,
3053                                          BdrvRequestFlags write_flags)
3054 {
3055     trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3056                                   read_flags, write_flags);
3057     return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3058                                        bytes, read_flags, write_flags, true);
3059 }
3060
3061 /* Copy range from @src to @dst.
3062  *
3063  * See the comment of bdrv_co_copy_range for the parameter and return value
3064  * semantics. */
3065 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
3066                                        BdrvChild *dst, uint64_t dst_offset,
3067                                        uint64_t bytes,
3068                                        BdrvRequestFlags read_flags,
3069                                        BdrvRequestFlags write_flags)
3070 {
3071     trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3072                                 read_flags, write_flags);
3073     return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3074                                        bytes, read_flags, write_flags, false);
3075 }
3076
3077 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
3078                                     BdrvChild *dst, uint64_t dst_offset,
3079                                     uint64_t bytes, BdrvRequestFlags read_flags,
3080                                     BdrvRequestFlags write_flags)
3081 {
3082     return bdrv_co_copy_range_from(src, src_offset,
3083                                    dst, dst_offset,
3084                                    bytes, read_flags, write_flags);
3085 }
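
/*
 * Illustrative sketch, not part of the upstream file: a caller that tries
 * offloaded copying first and falls back to an ordinary read plus write when
 * the drivers involved do not support it (-ENOTSUP).  The helper name, the
 * single bounce buffer and the lack of chunking are assumptions made for
 * this example only.
 */
static int coroutine_fn bdrv_copy_with_fallback_sketch(BdrvChild *src,
                                                       BdrvChild *dst,
                                                       uint64_t offset,
                                                       unsigned int bytes)
{
    void *buf;
    QEMUIOVector qiov;
    int ret = bdrv_co_copy_range(src, offset, dst, offset, bytes, 0, 0);

    if (ret != -ENOTSUP) {
        return ret;
    }

    /* Fallback: read into an aligned bounce buffer, then write it out. */
    buf = qemu_try_blockalign(src->bs, bytes);
    if (!buf) {
        return -ENOMEM;
    }
    qemu_iovec_init_buf(&qiov, buf, bytes);
    ret = bdrv_co_preadv(src, offset, bytes, &qiov, 0);
    if (ret >= 0) {
        ret = bdrv_co_pwritev(dst, offset, bytes, &qiov, 0);
    }
    qemu_vfree(buf);
    return ret;
}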
3086
3087 static void bdrv_parent_cb_resize(BlockDriverState *bs)
3088 {
3089     BdrvChild *c;
3090     QLIST_FOREACH(c, &bs->parents, next_parent) {
3091         if (c->role->resize) {
3092             c->role->resize(c);
3093         }
3094     }
3095 }
3096
3097 /**
3098  * Truncate file to 'offset' bytes (needed only for file protocols)
3099  */
3100 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset,
3101                                   PreallocMode prealloc, Error **errp)
3102 {
3103     BlockDriverState *bs = child->bs;
3104     BlockDriver *drv = bs->drv;
3105     BdrvTrackedRequest req;
3106     int64_t old_size, new_bytes;
3107     int ret;
3108
3109
3110     /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
3111     if (!drv) {
3112         error_setg(errp, "No medium inserted");
3113         return -ENOMEDIUM;
3114     }
3115     if (offset < 0) {
3116         error_setg(errp, "Image size cannot be negative");
3117         return -EINVAL;
3118     }
3119
3120     old_size = bdrv_getlength(bs);
3121     if (old_size < 0) {
3122         error_setg_errno(errp, -old_size, "Failed to get old image size");
3123         return old_size;
3124     }
3125
3126     if (offset > old_size) {
3127         new_bytes = offset - old_size;
3128     } else {
3129         new_bytes = 0;
3130     }
3131
3132     bdrv_inc_in_flight(bs);
3133     tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
3134                           BDRV_TRACKED_TRUNCATE);
3135
3136     /* If we are growing the image and potentially using preallocation for the
3137      * new area, we need to make sure that no write requests are made to it
3138      * concurrently or they might be overwritten by preallocation. */
3139     if (new_bytes) {
3140         mark_request_serialising(&req, 1);
3141     }
3142     if (bs->read_only) {
3143         error_setg(errp, "Image is read-only");
3144         ret = -EACCES;
3145         goto out;
3146     }
3147     ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
3148                                     0);
3149     if (ret < 0) {
3150         error_setg_errno(errp, -ret,
3151                          "Failed to prepare request for truncation");
3152         goto out;
3153     }
3154
3155     if (!drv->bdrv_co_truncate) {
3156         if (bs->file && drv->is_filter) {
3157             ret = bdrv_co_truncate(bs->file, offset, prealloc, errp);
3158             goto out;
3159         }
3160         error_setg(errp, "Image format driver does not support resize");
3161         ret = -ENOTSUP;
3162         goto out;
3163     }
3164
3165     ret = drv->bdrv_co_truncate(bs, offset, prealloc, errp);
3166     if (ret < 0) {
3167         goto out;
3168     }
3169     ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3170     if (ret < 0) {
3171         error_setg_errno(errp, -ret, "Could not refresh total sector count");
3172     } else {
3173         offset = bs->total_sectors * BDRV_SECTOR_SIZE;
3174     }
3175     /* It's possible that truncation succeeded while refresh_total_sectors
3176      * failed, but the latter doesn't affect how we should finish the request.
3177      * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */
3178     bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);
3179
3180 out:
3181     tracked_request_end(&req);
3182     bdrv_dec_in_flight(bs);
3183
3184     return ret;
3185 }
3186
3187 typedef struct TruncateCo {
3188     BdrvChild *child;
3189     int64_t offset;
3190     PreallocMode prealloc;
3191     Error **errp;
3192     int ret;
3193 } TruncateCo;
3194
3195 static void coroutine_fn bdrv_truncate_co_entry(void *opaque)
3196 {
3197     TruncateCo *tco = opaque;
3198     tco->ret = bdrv_co_truncate(tco->child, tco->offset, tco->prealloc,
3199                                 tco->errp);
3200     aio_wait_kick();
3201 }
3202
3203 int bdrv_truncate(BdrvChild *child, int64_t offset, PreallocMode prealloc,
3204                   Error **errp)
3205 {
3206     Coroutine *co;
3207     TruncateCo tco = {
3208         .child      = child,
3209         .offset     = offset,
3210         .prealloc   = prealloc,
3211         .errp       = errp,
3212         .ret        = NOT_DONE,
3213     };
3214
3215     if (qemu_in_coroutine()) {
3216         /* Fast-path if already in coroutine context */
3217         bdrv_truncate_co_entry(&tco);
3218     } else {
3219         co = qemu_coroutine_create(bdrv_truncate_co_entry, &tco);
3220         bdrv_coroutine_enter(child->bs, co);
3221         BDRV_POLL_WHILE(child->bs, tco.ret == NOT_DONE);
3222     }
3223
3224     return tco.ret;
3225 }
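
/*
 * Illustrative sketch, not part of the upstream file: growing an image from
 * outside coroutine context via the synchronous wrapper.  The helper name,
 * the growth amount and the use of PREALLOC_MODE_OFF are assumptions made
 * for this example only.
 */
static int bdrv_grow_by_sketch(BdrvChild *child, int64_t grow_by,
                               Error **errp)
{
    int64_t size = bdrv_getlength(child->bs);

    if (size < 0) {
        error_setg(errp, "Failed to query the current image size");
        return size;
    }
    return bdrv_truncate(child, size + grow_by, PREALLOC_MODE_OFF, errp);
}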