#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
-static BlockAIOCB *bdrv_co_aio_rw_vector(BdrvChild *child,
- int64_t sector_num,
- QEMUIOVector *qiov,
- int nb_sectors,
- BdrvRequestFlags flags,
- BlockCompletionFunc *cb,
- void *opaque,
- bool is_write);
+static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
+ int64_t offset,
+ QEMUIOVector *qiov,
+ BdrvRequestFlags flags,
+ BlockCompletionFunc *cb,
+ void *opaque,
+ bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int count, BdrvRequestFlags flags);
bool bdrv_requests_pending(BlockDriverState *bs)
{
BdrvChild *child;
- if (!QLIST_EMPTY(&bs->tracked_requests)) {
+ if (atomic_read(&bs->in_flight)) {
return true;
}
return false;
}
-static void bdrv_drain_recurse(BlockDriverState *bs)
+static bool bdrv_drain_recurse(BlockDriverState *bs)
{
BdrvChild *child;
+ bool waited;
+
+ waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);
if (bs->drv && bs->drv->bdrv_drain) {
bs->drv->bdrv_drain(bs);
}
+
QLIST_FOREACH(child, &bs->children, next) {
- bdrv_drain_recurse(child->bs);
+ waited |= bdrv_drain_recurse(child->bs);
}
+
+ return waited;
}
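
For reference, BDRV_POLL_WHILE() is introduced by this series. Its fast path, when the caller is already running in the BDS's AioContext, behaves roughly like the sketch below; the real macro in include/block/block.h adds a slow path for the main loop waiting on an IOThread's BDS, built on bs->wakeup and the bdrv_wakeup() added further down.

    /* Simplified model only (sketch, not the real macro): upstream
     * BDRV_POLL_WHILE() also handles cross-AioContext waiting. */
    #define BDRV_POLL_WHILE_SKETCH(bs, cond) ({              \
        bool waited_ = false;                                \
        AioContext *ctx_ = bdrv_get_aio_context(bs);         \
        while ((cond)) {                                     \
            aio_poll(ctx_, true);                            \
            waited_ = true;                                  \
        }                                                    \
        waited_; })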
typedef struct {
Coroutine *co;
BlockDriverState *bs;
- QEMUBH *bh;
bool done;
} BdrvCoDrainData;
-static void bdrv_drain_poll(BlockDriverState *bs)
-{
- bool busy = true;
-
- while (busy) {
- /* Keep iterating */
- busy = bdrv_requests_pending(bs);
- busy |= aio_poll(bdrv_get_aio_context(bs), busy);
- }
-}
-
static void bdrv_co_drain_bh_cb(void *opaque)
{
BdrvCoDrainData *data = opaque;
Coroutine *co = data->co;
+ BlockDriverState *bs = data->bs;
- qemu_bh_delete(data->bh);
- bdrv_drain_poll(data->bs);
+ bdrv_dec_in_flight(bs);
+ bdrv_drained_begin(bs);
data->done = true;
- qemu_coroutine_enter(co, NULL);
+ qemu_coroutine_enter(co);
}
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
.co = qemu_coroutine_self(),
.bs = bs,
.done = false,
- .bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
};
- qemu_bh_schedule(data.bh);
+ bdrv_inc_in_flight(bs);
+ aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
+ bdrv_co_drain_bh_cb, &data);
qemu_coroutine_yield();
/* If we are resumed from some other event (such as an aio completion or a
void bdrv_drained_begin(BlockDriverState *bs)
{
+ if (qemu_in_coroutine()) {
+ bdrv_co_yield_to_drain(bs);
+ return;
+ }
+
if (!bs->quiesce_counter++) {
aio_disable_external(bdrv_get_aio_context(bs));
bdrv_parent_drained_begin(bs);
}
- bdrv_io_unplugged_begin(bs);
bdrv_drain_recurse(bs);
- if (qemu_in_coroutine()) {
- bdrv_co_yield_to_drain(bs);
- } else {
- bdrv_drain_poll(bs);
- }
- bdrv_io_unplugged_end(bs);
}
void bdrv_drained_end(BlockDriverState *bs)
*
* This function does not flush data to disk, use bdrv_flush_all() for that
* after calling this function.
+ *
+ * This pauses all block jobs and disables external clients. It must
+ * be paired with bdrv_drain_all_end().
+ *
+ * NOTE: no new block jobs or BlockDriverStates can be created between
+ * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
*/
-void bdrv_drain_all(void)
+void bdrv_drain_all_begin(void)
{
/* Always run first iteration so any pending completion BHs run */
- bool busy = true;
+ bool waited = true;
BlockDriverState *bs;
BdrvNextIterator it;
BlockJob *job = NULL;
aio_context_acquire(aio_context);
bdrv_parent_drained_begin(bs);
- bdrv_io_unplugged_begin(bs);
- bdrv_drain_recurse(bs);
+ aio_disable_external(aio_context);
aio_context_release(aio_context);
if (!g_slist_find(aio_ctxs, aio_context)) {
* request completion. Therefore we must keep looping until there was no
* more activity rather than simply draining each device independently.
*/
- while (busy) {
- busy = false;
+ while (waited) {
+ waited = false;
for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
AioContext *aio_context = ctx->data;
aio_context_acquire(aio_context);
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
if (aio_context == bdrv_get_aio_context(bs)) {
- if (bdrv_requests_pending(bs)) {
- busy = true;
- aio_poll(aio_context, busy);
- }
+ waited |= bdrv_drain_recurse(bs);
}
}
- busy |= aio_poll(aio_context, false);
aio_context_release(aio_context);
}
}
+ g_slist_free(aio_ctxs);
+}
+
+void bdrv_drain_all_end(void)
+{
+ BlockDriverState *bs;
+ BdrvNextIterator it;
+ BlockJob *job = NULL;
+
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
AioContext *aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
- bdrv_io_unplugged_end(bs);
+ aio_enable_external(aio_context);
bdrv_parent_drained_end(bs);
aio_context_release(aio_context);
}
- g_slist_free(aio_ctxs);
- job = NULL;
while ((job = block_job_next(job))) {
AioContext *aio_context = blk_get_aio_context(job->blk);
}
}
+void bdrv_drain_all(void)
+{
+ bdrv_drain_all_begin();
+ bdrv_drain_all_end();
+}
+
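Splitting bdrv_drain_all() into a begin/end pair lets a caller keep every device quiesced across a multi-step operation instead of draining repeatedly. A hypothetical caller (function name made up):

    static void example_multi_device_operation(void)
    {
        bdrv_drain_all_begin();
        /* All in-flight requests have completed, block jobs are paused
         * and external clients are disabled; several BDSes can now be
         * manipulated without interference. */
        bdrv_drain_all_end();
    }
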
/**
* Remove an active request from the tracked requests list
*
return true;
}
+void bdrv_inc_in_flight(BlockDriverState *bs)
+{
+ atomic_inc(&bs->in_flight);
+}
+
+static void dummy_bh_cb(void *opaque)
+{
+}
+
+void bdrv_wakeup(BlockDriverState *bs)
+{
+ if (bs->wakeup) {
+ aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
+ }
+}
+
+void bdrv_dec_in_flight(BlockDriverState *bs)
+{
+ atomic_dec(&bs->in_flight);
+ bdrv_wakeup(bs);
+}
+
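The in_flight counter is what BDRV_POLL_WHILE() and bdrv_drain_recurse() wait on; bdrv_wakeup()'s dummy bottom half exists only to make the main loop's aio_poll() return so the polled condition is re-evaluated. A hypothetical user that must hold off drain around asynchronous work would bracket it like this (sketch, identifiers made up):

    static void example_start_background_work(BlockDriverState *bs)
    {
        bdrv_inc_in_flight(bs);   /* drain now waits for us */
        /* ... kick off async work whose completion path ends with: */
    }

    static void example_background_work_done(BlockDriverState *bs)
    {
        bdrv_dec_in_flight(bs);   /* wakes any BDRV_POLL_WHILE() waiter */
    }
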
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
BlockDriverState *bs = self->bs;
return 0;
}
-static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
- int nb_sectors)
-{
- if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
- return -EIO;
- }
-
- return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
- nb_sectors * BDRV_SECTOR_SIZE);
-}
-
typedef struct RwCo {
- BlockDriverState *bs;
+ BdrvChild *child;
int64_t offset;
QEMUIOVector *qiov;
bool is_write;
RwCo *rwco = opaque;
if (!rwco->is_write) {
- rwco->ret = bdrv_co_preadv(rwco->bs, rwco->offset,
+ rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
rwco->qiov->size, rwco->qiov,
rwco->flags);
} else {
- rwco->ret = bdrv_co_pwritev(rwco->bs, rwco->offset,
+ rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
rwco->qiov->size, rwco->qiov,
rwco->flags);
}
/*
* Process a vectored synchronous request using coroutines
*/
-static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
+static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
QEMUIOVector *qiov, bool is_write,
BdrvRequestFlags flags)
{
Coroutine *co;
RwCo rwco = {
- .bs = bs,
+ .child = child,
.offset = offset,
.qiov = qiov,
.is_write = is_write,
/* Fast-path if already in coroutine context */
bdrv_rw_co_entry(&rwco);
} else {
- AioContext *aio_context = bdrv_get_aio_context(bs);
-
- co = qemu_coroutine_create(bdrv_rw_co_entry);
- qemu_coroutine_enter(co, &rwco);
- while (rwco.ret == NOT_DONE) {
- aio_poll(aio_context, true);
- }
+ co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
+ qemu_coroutine_enter(co);
+ BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
}
return rwco.ret;
}
/*
* Process a synchronous request using coroutines
*/
-static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
+static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
QEMUIOVector qiov;
}
qemu_iovec_init_external(&qiov, &iov, 1);
- return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
+ return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
&qiov, is_write, flags);
}
int bdrv_read(BdrvChild *child, int64_t sector_num,
uint8_t *buf, int nb_sectors)
{
- return bdrv_rw_co(child->bs, sector_num, buf, nb_sectors, false, 0);
+ return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
}
/* Return < 0 if error. Important errors are:
int bdrv_write(BdrvChild *child, int64_t sector_num,
const uint8_t *buf, int nb_sectors)
{
- return bdrv_rw_co(child->bs, sector_num, (uint8_t *)buf, nb_sectors,
- true, 0);
+ return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
};
qemu_iovec_init_external(&qiov, &iov, 1);
- return bdrv_prwv_co(child->bs, offset, &qiov, true,
+ return bdrv_prwv_co(child, offset, &qiov, true,
BDRV_REQ_ZERO_WRITE | flags);
}
int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
int ret;
- ret = bdrv_prwv_co(child->bs, offset, qiov, false, 0);
+ ret = bdrv_prwv_co(child, offset, qiov, false, 0);
if (ret < 0) {
return ret;
}
int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
int ret;
- ret = bdrv_prwv_co(child->bs, offset, qiov, true, 0);
+ ret = bdrv_prwv_co(child, offset, qiov, true, 0);
if (ret < 0) {
return ret;
}
CoroutineIOCompletion *co = opaque;
co->ret = ret;
- qemu_coroutine_enter(co->coroutine, NULL);
+ qemu_coroutine_enter(co->coroutine);
}
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
return ret;
}
+static int coroutine_fn
+bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
+ uint64_t bytes, QEMUIOVector *qiov)
+{
+ BlockDriver *drv = bs->drv;
+
+ if (!drv->bdrv_co_pwritev_compressed) {
+ return -ENOTSUP;
+ }
+
+ return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
+}
+
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
{
/*
* Forwards an already correctly aligned request to the BlockDriver. This
- * handles copy on read and zeroing after EOF; any other features must be
- * implemented by the caller.
+ * handles copy on read, zeroing after EOF, and fragmentation of large
+ * reads; any other features must be implemented by the caller.
*/
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
int64_t align, QEMUIOVector *qiov, int flags)
{
int64_t total_bytes, max_bytes;
- int ret;
+ int ret = 0;
+ uint64_t bytes_remaining = bytes;
+ int max_transfer;
assert(is_power_of_2(align));
assert((offset & (align - 1)) == 0);
assert((bytes & (align - 1)) == 0);
assert(!qiov || bytes == qiov->size);
assert((bs->open_flags & BDRV_O_NO_IO) == 0);
+ max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
+ align);
/* TODO: We would need a per-BDS .supported_read_flags and
* potential fallback support, if we ever implement any read flags
}
}
- /* Forward the request to the BlockDriver */
+ /* Forward the request to the BlockDriver, possibly fragmenting it */
total_bytes = bdrv_getlength(bs);
if (total_bytes < 0) {
ret = total_bytes;
}
max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
- if (bytes <= max_bytes) {
+ if (bytes <= max_bytes && bytes <= max_transfer) {
ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
- } else if (max_bytes > 0) {
- QEMUIOVector local_qiov;
+ goto out;
+ }
- qemu_iovec_init(&local_qiov, qiov->niov);
- qemu_iovec_concat(&local_qiov, qiov, 0, max_bytes);
+ while (bytes_remaining) {
+ int num;
- ret = bdrv_driver_preadv(bs, offset, max_bytes, &local_qiov, 0);
+ if (max_bytes) {
+ QEMUIOVector local_qiov;
- qemu_iovec_destroy(&local_qiov);
- } else {
- ret = 0;
- }
+ num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
+ assert(num);
+ qemu_iovec_init(&local_qiov, qiov->niov);
+ qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
- /* Reading beyond end of file is supposed to produce zeroes */
- if (ret == 0 && total_bytes < offset + bytes) {
- uint64_t zero_offset = MAX(0, total_bytes - offset);
- uint64_t zero_bytes = offset + bytes - zero_offset;
- qemu_iovec_memset(qiov, zero_offset, 0, zero_bytes);
+ ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
+ num, &local_qiov, 0);
+ max_bytes -= num;
+ qemu_iovec_destroy(&local_qiov);
+ } else {
+ num = bytes_remaining;
+ ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
+ bytes_remaining);
+ }
+ if (ret < 0) {
+ goto out;
+ }
+ bytes_remaining -= num;
}
out:
- return ret;
+ return ret < 0 ? ret : 0;
}
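
The rewritten loop both fragments large reads and zero-fills past EOF. A standalone sketch of just the arithmetic (made-up sizes: a 10 MiB request with 3 MiB left before EOF and a 2 MiB max_transfer yields driver reads of 2 MiB and 1 MiB, then 7 MiB of zero-fill):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        uint64_t bytes_remaining = 10ULL << 20;   /* request size            */
        uint64_t max_bytes = 3ULL << 20;          /* aligned bytes up to EOF */
        const uint64_t max_transfer = 2ULL << 20; /* bs->bl.max_transfer     */

        while (bytes_remaining) {
            uint64_t num;

            if (max_bytes) {
                num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
                max_bytes -= num;
                printf("driver read, %" PRIu64 " bytes\n", num);
            } else {
                num = bytes_remaining;
                printf("zero-fill, %" PRIu64 " bytes past EOF\n", num);
            }
            bytes_remaining -= num;
        }
        return 0;
    }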
/*
* Handle a read request in coroutine context
*/
-int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
+int coroutine_fn bdrv_co_preadv(BdrvChild *child,
int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
+ BlockDriverState *bs = child->bs;
BlockDriver *drv = bs->drv;
BdrvTrackedRequest req;
return ret;
}
+ bdrv_inc_in_flight(bs);
+
/* Don't do copy-on-read if we read data before write operation */
if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
flags |= BDRV_REQ_COPY_ON_READ;
use_local_qiov ? &local_qiov : qiov,
flags);
tracked_request_end(&req);
+ bdrv_dec_in_flight(bs);
if (use_local_qiov) {
qemu_iovec_destroy(&local_qiov);
return -EINVAL;
}
- return bdrv_co_preadv(child->bs, sector_num << BDRV_SECTOR_BITS,
+ return bdrv_co_preadv(child, sector_num << BDRV_SECTOR_BITS,
nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}
int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
bs->bl.request_alignment);
+ int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
+ MAX_WRITE_ZEROES_BOUNCE_BUFFER);
- assert(is_power_of_2(alignment));
- head = offset & (alignment - 1);
- tail = (offset + count) & (alignment - 1);
- max_write_zeroes &= ~(alignment - 1);
+ assert(alignment % bs->bl.request_alignment == 0);
+ head = offset % alignment;
+ tail = (offset + count) % alignment;
+ max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
+ assert(max_write_zeroes >= bs->bl.request_alignment);
while (count > 0 && !ret) {
int num = count;
* boundaries.
*/
if (head) {
- /* Make a small request up to the first aligned sector. */
- num = MIN(count, alignment - head);
- head = 0;
+ /* Make a small request up to the first aligned sector. For
+ * convenience, limit this request to max_transfer even if
+ * we don't need to fall back to writes. */
+ num = MIN(MIN(count, max_transfer), alignment - head);
+ head = (head + num) % alignment;
+ assert(num < max_write_zeroes);
} else if (tail && num > alignment) {
/* Shorten the request to the last aligned sector. */
num -= tail;
if (ret == -ENOTSUP) {
/* Fall back to bounce buffer if write zeroes is unsupported */
- int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
- MAX_WRITE_ZEROES_BOUNCE_BUFFER);
BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
if ((flags & BDRV_REQ_FUA) &&
}
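
The switch from power-of-two masking to modulo arithmetic is what allows non-power-of-two alignments. A standalone sketch of the head/body/tail split (made-up numbers; the max_write_zeroes clamp is omitted): offset 4000, count 10000, alignment 4096 becomes a 96-byte unaligned head, an 8192-byte aligned body, and a 1712-byte tail.

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        int offset = 4000, count = 10000;
        const int alignment = 4096, max_transfer = 1 << 20;
        int head = offset % alignment;
        int tail = (offset + count) % alignment;

        while (count > 0) {
            int num = count;

            if (head) {
                /* small request up to the first aligned boundary */
                num = MIN(MIN(count, max_transfer), alignment - head);
                head = (head + num) % alignment;
            } else if (tail && num > alignment) {
                /* shorten to end on the last aligned boundary */
                num -= tail;
            }
            printf("write_zeroes: offset=%d bytes=%d\n", offset, num);
            offset += num;
            count -= num;
        }
        return 0;
    }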
/*
- * Forwards an already correctly aligned write request to the BlockDriver.
+ * Forwards an already correctly aligned write request to the BlockDriver,
+ * after possibly fragmenting it.
*/
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
int64_t start_sector = offset >> BDRV_SECTOR_BITS;
int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
+ uint64_t bytes_remaining = bytes;
+ int max_transfer;
assert(is_power_of_2(align));
assert((offset & (align - 1)) == 0);
assert(!qiov || bytes == qiov->size);
assert((bs->open_flags & BDRV_O_NO_IO) == 0);
assert(!(flags & ~BDRV_REQ_MASK));
+ max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
+ align);
waited = wait_serialising_requests(req);
assert(!waited || !req->serialising);
} else if (flags & BDRV_REQ_ZERO_WRITE) {
bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
- } else {
+ } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
+ ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
+ } else if (bytes <= max_transfer) {
bdrv_debug_event(bs, BLKDBG_PWRITEV);
ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
+ } else {
+ bdrv_debug_event(bs, BLKDBG_PWRITEV);
+ while (bytes_remaining) {
+ int num = MIN(bytes_remaining, max_transfer);
+ QEMUIOVector local_qiov;
+ int local_flags = flags;
+
+ assert(num);
+ if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
+ !(bs->supported_write_flags & BDRV_REQ_FUA)) {
+ /* If FUA is going to be emulated by flush, we only
+ * need to flush on the last iteration */
+ local_flags &= ~BDRV_REQ_FUA;
+ }
+ qemu_iovec_init(&local_qiov, qiov->niov);
+ qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
+
+ ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
+ num, &local_qiov, local_flags);
+ qemu_iovec_destroy(&local_qiov);
+ if (ret < 0) {
+ break;
+ }
+ bytes_remaining -= num;
+ }
}
bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
+ ++bs->write_gen;
bdrv_set_dirty(bs, start_sector, end_sector - start_sector);
if (bs->wr_highest_offset < offset + bytes) {
if (ret >= 0) {
bs->total_sectors = MAX(bs->total_sectors, end_sector);
+ ret = 0;
}
return ret;
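
The FUA downgrade above is worth a numeric sketch: with made-up sizes (a 3 MiB BDRV_REQ_FUA write, a 1 MiB max_transfer, no native FUA support), only the final fragment keeps the flag, so FUA emulation flushes once rather than three times.

    #include <stdbool.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        unsigned bytes_remaining = 3u << 20;
        const unsigned max_transfer = 1u << 20;
        const bool native_fua = false;    /* bs->supported_write_flags */

        while (bytes_remaining) {
            unsigned num = MIN(bytes_remaining, max_transfer);
            /* FUA survives on the last fragment, or always with native FUA */
            bool fua = native_fua || num == bytes_remaining;

            printf("pwritev %u bytes, FUA=%d\n", num, fua);
            bytes_remaining -= num;
        }
        return 0;
    }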
/*
* Handle a write request in coroutine context
*/
-int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs,
+int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
+ BlockDriverState *bs = child->bs;
BdrvTrackedRequest req;
uint64_t align = bs->bl.request_alignment;
uint8_t *head_buf = NULL;
return ret;
}
+ bdrv_inc_in_flight(bs);
/*
* Align write if necessary by performing a read-modify-write cycle.
* Pad qiov with the read parts and be sure to have a tracked request not
qemu_vfree(tail_buf);
out:
tracked_request_end(&req);
+ bdrv_dec_in_flight(bs);
return ret;
}
return -EINVAL;
}
- return bdrv_co_pwritev(child->bs, sector_num << BDRV_SECTOR_BITS,
+ return bdrv_co_pwritev(child, sector_num << BDRV_SECTOR_BITS,
nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}
return bdrv_co_do_writev(child, sector_num, nb_sectors, qiov, 0);
}
-int coroutine_fn bdrv_co_pwrite_zeroes(BlockDriverState *bs,
- int64_t offset, int count,
- BdrvRequestFlags flags)
+int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
+ int count, BdrvRequestFlags flags)
{
- trace_bdrv_co_pwrite_zeroes(bs, offset, count, flags);
+ trace_bdrv_co_pwrite_zeroes(child->bs, offset, count, flags);
- if (!(bs->open_flags & BDRV_O_UNMAP)) {
+ if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
flags &= ~BDRV_REQ_MAY_UNMAP;
}
- return bdrv_co_pwritev(bs, offset, count, NULL,
+ return bdrv_co_pwritev(child, offset, count, NULL,
BDRV_REQ_ZERO_WRITE | flags);
}
+/*
+ * Flush ALL BDSes regardless of whether they are reachable via a
+ * BlockBackend.
+ */
+int bdrv_flush_all(void)
+{
+ BdrvNextIterator it;
+ BlockDriverState *bs = NULL;
+ int result = 0;
+
+ for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
+ AioContext *aio_context = bdrv_get_aio_context(bs);
+ int ret;
+
+ aio_context_acquire(aio_context);
+ ret = bdrv_flush(bs);
+ if (ret < 0 && !result) {
+ result = ret;
+ }
+ aio_context_release(aio_context);
+ }
+
+ return result;
+}
+
+
typedef struct BdrvCoGetBlockStatusData {
BlockDriverState *bs;
BlockDriverState *base;
}
*file = NULL;
+ bdrv_inc_in_flight(bs);
ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
file);
if (ret < 0) {
*pnum = 0;
- return ret;
+ goto out;
}
if (ret & BDRV_BLOCK_RAW) {
assert(ret & BDRV_BLOCK_OFFSET_VALID);
- return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
- *pnum, pnum, file);
+ ret = bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
+ *pnum, pnum, file);
+ goto out;
}
if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
}
}
+out:
+ bdrv_dec_in_flight(bs);
return ret;
}
/* Fast-path if already in coroutine context */
bdrv_get_block_status_above_co_entry(&data);
} else {
- AioContext *aio_context = bdrv_get_aio_context(bs);
-
- co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry);
- qemu_coroutine_enter(co, &data);
- while (!data.done) {
- aio_poll(aio_context, true);
- }
+ co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
+ &data);
+ qemu_coroutine_enter(co);
+ BDRV_POLL_WHILE(bs, !data.done);
}
return data.ret;
}
return 0;
}
-int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
- const uint8_t *buf, int nb_sectors)
-{
- BlockDriver *drv = bs->drv;
- int ret;
-
- if (!drv) {
- return -ENOMEDIUM;
- }
- if (!drv->bdrv_write_compressed) {
- return -ENOTSUP;
- }
- ret = bdrv_check_request(bs, sector_num, nb_sectors);
- if (ret < 0) {
- return ret;
- }
-
- assert(QLIST_EMPTY(&bs->dirty_bitmaps));
-
- return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
-}
-
typedef struct BdrvVmstateCo {
BlockDriverState *bs;
QEMUIOVector *qiov;
.is_read = is_read,
.ret = -EINPROGRESS,
};
- Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry);
+ Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);
- qemu_coroutine_enter(co, &data);
+ qemu_coroutine_enter(co);
while (data.ret == -EINPROGRESS) {
aio_poll(bdrv_get_aio_context(bs), true);
}
BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num,
 QEMUIOVector *qiov, int nb_sectors,
 BlockCompletionFunc *cb, void *opaque)
{
trace_bdrv_aio_readv(child->bs, sector_num, nb_sectors, opaque);
- return bdrv_co_aio_rw_vector(child, sector_num, qiov, nb_sectors, 0,
- cb, opaque, false);
+ assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
+ return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
+ 0, cb, opaque, false);
}
BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
 QEMUIOVector *qiov, int nb_sectors,
 BlockCompletionFunc *cb, void *opaque)
{
trace_bdrv_aio_writev(child->bs, sector_num, nb_sectors, opaque);
- return bdrv_co_aio_rw_vector(child, sector_num, qiov, nb_sectors, 0,
- cb, opaque, true);
+ assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
+ return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
+ 0, cb, opaque, true);
}
void bdrv_aio_cancel(BlockAIOCB *acb)
if (acb->aiocb_info->get_aio_context) {
aio_poll(acb->aiocb_info->get_aio_context(acb), true);
} else if (acb->bs) {
+ /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
+ * assert that we're not using an I/O thread. Thread-safe
+ * code should use bdrv_aio_cancel_async exclusively.
+ */
+ assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
aio_poll(bdrv_get_aio_context(acb->bs), true);
} else {
abort();
union {
/* Used during read, write, trim */
struct {
- int64_t sector;
- int nb_sectors;
+ int64_t offset;
+ int bytes;
int flags;
QEMUIOVector *qiov;
};
bool is_write;
bool need_bh;
bool *done;
- QEMUBH* bh;
} BlockAIOCBCoroutine;
static const AIOCBInfo bdrv_em_co_aiocb_info = {
static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
{
if (!acb->need_bh) {
+ bdrv_dec_in_flight(acb->common.bs);
acb->common.cb(acb->common.opaque, acb->req.error);
qemu_aio_unref(acb);
}
BlockAIOCBCoroutine *acb = opaque;
assert(!acb->need_bh);
- qemu_bh_delete(acb->bh);
bdrv_co_complete(acb);
}
if (acb->req.error != -EINPROGRESS) {
BlockDriverState *bs = acb->common.bs;
- acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
- qemu_bh_schedule(acb->bh);
+ aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
}
}
BlockAIOCBCoroutine *acb = opaque;
if (!acb->is_write) {
- acb->req.error = bdrv_co_do_readv(acb->child, acb->req.sector,
- acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
+ acb->req.error = bdrv_co_preadv(acb->child, acb->req.offset,
+ acb->req.qiov->size, acb->req.qiov, acb->req.flags);
} else {
- acb->req.error = bdrv_co_do_writev(acb->child, acb->req.sector,
- acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
+ acb->req.error = bdrv_co_pwritev(acb->child, acb->req.offset,
+ acb->req.qiov->size, acb->req.qiov, acb->req.flags);
}
bdrv_co_complete(acb);
}
-static BlockAIOCB *bdrv_co_aio_rw_vector(BdrvChild *child,
- int64_t sector_num,
- QEMUIOVector *qiov,
- int nb_sectors,
- BdrvRequestFlags flags,
- BlockCompletionFunc *cb,
- void *opaque,
- bool is_write)
+static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
+ int64_t offset,
+ QEMUIOVector *qiov,
+ BdrvRequestFlags flags,
+ BlockCompletionFunc *cb,
+ void *opaque,
+ bool is_write)
{
Coroutine *co;
BlockAIOCBCoroutine *acb;
+ /* Matched by bdrv_co_complete's bdrv_dec_in_flight. */
+ bdrv_inc_in_flight(child->bs);
+
acb = qemu_aio_get(&bdrv_em_co_aiocb_info, child->bs, cb, opaque);
acb->child = child;
acb->need_bh = true;
acb->req.error = -EINPROGRESS;
- acb->req.sector = sector_num;
- acb->req.nb_sectors = nb_sectors;
+ acb->req.offset = offset;
acb->req.qiov = qiov;
acb->req.flags = flags;
acb->is_write = is_write;
- co = qemu_coroutine_create(bdrv_co_do_rw);
- qemu_coroutine_enter(co, acb);
+ co = qemu_coroutine_create(bdrv_co_do_rw, acb);
+ qemu_coroutine_enter(co);
bdrv_co_maybe_schedule_bh(acb);
return &acb->common;
Coroutine *co;
BlockAIOCBCoroutine *acb;
- acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
- acb->need_bh = true;
- acb->req.error = -EINPROGRESS;
-
- co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
- qemu_coroutine_enter(co, acb);
-
- bdrv_co_maybe_schedule_bh(acb);
- return &acb->common;
-}
-
-static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
-{
- BlockAIOCBCoroutine *acb = opaque;
- BlockDriverState *bs = acb->common.bs;
-
- acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
- bdrv_co_complete(acb);
-}
-
-BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors,
- BlockCompletionFunc *cb, void *opaque)
-{
- Coroutine *co;
- BlockAIOCBCoroutine *acb;
-
- trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
+ /* Matched by bdrv_co_complete's bdrv_dec_in_flight. */
+ bdrv_inc_in_flight(bs);
acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
acb->need_bh = true;
acb->req.error = -EINPROGRESS;
- acb->req.sector = sector_num;
- acb->req.nb_sectors = nb_sectors;
- co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
- qemu_coroutine_enter(co, acb);
+
+ co = qemu_coroutine_create(bdrv_aio_flush_co_entry, acb);
+ qemu_coroutine_enter(co);
bdrv_co_maybe_schedule_bh(acb);
return &acb->common;
}
-void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
- BlockCompletionFunc *cb, void *opaque)
-{
- BlockAIOCB *acb;
-
- acb = g_malloc(aiocb_info->aiocb_size);
- acb->aiocb_info = aiocb_info;
- acb->bs = bs;
- acb->cb = cb;
- acb->opaque = opaque;
- acb->refcnt = 1;
- return acb;
-}
-
-void qemu_aio_ref(void *p)
-{
- BlockAIOCB *acb = p;
- acb->refcnt++;
-}
-
-void qemu_aio_unref(void *p)
-{
- BlockAIOCB *acb = p;
- assert(acb->refcnt > 0);
- if (--acb->refcnt == 0) {
- g_free(acb);
- }
-}
-
/**************************************************************/
/* Coroutine block device emulation */
+typedef struct FlushCo {
+ BlockDriverState *bs;
+ int ret;
+} FlushCo;
+
+
static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
- RwCo *rwco = opaque;
+ FlushCo *rwco = opaque;
rwco->ret = bdrv_co_flush(rwco->bs);
}
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
int ret;
- BdrvTrackedRequest req;
if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
bdrv_is_sg(bs)) {
return 0;
}
- tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);
+ bdrv_inc_in_flight(bs);
+
+ int current_gen = bs->write_gen;
+
+ /* Wait until any previous flushes are completed */
+ while (bs->active_flush_req) {
+ qemu_co_queue_wait(&bs->flush_queue);
+ }
+
+ bs->active_flush_req = true;
/* Write back all layers by calling one driver function */
if (bs->drv->bdrv_co_flush) {
goto flush_parent;
}
+ /* Check if we really need to flush anything */
+ if (bs->flushed_gen == current_gen) {
+ goto flush_parent;
+ }
+
BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
if (bs->drv->bdrv_co_flush_to_disk) {
ret = bs->drv->bdrv_co_flush_to_disk(bs);
*/
ret = 0;
}
+
if (ret < 0) {
goto out;
}
flush_parent:
ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
out:
- tracked_request_end(&req);
+ /* Notify any pending flushes that we have completed */
+ if (ret == 0) {
+ bs->flushed_gen = current_gen;
+ }
+ bs->active_flush_req = false;
+ /* Return value is ignored - it's ok if wait queue is empty */
+ qemu_co_queue_next(&bs->flush_queue);
+
+ bdrv_dec_in_flight(bs);
return ret;
}
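
The write_gen/flushed_gen pair makes back-to-back flushes cheap. An illustrative sequence (hypothetical caller, made-up generation numbers):

    bdrv_pwrite(child, 0, buf, 512); /* bdrv_aligned_pwritev: ++bs->write_gen -> 7 */
    bdrv_flush(child->bs);           /* device flushed, bs->flushed_gen = 7        */
    bdrv_flush(child->bs);           /* flushed_gen == write_gen: the device flush
                                      * is skipped, only the parent is flushed     */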
int bdrv_flush(BlockDriverState *bs)
{
Coroutine *co;
- RwCo rwco = {
+ FlushCo flush_co = {
.bs = bs,
.ret = NOT_DONE,
};
if (qemu_in_coroutine()) {
/* Fast-path if already in coroutine context */
- bdrv_flush_co_entry(&rwco);
+ bdrv_flush_co_entry(&flush_co);
} else {
- AioContext *aio_context = bdrv_get_aio_context(bs);
-
- co = qemu_coroutine_create(bdrv_flush_co_entry);
- qemu_coroutine_enter(co, &rwco);
- while (rwco.ret == NOT_DONE) {
- aio_poll(aio_context, true);
- }
+ co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
+ qemu_coroutine_enter(co);
+ BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
}
- return rwco.ret;
+ return flush_co.ret;
}
typedef struct DiscardCo {
BlockDriverState *bs;
- int64_t sector_num;
- int nb_sectors;
+ int64_t offset;
+ int count;
int ret;
} DiscardCo;
-static void coroutine_fn bdrv_discard_co_entry(void *opaque)
+static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
{
DiscardCo *rwco = opaque;
- rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
+ rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->count);
}
-int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
- int nb_sectors)
+int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
+ int count)
{
BdrvTrackedRequest req;
- int max_discard, ret;
+ int max_pdiscard, ret;
+ int head, tail, align;
if (!bs->drv) {
return -ENOMEDIUM;
}
- ret = bdrv_check_request(bs, sector_num, nb_sectors);
+ ret = bdrv_check_byte_request(bs, offset, count);
if (ret < 0) {
return ret;
} else if (bs->read_only) {
return 0;
}
- if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
+ if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
return 0;
}
- tracked_request_begin(&req, bs, sector_num << BDRV_SECTOR_BITS,
- nb_sectors << BDRV_SECTOR_BITS, BDRV_TRACKED_DISCARD);
+ /* Discard is advisory, but some devices track and coalesce
+ * unaligned requests, so we must pass everything down rather than
+ * round here. Still, most devices will just silently ignore
+ * unaligned requests (by returning -ENOTSUP), so we must fragment
+ * the request accordingly. */
+ align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
+ assert(align % bs->bl.request_alignment == 0);
+ head = offset % align;
+ tail = (offset + count) % align;
+
+ bdrv_inc_in_flight(bs);
+ tracked_request_begin(&req, bs, offset, count, BDRV_TRACKED_DISCARD);
ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
if (ret < 0) {
goto out;
}
- max_discard = MIN_NON_ZERO(bs->bl.max_pdiscard >> BDRV_SECTOR_BITS,
- BDRV_REQUEST_MAX_SECTORS);
- while (nb_sectors > 0) {
+ max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
+ align);
+ assert(max_pdiscard >= bs->bl.request_alignment);
+
+ while (count > 0) {
int ret;
- int num = nb_sectors;
- int discard_alignment = bs->bl.pdiscard_alignment >> BDRV_SECTOR_BITS;
-
- /* align request */
- if (discard_alignment &&
- num >= discard_alignment &&
- sector_num % discard_alignment) {
- if (num > discard_alignment) {
- num = discard_alignment;
+ int num = count;
+
+ if (head) {
+ /* Make small requests to get to alignment boundaries. */
+ num = MIN(count, align - head);
+ if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
+ num %= bs->bl.request_alignment;
+ }
+ head = (head + num) % align;
+ assert(num < max_pdiscard);
+ } else if (tail) {
+ if (num > align) {
+ /* Shorten the request to the last aligned cluster. */
+ num -= tail;
+ } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
+ tail > bs->bl.request_alignment) {
+ tail %= bs->bl.request_alignment;
+ num -= tail;
}
- num -= sector_num % discard_alignment;
}
-
/* limit request size */
- if (num > max_discard) {
- num = max_discard;
+ if (num > max_pdiscard) {
+ num = max_pdiscard;
}
- if (bs->drv->bdrv_co_discard) {
- ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
+ if (bs->drv->bdrv_co_pdiscard) {
+ ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
} else {
BlockAIOCB *acb;
CoroutineIOCompletion co = {
.coroutine = qemu_coroutine_self(),
};
- acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
- bdrv_co_io_em_complete, &co);
+ acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
+ bdrv_co_io_em_complete, &co);
if (acb == NULL) {
ret = -EIO;
goto out;
goto out;
}
- sector_num += num;
- nb_sectors -= num;
+ offset += num;
+ count -= num;
}
ret = 0;
out:
+ ++bs->write_gen;
bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS,
req.bytes >> BDRV_SECTOR_BITS);
tracked_request_end(&req);
+ bdrv_dec_in_flight(bs);
return ret;
}
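
As with write zeroes, the modulo arithmetic is clearer with numbers. A standalone sketch (made-up limits: 64 KiB pdiscard_alignment; the request_alignment trimming branches are omitted) shows a 192 KiB discard at offset 512 split into a 65024-byte head, a 131072-byte aligned body and a 512-byte tail:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        long offset = 512, count = 196608;    /* made-up request    */
        const long align = 65536;             /* pdiscard_alignment */
        const long max_pdiscard = 1L << 20;   /* bl.max_pdiscard    */
        long head = offset % align;
        long tail = (offset + count) % align;

        while (count > 0) {
            long num = count;

            if (head) {
                /* small request up to the alignment boundary */
                num = MIN(count, align - head);
                head = (head + num) % align;
            } else if (tail && num > align) {
                /* shorten to end on the last aligned boundary */
                num -= tail;
            }
            num = MIN(num, max_pdiscard);
            printf("pdiscard offset=%ld bytes=%ld\n", offset, num);
            offset += num;
            count -= num;
        }
        return 0;
    }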
-int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
+int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count)
{
Coroutine *co;
DiscardCo rwco = {
.bs = bs,
- .sector_num = sector_num,
- .nb_sectors = nb_sectors,
+ .offset = offset,
+ .count = count,
.ret = NOT_DONE,
};
if (qemu_in_coroutine()) {
/* Fast-path if already in coroutine context */
- bdrv_discard_co_entry(&rwco);
+ bdrv_pdiscard_co_entry(&rwco);
} else {
- AioContext *aio_context = bdrv_get_aio_context(bs);
-
- co = qemu_coroutine_create(bdrv_discard_co_entry);
- qemu_coroutine_enter(co, &rwco);
- while (rwco.ret == NOT_DONE) {
- aio_poll(aio_context, true);
- }
+ co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
+ qemu_coroutine_enter(co);
+ BDRV_POLL_WHILE(bs, rwco.ret == NOT_DONE);
}
return rwco.ret;
}
-static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
+int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
BlockDriver *drv = bs->drv;
- BdrvTrackedRequest tracked_req;
CoroutineIOCompletion co = {
.coroutine = qemu_coroutine_self(),
};
BlockAIOCB *acb;
- tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
- if (!drv || !drv->bdrv_aio_ioctl) {
- co.ret = -ENOTSUP;
- goto out;
- }
-
- acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
- if (!acb) {
+ bdrv_inc_in_flight(bs);
+ if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
co.ret = -ENOTSUP;
goto out;
}
- qemu_coroutine_yield();
-out:
- tracked_request_end(&tracked_req);
- return co.ret;
-}
-
-typedef struct {
- BlockDriverState *bs;
- int req;
- void *buf;
- int ret;
-} BdrvIoctlCoData;
-
-static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
-{
- BdrvIoctlCoData *data = opaque;
- data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
-}
-
-/* needed for generic scsi interface */
-int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
-{
- BdrvIoctlCoData data = {
- .bs = bs,
- .req = req,
- .buf = buf,
- .ret = -EINPROGRESS,
- };
- if (qemu_in_coroutine()) {
- /* Fast-path if already in coroutine context */
- bdrv_co_ioctl_entry(&data);
+ if (drv->bdrv_co_ioctl) {
+ co.ret = drv->bdrv_co_ioctl(bs, req, buf);
} else {
- Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry);
-
- qemu_coroutine_enter(co, &data);
- while (data.ret == -EINPROGRESS) {
- aio_poll(bdrv_get_aio_context(bs), true);
+ acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
+ if (!acb) {
+ co.ret = -ENOTSUP;
+ goto out;
}
+ qemu_coroutine_yield();
}
- return data.ret;
-}
-
-static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
-{
- BlockAIOCBCoroutine *acb = opaque;
- acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
- acb->req.req, acb->req.buf);
- bdrv_co_complete(acb);
-}
-
-BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
- unsigned long int req, void *buf,
- BlockCompletionFunc *cb, void *opaque)
-{
- BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
- bs, cb, opaque);
- Coroutine *co;
-
- acb->need_bh = true;
- acb->req.error = -EINPROGRESS;
- acb->req.req = req;
- acb->req.buf = buf;
- co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry);
- qemu_coroutine_enter(co, acb);
-
- bdrv_co_maybe_schedule_bh(acb);
- return &acb->common;
+out:
+ bdrv_dec_in_flight(bs);
+ return co.ret;
}
void *qemu_blockalign(BlockDriverState *bs, size_t size)
bdrv_io_plug(child->bs);
}
- if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
+ if (bs->io_plugged++ == 0) {
BlockDriver *drv = bs->drv;
if (drv && drv->bdrv_io_plug) {
drv->bdrv_io_plug(bs);
BdrvChild *child;
assert(bs->io_plugged);
- if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
+ if (--bs->io_plugged == 0) {
BlockDriver *drv = bs->drv;
if (drv && drv->bdrv_io_unplug) {
drv->bdrv_io_unplug(bs);
bdrv_io_unplug(child->bs);
}
}
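
With io_plug_disabled gone, plugging is a plain nesting counter: only the 0->1 and 1->0 transitions reach the driver. A hypothetical batching caller (sketch, identifiers made up):

    static void example_submit_batch(BdrvChild *child, QEMUIOVector **qiovs,
                                     int64_t *sectors, int *nsecs, int n,
                                     BlockCompletionFunc *cb, void *opaque)
    {
        int i;

        bdrv_io_plug(child->bs);       /* outermost plug reaches the driver */
        for (i = 0; i < n; i++) {
            bdrv_aio_writev(child, sectors[i], qiovs[i], nsecs[i], cb, opaque);
        }
        bdrv_io_unplug(child->bs);     /* driver submits the queued batch */
    }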
-
-void bdrv_io_unplugged_begin(BlockDriverState *bs)
-{
- BdrvChild *child;
-
- if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
- BlockDriver *drv = bs->drv;
- if (drv && drv->bdrv_io_unplug) {
- drv->bdrv_io_unplug(bs);
- }
- }
-
- QLIST_FOREACH(child, &bs->children, next) {
- bdrv_io_unplugged_begin(child->bs);
- }
-}
-
-void bdrv_io_unplugged_end(BlockDriverState *bs)
-{
- BdrvChild *child;
-
- assert(bs->io_plug_disabled);
- QLIST_FOREACH(child, &bs->children, next) {
- bdrv_io_unplugged_end(child->bs);
- }
-
- if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
- BlockDriver *drv = bs->drv;
- if (drv && drv->bdrv_io_plug) {
- drv->bdrv_io_plug(bs);
- }
- }
-}