bdrv_parent_drained_begin(bs);
}
- bdrv_io_unplugged_begin(bs);
bdrv_drain_recurse(bs);
- bdrv_io_unplugged_end(bs);
}
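/* Illustrative sketch, not part of this patch: with the plug
 * suppression gone, a drained section reduces to parent notification
 * plus bdrv_drain_recurse().  Callers still use the usual pair:
 *
 *     bdrv_drained_begin(bs);
 *     ... operate on bs with no requests in flight ...
 *     bdrv_drained_end(bs);
 */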
void bdrv_drained_end(BlockDriverState *bs)
aio_context_acquire(aio_context);
bdrv_parent_drained_begin(bs);
- bdrv_io_unplugged_begin(bs);
aio_disable_external(aio_context);
aio_context_release(aio_context);
aio_context_acquire(aio_context);
aio_enable_external(aio_context);
- bdrv_io_unplugged_end(bs);
bdrv_parent_drained_end(bs);
aio_context_release(aio_context);
}
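/* For context: bdrv_drain_all_begin/end visit every BlockDriverState
 * under its own AioContext lock.  The iteration pattern (upstream API
 * of this era) is roughly:
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         AioContext *aio_context = bdrv_get_aio_context(bs);
 *         aio_context_acquire(aio_context);
 *         ...
 *         aio_context_release(aio_context);
 *     }
 */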
int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
bs->bl.request_alignment);
+ int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
+ MAX_WRITE_ZEROES_BOUNCE_BUFFER);
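/* Note: MIN_NON_ZERO treats 0 as "no limit", so a driver that leaves
 * bs->bl.max_transfer at 0 still gets a finite cap here:
 *     MIN_NON_ZERO(0, MAX_WRITE_ZEROES_BOUNCE_BUFFER)
 *         == MAX_WRITE_ZEROES_BOUNCE_BUFFER
 */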
assert(alignment % bs->bl.request_alignment == 0);
head = offset % alignment;
/* Align request.  Block drivers can expect the "bulk" of the request
 * to be aligned, and that unaligned requests do not cross cluster
 * boundaries.
 */
if (head) {
- /* Make a small request up to the first aligned sector. */
- num = MIN(count, alignment - head);
- head = 0;
+ /* Make a small request up to the first aligned sector. For
+ * convenience, limit this request to max_transfer even if
+ * we don't need to fall back to writes. */
+ num = MIN(MIN(count, max_transfer), alignment - head);
+ head = (head + num) % alignment;
+ assert(num < max_write_zeroes);
} else if (tail && num > alignment) {
/* Shorten the request to the last aligned sector. */
num -= tail;
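/* Worked example (illustrative numbers, assuming max_transfer is
 * large): alignment = 4096, offset = 5120, count = 16384, giving
 * head = 1024 and tail = 1024.
 *   pass 1: num = MIN(MIN(16384, max_transfer), 4096 - 1024) = 3072,
 *           a small unaligned head request; head becomes 0
 *   pass 2: num = 13312, shortened by tail to 12288: one large
 *           aligned write-zeroes request
 *   pass 3: num = 1024, the unaligned tail, which may take the
 *           -ENOTSUP fallback below
 */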
if (ret == -ENOTSUP) {
/* Fall back to bounce buffer if write zeroes is unsupported */
- int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
- MAX_WRITE_ZEROES_BOUNCE_BUFFER);
BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
if ((flags & BDRV_REQ_FUA) &&
if (acb->aiocb_info->get_aio_context) {
aio_poll(acb->aiocb_info->get_aio_context(acb), true);
} else if (acb->bs) {
+ /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
+ * assert that we're not using an I/O thread. Thread-safe
+ * code should use bdrv_aio_cancel_async exclusively.
+ */
+ assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
aio_poll(bdrv_get_aio_context(acb->bs), true);
} else {
abort();
return &acb->common;
}
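/* Sketch of the thread-safe alternative the new assertion points to
 * (hypothetical caller, not part of this patch): code running in an
 * IOThread must avoid the synchronous loop above and cancel
 * asynchronously, letting the completion callback run later:
 *
 *     bdrv_aio_cancel_async(acb);
 */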
-void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
- BlockCompletionFunc *cb, void *opaque)
-{
- BlockAIOCB *acb;
-
- acb = g_malloc(aiocb_info->aiocb_size);
- acb->aiocb_info = aiocb_info;
- acb->bs = bs;
- acb->cb = cb;
- acb->opaque = opaque;
- acb->refcnt = 1;
- return acb;
-}
-
-void qemu_aio_ref(void *p)
-{
- BlockAIOCB *acb = p;
- acb->refcnt++;
-}
-
-void qemu_aio_unref(void *p)
-{
- BlockAIOCB *acb = p;
- assert(acb->refcnt > 0);
- if (--acb->refcnt == 0) {
- g_free(acb);
- }
-}
-
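/* The removed helpers implement plain manual refcounting; upstream
 * bdrv_aio_cancel() relies on it like this (shown for context):
 *
 *     qemu_aio_ref(acb);
 *     bdrv_aio_cancel_async(acb);
 *     while (acb->refcnt > 1) {
 *         ... aio_poll() until the callback drops its reference ...
 *     }
 *     qemu_aio_unref(acb);
 */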
/**************************************************************/
/* Coroutine block device emulation */
{
BdrvTrackedRequest req;
int max_pdiscard, ret;
- int head, align;
+ int head, tail, align;
if (!bs->drv) {
    return -ENOMEDIUM;
}

if (!(bs->open_flags & BDRV_O_UNMAP)) {
    return 0;
}
- /* Discard is advisory, so ignore any unaligned head or tail */
+ /* Discard is advisory, but some devices track and coalesce
+ * unaligned requests, so we must pass everything down rather than
+ * round here. Still, most devices will just silently ignore
+ * unaligned requests (by returning -ENOTSUP), so we must fragment
+ * the request accordingly. */
align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
assert(align % bs->bl.request_alignment == 0);
head = offset % align;
- if (head) {
- head = MIN(count, align - head);
- count -= head;
- offset += head;
- }
- count = QEMU_ALIGN_DOWN(count, align);
- if (!count) {
- return 0;
- }
+ tail = (offset + count) % align;
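/* Worked example (illustrative numbers): align = 4096,
 * bs->bl.request_alignment = 512, offset = 1536, count = 12288,
 * giving head = 1536 and tail = 1536.  The loop below then issues:
 *   pass 1: num = MIN(12288, 4096 - 1536) = 2560 (already
 *           sector-aligned, so not truncated); head becomes 0
 *   pass 2: num = 9728, shortened by tail to 8192: one aligned request
 *   pass 3: num = 1536, the tail; a driver may ignore it with
 *           -ENOTSUP, which is harmless since discard is advisory
 */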
bdrv_inc_in_flight(bs);
tracked_request_begin(&req, bs, offset, count, BDRV_TRACKED_DISCARD);
max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
align);
- assert(max_pdiscard);
+ assert(max_pdiscard >= bs->bl.request_alignment);
while (count > 0) {
int ret;
- int num = MIN(count, max_pdiscard);
+ int num = count;
+
+ if (head) {
+ /* Make small requests to get to alignment boundaries. */
+ num = MIN(count, align - head);
+ if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
+ num %= bs->bl.request_alignment;
+ }
+ head = (head + num) % align;
+ assert(num < max_pdiscard);
+ } else if (tail) {
+ if (num > align) {
+ /* Shorten the request to the last aligned cluster. */
+ num -= tail;
+ } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
+ tail > bs->bl.request_alignment) {
+ tail %= bs->bl.request_alignment;
+ num -= tail;
+ }
+ }
+ /* limit request size */
+ if (num > max_pdiscard) {
+ num = max_pdiscard;
+ }
if (bs->drv->bdrv_co_pdiscard) {
ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
bdrv_io_plug(child->bs);
}
- if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
+ if (bs->io_plugged++ == 0) {
BlockDriver *drv = bs->drv;
if (drv && drv->bdrv_io_plug) {
drv->bdrv_io_plug(bs);
BdrvChild *child;
assert(bs->io_plugged);
- if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
+ if (--bs->io_plugged == 0) {
BlockDriver *drv = bs->drv;
if (drv && drv->bdrv_io_unplug) {
drv->bdrv_io_unplug(bs);
bdrv_io_unplug(child->bs);
}
}
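/* Typical use of the now-unconditional plug/unplug pair: emulated
 * devices batch a burst of submissions (sketch; submit_one() is a
 * hypothetical helper):
 *
 *     bdrv_io_plug(bs);
 *     for (i = 0; i < n; i++) {
 *         submit_one(bs, reqs[i]);
 *     }
 *     bdrv_io_unplug(bs);
 */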
-
-void bdrv_io_unplugged_begin(BlockDriverState *bs)
-{
- BdrvChild *child;
-
- if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
- BlockDriver *drv = bs->drv;
- if (drv && drv->bdrv_io_unplug) {
- drv->bdrv_io_unplug(bs);
- }
- }
-
- QLIST_FOREACH(child, &bs->children, next) {
- bdrv_io_unplugged_begin(child->bs);
- }
-}
-
-void bdrv_io_unplugged_end(BlockDriverState *bs)
-{
- BdrvChild *child;
-
- assert(bs->io_plug_disabled);
- QLIST_FOREACH(child, &bs->children, next) {
- bdrv_io_unplugged_end(child->bs);
- }
-
- if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
- BlockDriver *drv = bs->drv;
- if (drv && drv->bdrv_io_plug) {
- drv->bdrv_io_plug(bs);
- }
- }
-}