{
AIORequestData *reqdata = (AIORequestData *) opaque;
ArchipelagoAIOCB *aio_cb = (ArchipelagoAIOCB *) reqdata->aio_cb;
+ AioContext *ctx = bdrv_get_aio_context(aio_cb->common.bs);
+ aio_context_acquire(ctx);
aio_cb->common.cb(aio_cb->common.opaque, aio_cb->ret);
+ aio_context_release(ctx);
aio_cb->status = 0;
qemu_aio_unref(aio_cb);
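
This hunk, and several below it (block-backend, bdrv_co_em_bh, iscsi, null, rbd), apply the same rule: bottom halves no longer run with the AioContext lock implicitly held (see the util/async.c hunk near the end), so a completion BH must itself take the context of its BlockDriverState around the guest-visible callback. A minimal sketch of the pattern, assuming the QEMU block-layer headers; MyAIOCB and my_complete_bh are placeholder names, not from the patch:

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "block/block.h"

    /* Sketch only: MyAIOCB/my_complete_bh are illustrative names. */
    typedef struct MyAIOCB {
        BlockAIOCB common;      /* assumed allocated with qemu_aio_get() */
        int ret;
    } MyAIOCB;

    static void my_complete_bh(void *opaque)
    {
        MyAIOCB *acb = opaque;
        AioContext *ctx = bdrv_get_aio_context(acb->common.bs);

        /* The callback may issue new I/O or touch state protected by the
         * BDS's AioContext, so hold its lock around the call. */
        aio_context_acquire(ctx);
        acb->common.cb(acb->common.opaque, acb->ret);
        aio_context_release(ctx);

        qemu_aio_unref(acb);
    }
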
static void blkreplay_bh_cb(void *opaque)
{
Request *req = opaque;
- qemu_coroutine_enter(req->co);
+ aio_co_wake(req->co);
qemu_bh_delete(req->bh);
g_free(req);
}
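
Replacing qemu_coroutine_enter() with aio_co_wake() here (and in the nfs, iscsi and drain hunks below) changes who is responsible for the locking: aio_co_wake() re-enters the coroutine in the AioContext it last ran in, taking that context's lock, or schedules it there if the caller is in a different context or thread. A hedged sketch of the calling convention; WaitState and wake_req_bh are illustrative names only:

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "qemu/coroutine.h"

    /* Sketch only: WaitState/wake_req_bh are illustrative names. */
    typedef struct WaitState {
        Coroutine *co;   /* parked with qemu_coroutine_yield() */
        int ret;
    } WaitState;

    static void wake_req_bh(void *opaque)
    {
        WaitState *ws = opaque;

        /* Safe even if this BH runs outside the coroutine's home context:
         * the coroutine is resumed in its own AioContext, under its lock. */
        aio_co_wake(ws->co);
    }
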
static void error_callback_bh(void *opaque)
{
struct BlockBackendAIOCB *acb = opaque;
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
bdrv_dec_in_flight(acb->common.bs);
+ aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, acb->ret);
+ aio_context_release(ctx);
qemu_aio_unref(acb);
}
static void blk_aio_complete_bh(void *opaque)
{
BlkAioEmAIOCB *acb = opaque;
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
assert(acb->has_returned);
+ aio_context_acquire(ctx);
blk_aio_complete(acb);
+ aio_context_release(ctx);
}
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
{
CURLState *state;
int running;
+ int ret = -EINPROGRESS;
CURLAIOCB *acb = p;
- BDRVCURLState *s = acb->common.bs->opaque;
+ BlockDriverState *bs = acb->common.bs;
+ BDRVCURLState *s = bs->opaque;
+ AioContext *ctx = bdrv_get_aio_context(bs);
size_t start = acb->sector_num * BDRV_SECTOR_SIZE;
size_t end;
+ aio_context_acquire(ctx);
+
// In case we have the requested data already (e.g. read-ahead),
// we can just call the callback and be done.
switch (curl_find_buf(s, start, acb->nb_sectors * BDRV_SECTOR_SIZE, acb)) {
qemu_aio_unref(acb);
// fall through
case FIND_RET_WAIT:
- return;
+ goto out;
default:
break;
}
// No cache found, so let's start a new request
state = curl_init_state(acb->common.bs, s);
if (!state) {
- acb->common.cb(acb->common.opaque, -EIO);
- qemu_aio_unref(acb);
- return;
+ ret = -EIO;
+ goto out;
}
acb->start = 0;
state->orig_buf = g_try_malloc(state->buf_len);
if (state->buf_len && state->orig_buf == NULL) {
curl_clean_state(state);
- acb->common.cb(acb->common.opaque, -ENOMEM);
- qemu_aio_unref(acb);
- return;
+ ret = -ENOMEM;
+ goto out;
}
state->acb[0] = acb;
/* Tell curl it needs to kick things off */
curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
+
+out:
+ if (ret != -EINPROGRESS) {
+ acb->common.cb(acb->common.opaque, ret);
+ qemu_aio_unref(acb);
+ }
+ aio_context_release(ctx);
}
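
The curl change folds three early returns into a single exit path. ret starts at -EINPROGRESS, meaning "the request was queued and will complete later"; synchronous failures overwrite it and jump to out:, which invokes the callback exactly once and releases the AioContext on every path, so no error branch can leak the lock. A skeleton of that shape, with ExampleACB and example_start_request() as placeholders:

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "block/block.h"

    /* Sketch only: ExampleACB/example_start_request are illustrative. */
    typedef struct ExampleACB {
        BlockAIOCB common;
    } ExampleACB;

    static bool example_start_request(ExampleACB *acb);   /* placeholder */

    static void example_readv_bh(void *opaque)
    {
        ExampleACB *acb = opaque;
        AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
        int ret = -EINPROGRESS;        /* "completion will arrive later" */

        aio_context_acquire(ctx);
        if (!example_start_request(acb)) {
            ret = -EIO;                /* fail synchronously */
            goto out;
        }
        /* success: the driver reports completion from its own callback */
    out:
        if (ret != -EINPROGRESS) {
            acb->common.cb(acb->common.opaque, ret);
            qemu_aio_unref(acb);
        }
        aio_context_release(ctx);      /* released on every path */
    }
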
static BlockAIOCB *curl_aio_readv(BlockDriverState *bs,
return qemu_gluster_glfs_init(gconf, errp);
}
-static void qemu_gluster_complete_aio(void *opaque)
-{
- GlusterAIOCB *acb = (GlusterAIOCB *)opaque;
-
- qemu_coroutine_enter(acb->coroutine);
-}
-
/*
* AIO callback routine called from GlusterFS thread.
*/
acb->ret = -EIO; /* Partial read/write - fail it */
}
- aio_bh_schedule_oneshot(acb->aio_context, qemu_gluster_complete_aio, acb);
+ aio_co_schedule(acb->aio_context, acb->coroutine);
}
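
Here the one-shot bottom half existed only to enter the coroutine, so it is replaced by aio_co_schedule(), which queues the coroutine directly on the target AioContext and kicks that context. aio_co_schedule() is safe to call from a thread QEMU does not own, which is exactly the situation for the GlusterFS callback thread. A sketch under those assumptions; GlusterishTask and finish_from_foreign_thread() are invented names:

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "qemu/coroutine.h"

    /* Sketch only: GlusterishTask/finish_from_foreign_thread are invented. */
    typedef struct GlusterishTask {
        Coroutine *co;            /* coroutine waiting for the result */
        AioContext *aio_context;  /* context the request was issued from */
        int ret;
        bool complete;
    } GlusterishTask;

    /* Called from a library thread that is not a QEMU iothread. */
    static void finish_from_foreign_thread(GlusterishTask *task, int ret)
    {
        task->ret = ret;
        task->complete = true;
        aio_co_schedule(task->aio_context, task->co);
    }
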
static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
bdrv_dec_in_flight(bs);
bdrv_drained_begin(bs);
data->done = true;
- qemu_coroutine_enter(co);
+ aio_co_wake(co);
}
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
static void bdrv_co_em_bh(void *opaque)
{
BlockAIOCBCoroutine *acb = opaque;
+ BlockDriverState *bs = acb->common.bs;
+ AioContext *ctx = bdrv_get_aio_context(bs);
assert(!acb->need_bh);
+ aio_context_acquire(ctx);
bdrv_co_complete(acb);
+ aio_context_release(ctx);
}
static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
iscsi_bh_cb(void *p)
{
IscsiAIOCB *acb = p;
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
qemu_bh_delete(acb->bh);
g_free(acb->buf);
acb->buf = NULL;
+ aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, acb->status);
+ aio_context_release(ctx);
if (acb->task != NULL) {
scsi_free_scsi_task(acb->task);
static void iscsi_co_generic_bh_cb(void *opaque)
{
struct IscsiTask *iTask = opaque;
+
iTask->complete = 1;
- qemu_coroutine_enter(iTask->co);
+ aio_co_wake(iTask->co);
}
static void iscsi_retry_timer_expired(void *opaque)
io_context_t ctx;
EventNotifier e;
- /* io queue for submit at batch */
+ /* I/O queue for batched submission. Protected by AioContext lock. */
LaioQueue io_q;
- /* I/O completion processing */
+ /* I/O completion processing. Only runs in I/O thread. */
QEMUBH *completion_bh;
int event_idx;
int event_max;
*/
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
+ LinuxAioState *s = laiocb->ctx;
int ret;
ret = laiocb->ret;
}
laiocb->ret = ret;
+ aio_context_acquire(s->aio_context);
if (laiocb->co) {
/* If the coroutine is already entered it must be in ioq_submit() and
* will notice laio->ret has been filled in when it eventually runs
laiocb->common.cb(laiocb->common.opaque, ret);
qemu_aio_unref(laiocb);
}
+ aio_context_release(s->aio_context);
}
/**
static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
{
qemu_laio_process_completions(s);
+
+ aio_context_acquire(s->aio_context);
if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
ioq_submit(s);
}
+ aio_context_release(s->aio_context);
}
static void qemu_laio_completion_bh(void *opaque)
LinuxAioState *s = container_of(e, LinuxAioState, e);
if (event_notifier_test_and_clear(&s->e)) {
- aio_context_acquire(s->aio_context);
qemu_laio_process_completions_and_submit(s);
- aio_context_release(s->aio_context);
}
}
return false;
}
- aio_context_acquire(s->aio_context);
qemu_laio_process_completions_and_submit(s);
- aio_context_release(s->aio_context);
return true;
}
{
aio_set_event_notifier(old_context, &s->e, false, NULL, NULL);
qemu_bh_delete(s->completion_bh);
+ s->aio_context = NULL;
}
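
The linux-aio hunks move aio_context_acquire()/release() out of the event-notifier and poll handlers and into the functions that actually touch guest-visible state: qemu_laio_process_completion() takes the lock around the completion callback, and qemu_laio_process_completions_and_submit() takes it around the shared submission queue; detaching now also clears s->aio_context. A rough sketch of the resulting shape, with MyLaioState and the my_* helpers standing in for the real code:

    #include "qemu/osdep.h"
    #include "block/aio.h"

    /* Sketch only: MyLaioState and the my_* helpers are illustrative. */
    typedef struct MyLaioState {
        AioContext *aio_context;
        /* plus the io_submit ring and the pending-request queue */
    } MyLaioState;

    static void my_flush_pending(MyLaioState *s);          /* placeholder */

    static void my_complete_one(MyLaioState *s, BlockAIOCB *acb, int ret)
    {
        /* guest-visible side effects happen under the AioContext lock */
        aio_context_acquire(s->aio_context);
        acb->cb(acb->opaque, ret);
        aio_context_release(s->aio_context);
        qemu_aio_unref(acb);
    }

    static void my_process_and_submit(MyLaioState *s)
    {
        /* completions first; each one locks as in my_complete_one() */

        /* then flush the shared submission queue under the same lock */
        aio_context_acquire(s->aio_context);
        my_flush_pending(s);
        aio_context_release(s->aio_context);
    }
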
void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
static void nfs_co_generic_bh_cb(void *opaque)
{
NFSRPC *task = opaque;
+
task->complete = 1;
- qemu_coroutine_enter(task->co);
+ aio_co_wake(task->co);
}
static void
static void null_bh_cb(void *opaque)
{
NullAIOCB *acb = opaque;
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
+
+ aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, 0);
+ aio_context_release(ctx);
qemu_aio_unref(acb);
}
static void qed_aio_complete_bh(void *opaque)
{
QEDAIOCB *acb = opaque;
+ BDRVQEDState *s = acb_to_s(acb);
BlockCompletionFunc *cb = acb->common.cb;
void *user_opaque = acb->common.opaque;
int ret = acb->bh_ret;
qemu_aio_unref(acb);
/* Invoke callback */
+ qed_acquire(s);
cb(user_opaque, ret);
+ qed_release(s);
}
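
qed keeps the locking behind qed_acquire()/qed_release() helpers keyed on BDRVQEDState, since the same bracketing is needed in several qed callbacks. Their bodies are not part of these hunks; a plausible reconstruction, assuming they are thin wrappers around the context of the QED image's BlockDriverState:

    #include "qemu/osdep.h"
    #include "qed.h"   /* for BDRVQEDState, as in block/qed.c */

    /* Reconstruction only; the helper bodies are not shown in the hunks above. */
    void qed_acquire(BDRVQEDState *s)
    {
        aio_context_acquire(bdrv_get_aio_context(s->bs));
    }

    void qed_release(BDRVQEDState *s)
    {
        aio_context_release(bdrv_get_aio_context(s->bs));
    }
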
static void qed_aio_complete(QEDAIOCB *acb, int ret)
static void qemu_rbd_complete_aio(RADOSCB *rcb)
{
RBDAIOCB *acb = rcb->acb;
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
int64_t r;
r = rcb->ret;
qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
}
qemu_vfree(acb->bounce);
+
+ aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
+ aio_context_release(ctx);
qemu_aio_unref(acb);
}
QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
}
+ aio_context_acquire(dbs->ctx);
dbs->acb = dbs->io_func(dbs->offset, &dbs->iov,
dma_blk_cb, dbs, dbs->io_func_opaque);
+ aio_context_release(dbs->ctx);
assert(dbs->acb);
}
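
dma-helpers shows that the rule is not only about completion callbacks: dma_blk_cb submits the next piece of I/O through dbs->io_func, and that submission must happen under the AioContext that owns the request (dbs->ctx). The same applies to any code that submits I/O to a BlockBackend from outside its native context; a small illustrative helper (not from the patch) using the real blk_aio_preadv() API:

    #include "qemu/osdep.h"
    #include "qemu/iov.h"
    #include "block/aio.h"
    #include "sysemu/block-backend.h"

    /* Sketch only: submit a read on behalf of code running outside
     * blk's IOThread, holding the backend's AioContext around it. */
    static BlockAIOCB *submit_read_locked(BlockBackend *blk, int64_t offset,
                                          QEMUIOVector *qiov,
                                          BlockCompletionFunc *cb, void *opaque)
    {
        AioContext *ctx = blk_get_aio_context(blk);
        BlockAIOCB *acb;

        aio_context_acquire(ctx);
        acb = blk_aio_preadv(blk, offset, qiov, 0, cb, opaque);
        aio_context_release(ctx);
        return acb;
    }
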
s->rq = NULL;
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
while (req) {
VirtIOBlockReq *next = req->next;
if (virtio_blk_handle_request(req, &mrb)) {
if (mrb.num_reqs) {
virtio_blk_submit_multireq(s->blk, &mrb);
}
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
static void virtio_blk_dma_restart_cb(void *opaque, int running,
qemu_bh_delete(s->bh);
s->bh = NULL;
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
scsi_req_ref(req);
if (req->retry) {
}
scsi_req_unref(req);
}
+ aio_context_release(blk_get_aio_context(s->conf.blk));
}
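
The virtio-blk and SCSI hunks are the device-model side of the same change: the request-restart bottom halves run without the AioContext lock, while the requests they re-queue go to a BlockBackend that may be owned by an IOThread, so they bracket the restart work with blk_get_aio_context(). A sketch of such a restart BH; MyDevState and my_restart_requests() are placeholders:

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "sysemu/block-backend.h"

    /* Sketch only: MyDevState/my_restart_requests are illustrative. */
    typedef struct MyDevState {
        BlockBackend *blk;
        QEMUBH *bh;
    } MyDevState;

    static void my_restart_requests(MyDevState *s);   /* placeholder */

    static void my_dma_restart_bh(void *opaque)
    {
        MyDevState *s = opaque;
        AioContext *ctx = blk_get_aio_context(s->blk);

        qemu_bh_delete(s->bh);
        s->bh = NULL;

        /* Re-queueing requests touches blk, which may live in an IOThread. */
        aio_context_acquire(ctx);
        my_restart_requests(s);
        aio_context_release(ctx);
    }
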
void scsi_req_retry(SCSIRequest *req)
ret = 1;
}
bh->idle = 0;
- aio_context_acquire(ctx);
aio_bh_call(bh);
- aio_context_release(ctx);
}
if (bh->deleted) {
deleted = true;
Coroutine *co = QSLIST_FIRST(&straight);
QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
trace_aio_co_schedule_bh_cb(ctx, co);
+ aio_context_acquire(ctx);
qemu_coroutine_enter(co);
+ aio_context_release(ctx);
}
}
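
The util/async.c hunks define the new contract: aio_bh_poll() no longer wraps every bottom half in the AioContext lock, which is why the callbacks patched above now lock for themselves, while coroutines queued with aio_co_schedule() are still entered under the lock by aio_co_schedule_bh_cb(). In short, under this contract (do_work_bh is a placeholder):

    #include "qemu/osdep.h"
    #include "block/aio.h"

    static void do_work_bh(void *opaque);   /* placeholder BH callback */

    static void defer_examples(AioContext *ctx, void *opaque, Coroutine *co)
    {
        /* A plain BH callback now runs WITHOUT the context lock and must
         * take it itself if it touches AioContext-protected state. */
        aio_bh_schedule_oneshot(ctx, do_work_bh, opaque);

        /* A scheduled coroutine is still re-entered under ctx's lock,
         * per the aio_co_schedule_bh_cb() hunk above. */
        aio_co_schedule(ctx, co);
    }
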
ThreadPool *pool = opaque;
ThreadPoolElement *elem, *next;
+ aio_context_acquire(pool->ctx);
restart:
QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
if (elem->state != THREAD_DONE) {
qemu_aio_unref(elem);
}
}
+ aio_context_release(pool->ctx);
}
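
In the thread pool, worker threads keep running the work function with no AioContext lock; only thread_pool_completion_bh(), which invokes the guest-visible completion callbacks, now takes pool->ctx around its loop. Usage is unchanged, as this hedged example shows; my_blocking_fn and my_done are placeholders:

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "block/thread-pool.h"

    /* Sketch only: my_blocking_fn/my_done/submit_example are illustrative. */
    static int my_blocking_fn(void *arg)
    {
        /* worker thread: no lock held, do not touch AioContext state here */
        return 0;
    }

    static void my_done(void *opaque, int ret)
    {
        /* called from thread_pool_completion_bh(), under pool->ctx's lock */
    }

    static void submit_example(AioContext *ctx, void *arg)
    {
        ThreadPool *pool = aio_get_thread_pool(ctx);

        thread_pool_submit_aio(pool, my_blocking_fn, arg, my_done, arg);
    }
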
static void thread_pool_cancel(BlockAIOCB *acb)