     struct iocb iocb;
     ssize_t ret;
     size_t nbytes;
+    QLIST_ENTRY(qemu_laiocb) node;
 };
 struct qemu_laio_state {
     return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
 }
+/*
+ * Completes an AIO request (calls the callback and frees the ACB).
+ */
+static void qemu_laio_process_completion(struct qemu_laio_state *s,
+    struct qemu_laiocb *laiocb)
+{
+    int ret;
+
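+    /* One request fewer is in flight; qemu_laio_flush_cb() uses this count
+     * to tell qemu_aio_wait() whether requests are still pending. */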
+    s->count--;
+
+    ret = laiocb->ret;
+    if (ret != -ECANCELED) {
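+        /* ret == nbytes means the whole request was transferred: report
+         * success as 0.  A short but non-negative result becomes -EINVAL,
+         * since callers cannot handle partial completion.  Negative values
+         * are -errno codes from the kernel and pass through unchanged. */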
+        if (ret == laiocb->nbytes)
+            ret = 0;
+        else if (ret >= 0)
+            ret = -EINVAL;
+
+        laiocb->common.cb(laiocb->common.opaque, ret);
+    }
+
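+    /* The ACB is always released, including for cancelled requests
+     * (-ECANCELED), whose callback is deliberately skipped above. */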
+    qemu_aio_release(laiocb);
+}
+
+/*
+ * All requests are directly processed when they complete, so there's nothing
+ * left to do during qemu_aio_wait().
+ */
+static int qemu_laio_process_requests(void *opaque)
+{
+    return 0;
+}
+
 static void qemu_laio_completion_cb(void *opaque)
 {
     struct qemu_laio_state *s = opaque;
         do {
             ret = read(s->efd, &val, sizeof(val));
-        } while (ret == 1 && errno == EINTR);
+        } while (ret == -1 && errno == EINTR);
         if (ret == -1 && errno == EAGAIN)
             break;
             struct qemu_laiocb *laiocb =
                     container_of(iocb, struct qemu_laiocb, iocb);
-            s->count--;
-
-            ret = laiocb->ret = io_event_ret(&events[i]);
-            if (ret != -ECANCELED) {
-                if (ret == laiocb->nbytes)
-                    ret = 0;
-                else if (ret >= 0)
-                    ret = -EINVAL;
-
-                laiocb->common.cb(laiocb->common.opaque, ret);
-            }
-
-            qemu_aio_release(laiocb);
+            laiocb->ret = io_event_ret(&events[i]);
+            qemu_laio_process_completion(s, laiocb);
         }
     }
 }
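(A note on the one-line fix in this function: an eventfd read returns either
8 bytes or -1, so the old retry condition (ret == 1) could never be true, and
a read interrupted by a signal, which fails with -1 and errno == EINTR, fell
through instead of being retried.)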
     if (io_setup(MAX_EVENTS, &s->ctx) != 0)
         goto out_close_efd;
-    qemu_aio_set_fd_handler(s->efd, qemu_laio_completion_cb,
-        NULL, qemu_laio_flush_cb, NULL, s);
+    qemu_aio_set_fd_handler(s->efd, qemu_laio_completion_cb, NULL,
+        qemu_laio_flush_cb, qemu_laio_process_requests, s);
     return s;
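
For reference, and not part of this patch: qemu_laio_flush_cb(), registered
above as the io_flush handler, is what qemu_aio_wait() polls to learn whether
linux-aio still has requests in flight. Judging from the s->count bookkeeping
in the completion path, it presumably amounts to the following sketch:

static int qemu_laio_flush_cb(void *opaque)
{
    struct qemu_laio_state *s = opaque;

    /* Non-zero tells the caller that requests are still pending. */
    return (s->count > 0) ? 1 : 0;
}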