*/
#include "qemu-common.h"
#include "qemu-aio.h"
-#include "block_int.h"
#include "block/raw-posix-aio.h"
#include <sys/eventfd.h>
struct iocb iocb;
ssize_t ret;
size_t nbytes;
- int async_context_id;
+ QEMUIOVector *qiov;
+ bool is_read;
QLIST_ENTRY(qemu_laiocb) node;
};
io_context_t ctx;
int efd;
int count;
- QLIST_HEAD(, qemu_laiocb) completed_reqs;
};
static inline ssize_t io_event_ret(struct io_event *ev)
/*
* Completes an AIO request (calls the callback and frees the ACB).
- * Be sure to be in the right AsyncContext before calling this function.
*/
static void qemu_laio_process_completion(struct qemu_laio_state *s,
struct qemu_laiocb *laiocb)
ret = laiocb->ret;
if (ret != -ECANCELED) {
- if (ret == laiocb->nbytes)
+ if (ret == laiocb->nbytes) {
ret = 0;
- else if (ret >= 0)
- ret = -EINVAL;
+ } else if (ret >= 0) {
+ /* Short reads mean EOF, pad with zeros. */
+ if (laiocb->is_read) {
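+ /* qemu_iovec_memset_skip(qiov, c, count, skip) fills 'count' bytes
+  * with 'c' starting 'skip' bytes into the vector: zero everything
+  * beyond the 'ret' bytes that were actually read. */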
+ qemu_iovec_memset_skip(laiocb->qiov, 0,
+ laiocb->qiov->size - ret, ret);
+ } else {
+ ret = -EINVAL;
+ }
+ }
laiocb->common.cb(laiocb->common.opaque, ret);
}
qemu_aio_release(laiocb);
}
-/*
- * Processes all queued AIO requests, i.e. requests that have return from OS
- * but their callback was not called yet. Requests that cannot have their
- * callback called in the current AsyncContext, remain in the queue.
- *
- * Returns 1 if at least one request could be completed, 0 otherwise.
- */
-static int qemu_laio_process_requests(void *opaque)
-{
- struct qemu_laio_state *s = opaque;
- struct qemu_laiocb *laiocb, *next;
- int res = 0;
-
- QLIST_FOREACH_SAFE (laiocb, &s->completed_reqs, node, next) {
- if (laiocb->async_context_id == get_async_context_id()) {
- qemu_laio_process_completion(s, laiocb);
- QLIST_REMOVE(laiocb, node);
- res = 1;
- }
- }
-
- return res;
-}
-
-/*
- * Puts a request in the completion queue so that its callback is called the
- * next time when it's possible. If we already are in the right AsyncContext,
- * the request is completed immediately instead.
- */
-static void qemu_laio_enqueue_completed(struct qemu_laio_state *s,
- struct qemu_laiocb* laiocb)
-{
- if (laiocb->async_context_id == get_async_context_id()) {
- qemu_laio_process_completion(s, laiocb);
- } else {
- QLIST_INSERT_HEAD(&s->completed_reqs, laiocb, node);
- }
-}
-
static void qemu_laio_completion_cb(void *opaque)
{
struct qemu_laio_state *s = opaque;
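+ /* Each completed request increments the eventfd counter; reading it
+  * retrieves and clears that count, and EAGAIN means nothing further
+  * has completed yet. */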
do {
ret = read(s->efd, &val, sizeof(val));
- } while (ret == 1 && errno == EINTR);
+ } while (ret == -1 && errno == EINTR);
if (ret == -1 && errno == EAGAIN)
break;
container_of(iocb, struct qemu_laiocb, iocb);
laiocb->ret = io_event_ret(&events[i]);
- qemu_laio_enqueue_completed(s, laiocb);
+ qemu_laio_process_completion(s, laiocb);
}
}
}
off_t offset = sector_num * 512;
laiocb = qemu_aio_get(&laio_pool, bs, cb, opaque);
- if (!laiocb)
- return NULL;
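+ /* nbytes is the expected transfer size; completion compares the
+  * kernel's result against it to detect short transfers. */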
laiocb->nbytes = nb_sectors * 512;
laiocb->ctx = s;
laiocb->ret = -EINPROGRESS;
- laiocb->async_context_id = get_async_context_id();
+ laiocb->is_read = (type == QEMU_AIO_READ);
+ laiocb->qiov = qiov;
iocbs = &laiocb->iocb;
case QEMU_AIO_READ:
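+ /* io_prep_preadv() sets up a vectored read of qiov->niov buffers
+  * at byte offset 'offset'. */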
io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
break;
+ /* Currently the Linux kernel does not support other operations. */
default:
fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
__func__, type);
goto out_dec_count;
return &laiocb->common;
-out_free_aiocb:
- qemu_aio_release(laiocb);
out_dec_count:
s->count--;
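+ /* fall through: the ACB is released on both error paths */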
+out_free_aiocb:
+ qemu_aio_release(laiocb);
return NULL;
}
{
struct qemu_laio_state *s;
- s = qemu_mallocz(sizeof(*s));
- QLIST_INIT(&s->completed_reqs);
+ s = g_malloc0(sizeof(*s));
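+ /* Completions are signalled through this eventfd; qemu_laio_completion_cb
+  * (registered below) runs when it becomes readable. */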
s->efd = eventfd(0, 0);
if (s->efd == -1)
goto out_free_state;
goto out_close_efd;
qemu_aio_set_fd_handler(s->efd, qemu_laio_completion_cb, NULL,
- qemu_laio_flush_cb, qemu_laio_process_requests, s);
+ qemu_laio_flush_cb, NULL, s);
return s;
out_close_efd:
close(s->efd);
out_free_state:
- qemu_free(s);
+ g_free(s);
return NULL;
}