/*
 * Linux AIO request queue
 *
 * Copyright 2012 IBM, Corp.
 * Copyright 2012 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *   Stefan Hajnoczi <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "ioq.h"

/* Standard and glib headers for the library calls used below; in the
 * original tree these are likely pulled in indirectly via ioq.h, so they
 * are listed here only to keep the listing self-contained. */
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <glib.h>

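/* Initialize the queue for file descriptor @fd with room for @max_reqs
 * in-flight requests: create the Linux AIO context, the completion
 * EventNotifier (eventfd), and the iocb freelist/submission arrays.
 * Exits the process if AIO or notifier setup fails.  The freelist starts
 * empty, so callers are expected to seed it with ioq_put_iocb(). */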
void ioq_init(IOQueue *ioq, int fd, unsigned int max_reqs)
{
    int rc;

    ioq->fd = fd;
    ioq->max_reqs = max_reqs;

    memset(&ioq->io_ctx, 0, sizeof ioq->io_ctx);
    rc = io_setup(max_reqs, &ioq->io_ctx);
    if (rc != 0) {
        fprintf(stderr, "ioq io_setup failed %d\n", rc);
        exit(1);
    }

    rc = event_notifier_init(&ioq->io_notifier, 0);
    if (rc != 0) {
        fprintf(stderr, "ioq io event notifier creation failed %d\n", rc);
        exit(1);
    }

    ioq->freelist = g_malloc0(sizeof ioq->freelist[0] * max_reqs);
    ioq->freelist_idx = 0;

    ioq->queue = g_malloc0(sizeof ioq->queue[0] * max_reqs);
    ioq->queue_idx = 0;
}

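/* Release the resources allocated by ioq_init(): the iocb arrays, the
 * completion EventNotifier, and the Linux AIO context. */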
void ioq_cleanup(IOQueue *ioq)
{
    g_free(ioq->freelist);
    g_free(ioq->queue);

    event_notifier_cleanup(&ioq->io_notifier);
    io_destroy(ioq->io_ctx);
}

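/* Return the EventNotifier that is signalled when submitted requests
 * complete; callers can watch its file descriptor and then call
 * ioq_run_completion(). */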
EventNotifier *ioq_get_notifier(IOQueue *ioq)
{
    return &ioq->io_notifier;
}

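/* Pop a free iocb from the freelist and append it to the pending
 * submission queue for the next ioq_submit() call. */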
struct iocb *ioq_get_iocb(IOQueue *ioq)
{
    /* Underflow cannot happen since ioq is sized for max_reqs */
    assert(ioq->freelist_idx != 0);

    struct iocb *iocb = ioq->freelist[--ioq->freelist_idx];
    ioq->queue[ioq->queue_idx++] = iocb;
    return iocb;
}

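/* Return an iocb to the freelist, making it available to ioq_get_iocb()
 * again.  This is also how callers seed the freelist after ioq_init(). */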
void ioq_put_iocb(IOQueue *ioq, struct iocb *iocb)
{
    /* Overflow cannot happen since ioq is sized for max_reqs */
    assert(ioq->freelist_idx != ioq->max_reqs);

    ioq->freelist[ioq->freelist_idx++] = iocb;
}

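/* Queue a vectored read (@read true) or write at byte @offset.  The
 * request is only prepared and added to the submission queue; it is not
 * issued to the kernel until ioq_submit() is called.  The eventfd is
 * armed so completion signals the queue's EventNotifier. */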
struct iocb *ioq_rdwr(IOQueue *ioq, bool read, struct iovec *iov,
                      unsigned int count, long long offset)
{
    struct iocb *iocb = ioq_get_iocb(ioq);

    if (read) {
        io_prep_preadv(iocb, ioq->fd, iov, count, offset);
    } else {
        io_prep_pwritev(iocb, ioq->fd, iov, count, offset);
    }
    io_set_eventfd(iocb, event_notifier_get_fd(&ioq->io_notifier));
    return iocb;
}

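/* Submit all queued iocbs to the kernel with a single io_submit() call
 * and reset the submission queue.  Returns the io_submit() result. */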
int ioq_submit(IOQueue *ioq)
{
    int rc = io_submit(ioq->io_ctx, ioq->queue_idx, ioq->queue);
    ioq->queue_idx = 0; /* reset */
    return rc;
}

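/* Reap completed requests without blocking (min_nr is 0), invoke
 * @completion for each one, and recycle its iocb back onto the freelist.
 * Returns the number of events processed, or a negative value on
 * io_getevents() failure. */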
int ioq_run_completion(IOQueue *ioq, IOQueueCompletion *completion,
                       void *opaque)
{
    struct io_event events[ioq->max_reqs];
    int nevents, i;

    do {
        nevents = io_getevents(ioq->io_ctx, 0, ioq->max_reqs, events, NULL);
    } while (nevents < 0 && errno == EINTR);
    if (nevents < 0) {
        return nevents;
    }

    for (i = 0; i < nevents; i++) {
        ssize_t ret = ((uint64_t)events[i].res2 << 32) | events[i].res;

        completion(events[i].obj, ret, opaque);
        ioq_put_iocb(ioq, events[i].obj);
    }
    return nevents;
}
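
/*
 * Usage sketch (not part of the original file): a minimal, illustrative
 * caller assuming IOQueueCompletion has the signature implied by the call
 * in ioq_run_completion(), i.e. (struct iocb *, ssize_t, void *).
 * ExampleRequest, example_complete() and example_rw() are hypothetical
 * names; the block is compiled out with #if 0.
 */
#if 0
typedef struct {
    struct iocb iocb;       /* handed out by ioq_rdwr() via the freelist */
    /* per-request caller state would live here */
} ExampleRequest;

static void example_complete(struct iocb *iocb, ssize_t ret, void *opaque)
{
    /* map iocb back to its ExampleRequest and finish the request */
}

static void example_rw(int fd, struct iovec *iov, unsigned int iov_cnt)
{
    IOQueue ioq;
    ExampleRequest reqs[4];
    unsigned int i;

    ioq_init(&ioq, fd, 4);
    for (i = 0; i < 4; i++) {
        ioq_put_iocb(&ioq, &reqs[i].iocb);  /* seed the freelist */
    }

    ioq_rdwr(&ioq, true, iov, iov_cnt, 0);  /* queue a readv at offset 0 */
    ioq_submit(&ioq);                       /* one io_submit() for the batch */

    /* wait for ioq_get_notifier(&ioq)'s fd to become readable, then: */
    ioq_run_completion(&ioq, example_complete, NULL);

    ioq_cleanup(&ioq);
}
#endif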