/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"

struct AioHandler
{
    GPollFD pfd;
    IOHandler *io_read;
    IOHandler *io_write;
    AioFlushHandler *io_flush;
    int deleted;
    int pollfds_idx;
    void *opaque;
    QLIST_ENTRY(AioHandler) node;
};

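/*
 * Lifetime notes (summary of the code below): a handler is never freed while
 * the list is being walked; aio_set_fd_handler() only marks it ->deleted, and
 * aio_dispatch() frees it once walking_handlers drops back to zero.
 * pollfds_idx maps a handler to its slot in ctx->pollfds for the duration of
 * one aio_poll() call; -1 means "not in the array".
 */
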
static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd)
            if (!node->deleted)
                return node;
    }

    return NULL;
}

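/*
 * Because find_aio_handler() skips nodes marked ->deleted, re-registering an
 * fd whose old handler is still awaiting cleanup allocates a fresh AioHandler
 * below instead of resurrecting the doomed node.
 */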
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioFlushHandler *io_flush,
                        void *opaque)
{
    AioHandler *node;

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_malloc0(sizeof(AioHandler));
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_flush = io_flush;
        node->opaque = opaque;
        node->pollfds_idx = -1;

        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP : 0);
        node->pfd.events |= (io_write ? G_IO_OUT : 0);
    }

    aio_notify(ctx);
}

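/*
 * Usage sketch (hypothetical caller; "sockfd", "my_read_cb" and "my_state"
 * are illustrative names, not part of this file):
 *
 *     aio_set_fd_handler(ctx, sockfd, my_read_cb, NULL, NULL, my_state);
 *     ...
 *     aio_set_fd_handler(ctx, sockfd, NULL, NULL, NULL, NULL);
 *
 * Passing NULL for both io_read and io_write unregisters the fd, as the
 * deletion branch above shows.
 */
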
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read,
                            AioFlushEventNotifierHandler *io_flush)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
                       (IOHandler *)io_read, NULL,
                       (AioFlushHandler *)io_flush, notifier);
}

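/*
 * The casts above work because the notifier itself is registered as the
 * opaque pointer: io_read is invoked as node->io_read(node->opaque), so the
 * callback receives the EventNotifier * that an EventNotifierHandler expects.
 * Strictly speaking this relies on the two function-pointer signatures being
 * call-compatible, a long-standing idiom in this codebase.
 */
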
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        int revents;

        /*
         * FIXME: right now we cannot get G_IO_HUP and G_IO_ERR because
         * main-loop.c is still select based (due to the slirp legacy).
         * If main-loop.c ever switches to poll, G_IO_ERR should be
         * tested too.  Dispatching G_IO_ERR to both handlers should be
         * okay, since handlers need to be ready for spurious wakeups.
         */
        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
            return true;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
            return true;
        }
    }

    return false;
}

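/*
 * Note that aio_pending() only inspects revents; it is aio_dispatch() below
 * that consumes them (node->pfd.revents is cleared there), so aio_pending()
 * is side-effect free and may be called repeatedly.
 */
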
static bool aio_dispatch(AioContext *ctx)
{
    AioHandler *node;
    bool progress = false;

    /*
     * We have to walk very carefully in case qemu_aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;
        int revents;

        ctx->walking_handlers++;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;

        /* See comment in aio_pending.  */
        if (!node->deleted &&
            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
            node->io_read) {
            node->io_read(node->opaque);
            progress = true;
        }
        if (!node->deleted &&
            (revents & (G_IO_OUT | G_IO_ERR)) &&
            node->io_write) {
            node->io_write(node->opaque);
            progress = true;
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }
    return progress;
}

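/*
 * Rough shape of aio_poll() (summary comment):
 *  1. run queued bottom halves and dispatch handlers with pending revents;
 *  2. ask each handler's io_flush callback whether work is in flight (busy);
 *  3. if nothing is busy, return without polling; otherwise g_poll() the
 *     registered fds, blocking indefinitely only if the caller requested it;
 *  4. dispatch the handlers whose fds became ready.
 * Returns true if at least one callback ran; it returns false only when no
 * callback ran and no in-flight request reported itself busy.
 */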
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    int ret;
    bool busy, progress;

    progress = false;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call g_poll() in this case, because it is possible that the
     * caller does not need a complete flush (as is the case for qemu_aio_wait
     * loops).
     */
    if (aio_bh_poll(ctx)) {
        blocking = false;
        progress = true;
    }

    if (aio_dispatch(ctx)) {
        progress = true;
    }

    if (progress && !blocking) {
        return true;
    }

    ctx->walking_handlers++;

    g_array_set_size(ctx->pollfds, 0);

    /* fill pollfds */
    busy = false;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        node->pollfds_idx = -1;

        /* If there aren't pending AIO operations, don't invoke callbacks.
         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
         * wait indefinitely.
         */
        if (!node->deleted && node->io_flush) {
            if (node->io_flush(node->opaque) == 0) {
                continue;
            }
            busy = true;
        }
        if (!node->deleted && node->pfd.events) {
            GPollFD pfd = {
                .fd = node->pfd.fd,
                .events = node->pfd.events,
            };
            node->pollfds_idx = ctx->pollfds->len;
            g_array_append_val(ctx->pollfds, pfd);
        }
    }

    ctx->walking_handlers--;

    /* No AIO operations?  Get us out of here */
    if (!busy) {
        return progress;
    }

    /* wait until next event */
    ret = g_poll((GPollFD *)ctx->pollfds->data,
                 ctx->pollfds->len,
                 blocking ? -1 : 0);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
            if (node->pollfds_idx != -1) {
                GPollFD *pfd = &g_array_index(ctx->pollfds, GPollFD,
                                              node->pollfds_idx);
                node->pfd.revents = pfd->revents;
            }
        }
        if (aio_dispatch(ctx)) {
            progress = true;
        }
    }

    assert(progress || busy);
    return true;
}
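
/*
 * Minimal event-loop sketch (hypothetical driver code, not part of this
 * file; "done" is an illustrative flag):
 *
 *     while (!done) {
 *         aio_poll(ctx, true);    // may block waiting for fd events
 *     }
 *
 * aio_poll(ctx, false) polls without sleeping, which lets callers drain
 * pending bottom halves and ready fds opportunistically.
 */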