/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori <[email protected]>
 *  Paolo Bonzini <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"

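/*
 * One AioHandler is kept per registered wait source: "e" is the
 * EventNotifier whose Win32 HANDLE is passed to WaitForMultipleObjects and
 * io_notify is its callback, while io_read/io_write are the optional socket
 * callbacks driven by the select() probe in aio_prepare().  "deleted"
 * defers removal while walking_handlers is non-zero, so handlers can be
 * changed from within a callback.
 */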
struct AioHandler {
    EventNotifier *e;
    IOHandler *io_read;
    IOHandler *io_write;
    EventNotifierHandler *io_notify;
    GPollFD pfd;
    int deleted;
    void *opaque;
    QLIST_ENTRY(AioHandler) node;
};

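/*
 * Register, update or remove the read/write callbacks for a socket.
 * Passing NULL for both io_read and io_write removes the handler.
 * Illustrative sketch only (my_read_cb, my_write_cb and my_state are
 * hypothetical, not part of this file):
 *
 *     aio_set_fd_handler(ctx, sockfd, my_read_cb, my_write_cb, my_state);
 *     ...
 *     aio_set_fd_handler(ctx, sockfd, NULL, NULL, NULL);
 */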
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque)
{
    /* fd is a SOCKET in our case */
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        HANDLE event;

        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_malloc0(sizeof(AioHandler));
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
        }

        node->pfd.events = 0;
        if (node->io_read) {
            node->pfd.events |= G_IO_IN;
        }
        if (node->io_write) {
            node->pfd.events |= G_IO_OUT;
        }

        node->e = &ctx->notifier;

        /* Update handler with latest information */
        node->opaque = opaque;
        node->io_read = io_read;
        node->io_write = io_write;

        event = event_notifier_get_handle(&ctx->notifier);
        WSAEventSelect(node->pfd.fd, event,
                       FD_READ | FD_ACCEPT | FD_CLOSE |
                       FD_CONNECT | FD_WRITE | FD_OOB);
    }

    aio_notify(ctx);
}

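/*
 * Register or remove the callback for an EventNotifier.  Passing a NULL
 * io_notify removes the handler.  Illustrative sketch only (my_notifier
 * and my_notify_cb are hypothetical, not part of this file):
 *
 *     aio_set_event_notifier(ctx, &my_notifier, my_notify_cb);
 *     ...
 *     aio_set_event_notifier(ctx, &my_notifier, NULL);
 */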
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            EventNotifierHandler *io_notify)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the notifier handler? */
    if (!io_notify) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_malloc0(sizeof(AioHandler));
            node->e = e;
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
    }

    aio_notify(ctx);
}

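/*
 * Probe all registered sockets with a zero-timeout select() and record
 * their readiness in pfd.revents.  Returns true if any socket callback
 * needs to run, which lets aio_poll() avoid blocking in that case.
 */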
bool aio_prepare(AioContext *ctx)
{
    static struct timeval tv0;
    AioHandler *node;
    bool have_select_revents = false;
    fd_set rfds, wfds;

    /* fill fd sets */
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->io_read) {
            FD_SET((SOCKET)node->pfd.fd, &rfds);
        }
        if (node->io_write) {
            FD_SET((SOCKET)node->pfd.fd, &wfds);
        }
    }

    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
            node->pfd.revents = 0;
            if (FD_ISSET(node->pfd.fd, &rfds)) {
                node->pfd.revents |= G_IO_IN;
                have_select_revents = true;
            }

            if (FD_ISSET(node->pfd.fd, &wfds)) {
                node->pfd.revents |= G_IO_OUT;
                have_select_revents = true;
            }
        }
    }

    return have_select_revents;
}

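/*
 * Return true if a previously polled handler still has events to
 * dispatch, i.e. pfd.revents is set and a matching callback exists.
 */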
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.revents && node->io_notify) {
            return true;
        }

        if ((node->pfd.revents & G_IO_IN) && node->io_read) {
            return true;
        }
        if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
            return true;
        }
    }

    return false;
}

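/*
 * Run the callbacks of every handler whose event fired: io_notify for the
 * handler whose HANDLE matches "event" (or whose revents are set), and
 * io_read/io_write for sockets flagged by aio_prepare().  Returns true if
 * any of them made progress; the context's own aio_notify() notifier does
 * not count.
 */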
static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;
        int revents = node->pfd.revents;

        ctx->walking_handlers++;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    return progress;
}

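/*
 * Dispatch pending bottom halves, handlers and timers without waiting.
 * Passing INVALID_HANDLE_VALUE means no wait object is treated as
 * signaled; only handlers with recorded revents run.
 */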
bool aio_dispatch(AioContext *ctx)
{
    bool progress;

    progress = aio_bh_poll(ctx);
    progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    progress |= timerlistgroup_run_timers(&ctx->tlg);
    return progress;
}

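/*
 * Poll the AioContext once.  If "blocking" is true, wait until the next
 * timer deadline at most; otherwise return immediately after dispatching
 * whatever is ready.  Returns true if any progress was made (a bottom
 * half, handler or timer ran).
 */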
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool was_dispatching, progress, have_select_revents, first;
    int count;
    int timeout;

    have_select_revents = aio_prepare(ctx);
    if (have_select_revents) {
        blocking = false;
    }

    was_dispatching = ctx->dispatching;
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns.
     *
     * If we're in a nested event loop, ctx->dispatching might be true.
     * In that case we can restore it just before returning, but we
     * have to clear it now.
     */
    aio_set_dispatching(ctx, !blocking);

    ctx->walking_handlers++;

    /* fill the events array with the notifiers' wait handles */
    count = 0;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    ctx->walking_handlers--;
    first = true;

    /* wait until next event */
    while (count > 0) {
        HANDLE event;
        int ret;

        timeout = blocking
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        aio_set_dispatching(ctx, true);

        if (first && aio_bh_poll(ctx)) {
            progress = true;
        }
        first = false;

        /* if we have any signaled events, dispatch event */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            event = events[ret - WAIT_OBJECT_0];
            /* Try again, but only call each handler once.  */
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            break;
        }

        have_select_revents = false;
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    }

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    aio_set_dispatching(ctx, was_dispatching);
    return progress;
}
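
/*
 * Illustrative only (not part of this file): code that owns an AioContext
 * typically drives it with a loop along these lines, where "done" is a
 * hypothetical termination flag:
 *
 *     while (!done) {
 *         aio_poll(ctx, true);
 *     }
 */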