/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori   <[email protected]>
 *  Paolo Bonzini     <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
#include "qapi/error.h"
#include "qemu/rcu_queue.h"

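/* One registered handler.  A node tracks either a socket (io_read/io_write,
 * polled via select() and WSAEventSelect()) or an EventNotifier (io_notify,
 * waited on with WaitForMultipleObjects()).  Nodes are never freed while a
 * walk of the list is in progress; they are only marked as deleted.
 */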
struct AioHandler {
    EventNotifier *e;
    IOHandler *io_read;
    IOHandler *io_write;
    EventNotifierHandler *io_notify;
    GPollFD pfd;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

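/* Register, update, or (when both io_read and io_write are NULL) remove
 * the handlers for a socket.  On Windows the fd must be a SOCKET; network
 * events are routed to the context's event notifier via WSAEventSelect().
 *
 * A hypothetical caller might look like this (my_read_cb and my_state are
 * illustrative names, not part of this file):
 *
 *     aio_set_fd_handler(ctx, sockfd, true, my_read_cb, NULL, NULL,
 *                        my_state);
 */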
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    /* fd is a SOCKET in our case */
    AioHandler *node;

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If aio_poll is in progress, just mark the node as deleted */
            if (qemu_lockcnt_count(&ctx->list_lock)) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the list_lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        HANDLE event;

        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
        }

        node->pfd.events = 0;
        if (node->io_read) {
            node->pfd.events |= G_IO_IN;
        }
        if (node->io_write) {
            node->pfd.events |= G_IO_OUT;
        }

        node->e = &ctx->notifier;

        /* Update handler with latest information */
        node->opaque = opaque;
        node->io_read = io_read;
        node->io_write = io_write;
        node->is_external = is_external;

        event = event_notifier_get_handle(&ctx->notifier);
        WSAEventSelect(node->pfd.fd, event,
                       FD_READ | FD_ACCEPT | FD_CLOSE |
                       FD_CONNECT | FD_WRITE | FD_OOB);
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

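/* Handler-level polling is not implemented on Windows, so this is a stub
 * that keeps the common AioContext API intact (see also
 * aio_context_set_poll_params() at the end of this file).
 */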
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    /* Not implemented */
}

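/* Register or update the handler for an EventNotifier, or remove it when
 * io_notify is NULL.  The notifier's HANDLE is also exposed to the GLib
 * main loop through g_source_add_poll()/g_source_remove_poll().
 */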
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            bool is_external,
                            EventNotifierHandler *io_notify,
                            AioPollFn *io_poll)
{
    AioHandler *node;

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_notify) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* aio_poll is in progress, just mark the node as deleted */
            if (qemu_lockcnt_count(&ctx->list_lock)) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the list_lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->e = e;
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            node->is_external = is_external;
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

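/* Stubbed out on Windows for the same reason as aio_set_fd_poll() above. */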
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    /* Not implemented */
}

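/* Run a zero-timeout select() over all registered sockets (tv0 stays
 * zeroed) and record the results in pfd.revents.  Returns true if any
 * socket is already readable or writable, telling the caller that it
 * should not block.
 */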
bool aio_prepare(AioContext *ctx)
{
    static struct timeval tv0;
    AioHandler *node;
    bool have_select_revents = false;
    fd_set rfds, wfds;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    /* fill fd sets */
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->io_read) {
            FD_SET((SOCKET)node->pfd.fd, &rfds);
        }
        if (node->io_write) {
            FD_SET((SOCKET)node->pfd.fd, &wfds);
        }
    }

    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
            node->pfd.revents = 0;
            if (FD_ISSET(node->pfd.fd, &rfds)) {
                node->pfd.revents |= G_IO_IN;
                have_select_revents = true;
            }

            if (FD_ISSET(node->pfd.fd, &wfds)) {
                node->pfd.revents |= G_IO_OUT;
                have_select_revents = true;
            }
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return have_select_revents;
}

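/* Return true if dispatching now would make progress, i.e. some registered
 * handler has pending revents recorded by a previous poll.
 */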
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->pfd.revents && node->io_notify) {
            result = true;
            break;
        }

        if ((node->pfd.revents & G_IO_IN) && node->io_read) {
            result = true;
            break;
        }
        if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
            result = true;
            break;
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return result;
}

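/* Walk the handler list once and invoke every callback whose event fired.
 * @event is the HANDLE that WaitForMultipleObjects() reported as signaled,
 * or INVALID_HANDLE_VALUE/NULL when only select() results are pending.
 * Deleted nodes are reaped here if no other walker holds the list.
 * Returns true if any callback counted as progress.
 */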
static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;
    AioHandler *tmp;

    qemu_lockcnt_inc(&ctx->list_lock);

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        int revents = node->pfd.revents;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        if (node->deleted) {
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return progress;
}

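/* One dispatch pass, used by the GLib source: bottom halves first, then
 * (if requested) fd and notifier handlers, then expired timers.
 */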
bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
{
    bool progress;

    progress = aio_bh_poll(ctx);
    if (dispatch_fds) {
        progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    }
    progress |= timerlistgroup_run_timers(&ctx->tlg);
    return progress;
}

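/* Run one iteration of the event loop.  When blocking is true, the first
 * WaitForMultipleObjects() call may sleep until an event fires or the next
 * timer expires; later iterations are non-blocking and drain the remaining
 * signaled handles one at a time (each signaled handle is swapped out of
 * events[], so it is dispatched at most once per call).
 *
 * An illustrative caller, waiting for a completion flag:
 *
 *     while (!done) {
 *         aio_poll(ctx, true);
 *     }
 */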
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool progress, have_select_revents, first;
    int count;
    int timeout;

    aio_context_acquire(ctx);
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    qemu_lockcnt_inc(&ctx->list_lock);
    have_select_revents = aio_prepare(ctx);

    /* fill fd sets */
    count = 0;
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify
            && aio_node_check(ctx, node->is_external)) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    first = true;

    /* ctx->notifier is always registered.  */
    assert(count > 0);

    /* Multiple iterations, all of them non-blocking except the first,
     * may be necessary to process all pending events.  After the first
     * WaitForMultipleObjects call ctx->notify_me will be decremented.
     */
    do {
        HANDLE event;
        int ret;

        timeout = blocking && !have_select_revents
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        if (timeout) {
            aio_context_release(ctx);
        }
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        if (blocking) {
            assert(first);
            atomic_sub(&ctx->notify_me, 2);
        }
        if (timeout) {
            aio_context_acquire(ctx);
        }

        if (first) {
            aio_notify_accept(ctx);
            progress |= aio_bh_poll(ctx);
            first = false;
        }

        /* if we have any signaled events, dispatch event */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            event = events[ret - WAIT_OBJECT_0];
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            break;
        }

        have_select_revents = false;
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    } while (count > 0);

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    aio_context_release(ctx);
    return progress;
}

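/* No per-context setup is required on Windows; the hook exists for
 * platforms that need it.
 */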
void aio_context_setup(AioContext *ctx)
{
}

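/* Adaptive polling is unavailable on Windows, so any attempt to configure
 * it reports an error through @errp.
 */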
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    error_setg(errp, "AioContext polling is not implemented on Windows");
}