/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori <[email protected]>
 *  Paolo Bonzini <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
#include "qapi/error.h"
#include "qemu/rcu_queue.h"

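/*
 * One AioHandler per registered event source: either a SOCKET with
 * io_read/io_write callbacks, or an EventNotifier with an io_notify
 * callback.  Nodes are unlinked immediately only when no aio_poll walk
 * is in progress; otherwise they are flagged as deleted and reaped
 * later (see aio_remove_fd_handler and aio_dispatch_handlers below).
 */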
struct AioHandler {
    EventNotifier *e;
    IOHandler *io_read;
    IOHandler *io_write;
    EventNotifierHandler *io_notify;
    GPollFD pfd;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

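/*
 * Unlink a handler from ctx->aio_handlers.  Deletion must be deferred
 * while any aio_poll walk holds the list_lock counter, because walkers
 * may still be dereferencing the node.
 */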
static void aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
{
    /* If aio_poll is in progress, just mark the node as deleted */
    if (qemu_lockcnt_count(&ctx->list_lock)) {
        node->deleted = 1;
        node->pfd.revents = 0;
    } else {
        /* Otherwise, delete it for real.  We can't just mark it as
         * deleted because deleted nodes are only cleaned up after
         * releasing the list_lock.
         */
        QLIST_REMOVE(node, node);
        g_free(node);
    }
}

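/*
 * Register (or remove, when io_read and io_write are both NULL) the
 * callbacks for a socket.  Socket activity is funneled through the
 * AioContext's event notifier via WSAEventSelect(), so no separate
 * poll source is added.  io_poll is accepted for API compatibility but
 * ignored: polling mode is not implemented on Windows (see
 * aio_context_set_poll_params below).
 */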
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    /* fd is a SOCKET in our case */
    AioHandler *old_node;
    AioHandler *node = NULL;

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(old_node, &ctx->aio_handlers, node) {
        if (old_node->pfd.fd == fd && !old_node->deleted) {
            break;
        }
    }

    if (io_read || io_write) {
        HANDLE event;
        long bitmask = 0;

        /* Alloc and insert if it's not already there */
        node = g_new0(AioHandler, 1);
        node->pfd.fd = fd;

        /* Compute the poll events from the requested callbacks; the
         * freshly allocated node's own fields are still zero here.
         */
        node->pfd.events = 0;
        if (io_read) {
            node->pfd.events |= G_IO_IN;
        }
        if (io_write) {
            node->pfd.events |= G_IO_OUT;
        }

        node->e = &ctx->notifier;

        /* Update handler with latest information */
        node->opaque = opaque;
        node->io_read = io_read;
        node->io_write = io_write;
        node->is_external = is_external;

        if (io_read) {
            bitmask |= FD_READ | FD_ACCEPT | FD_CLOSE;
        }

        if (io_write) {
            bitmask |= FD_WRITE | FD_CONNECT;
        }

        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
        event = event_notifier_get_handle(&ctx->notifier);
        WSAEventSelect(node->pfd.fd, event, bitmask);
    }
    if (old_node) {
        aio_remove_fd_handler(ctx, old_node);
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

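/*
 * Poll-mode begin/end hooks are no-ops here for the same reason: the
 * Windows backend has no polling mode to bracket.
 */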
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    /* Not implemented */
}

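/*
 * Register (or remove, when io_notify is NULL) the callback for an
 * EventNotifier.  Unlike socket handlers, the notifier's HANDLE is
 * also exposed to the glib main loop via g_source_add_poll().
 */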
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            bool is_external,
                            EventNotifierHandler *io_notify,
                            AioPollFn *io_poll)
{
    AioHandler *node;

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_notify) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            aio_remove_fd_handler(ctx, node);
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->e = e;
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            node->is_external = is_external;
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    /* Not implemented */
}

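/*
 * Called before blocking, both from aio_poll() and from the GSource
 * prepare callback.  WaitForMultipleObjects() only says that the
 * shared event was signaled, not which socket fired, so latch
 * per-socket readiness into pfd.revents with a zero-timeout select()
 * (tv0 is static, hence zero-initialized).  Returns true if any socket
 * already has an event pending.
 */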
bool aio_prepare(AioContext *ctx)
{
    static struct timeval tv0;
    AioHandler *node;
    bool have_select_revents = false;
    fd_set rfds, wfds;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    /* fill fd sets */
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->io_read) {
            FD_SET((SOCKET)node->pfd.fd, &rfds);
        }
        if (node->io_write) {
            FD_SET((SOCKET)node->pfd.fd, &wfds);
        }
    }

    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
            node->pfd.revents = 0;
            if (FD_ISSET(node->pfd.fd, &rfds)) {
                node->pfd.revents |= G_IO_IN;
                have_select_revents = true;
            }

            if (FD_ISSET(node->pfd.fd, &wfds)) {
                node->pfd.revents |= G_IO_OUT;
                have_select_revents = true;
            }
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return have_select_revents;
}

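/* Return true if dispatching now would make progress, i.e. at least
 * one registered handler has an event waiting for it.
 */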
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->pfd.revents && node->io_notify) {
            result = true;
            break;
        }

        if ((node->pfd.revents & G_IO_IN) && node->io_read) {
            result = true;
            break;
        }
        if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
            result = true;
            break;
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return result;
}

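/*
 * Invoke ready callbacks: io_notify for the notifier whose HANDLE
 * matches 'event' (or that has leftover revents), io_read/io_write for
 * sockets according to the revents latched by aio_prepare().  Deleted
 * nodes are freed here once this is the only remaining walker.
 * Returns true if any real work was done.
 */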
static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;
    AioHandler *tmp;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        int revents = node->pfd.revents;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        if (node->deleted) {
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    return progress;
}

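/*
 * GSource dispatch path: run bottom halves, ready handlers and expired
 * timers.  INVALID_HANDLE_VALUE matches no notifier handle, so only
 * handlers whose revents were latched (by glib's poll for notifiers,
 * by aio_prepare() for sockets) are serviced here.
 */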
void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    qemu_lockcnt_dec(&ctx->list_lock);
    timerlistgroup_run_timers(&ctx->tlg);
}

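/*
 * Wait for events and dispatch them.  WaitForMultipleObjects() reports
 * at most one signaled handle per call, so iterate: block at most once
 * (on the first pass), then keep dispatching with a zero timeout,
 * retiring the signaled handle from 'events' each time, until nothing
 * is left signaled.
 */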
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool progress, have_select_revents, first;
    int count;
    int timeout;

    /*
     * There cannot be two concurrent aio_poll calls for the same AioContext (or
     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
     */
    assert(in_aio_context_home_thread(ctx));
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after
     * WaitForMultipleObjects() returns, so disable the optimization now.
     */
    if (blocking) {
        atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2);
        /*
         * Write ctx->notify_me before computing the timeout
         * (reading bottom half flags, etc.).  Pairs with
         * smp_mb in aio_notify().
         */
        smp_mb();
    }

    qemu_lockcnt_inc(&ctx->list_lock);
    have_select_revents = aio_prepare(ctx);

    /* fill the array of handles to wait on */
    count = 0;
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify
            && aio_node_check(ctx, node->is_external)) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    first = true;

    /* ctx->notifier is always registered.  */
    assert(count > 0);

    /* Multiple iterations, all of them non-blocking except the first,
     * may be necessary to process all pending events.  After the first
     * WaitForMultipleObjects call ctx->notify_me will be decremented.
     */
    do {
        HANDLE event;
        int ret;

        timeout = blocking && !have_select_revents
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        if (blocking) {
            assert(first);
            atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) - 2);
            aio_notify_accept(ctx);
        }

        if (first) {
            progress |= aio_bh_poll(ctx);
            first = false;
        }

        /* if any handle was signaled, dispatch its event */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            event = events[ret - WAIT_OBJECT_0];
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            break;
        }

        have_select_revents = false;
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    } while (count > 0);

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);
    return progress;
}

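/* Nothing to set up or tear down per context on Windows. */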
void aio_context_setup(AioContext *ctx)
{
}

void aio_context_destroy(AioContext *ctx)
{
}

void aio_context_use_g_source(AioContext *ctx)
{
}

void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    if (max_ns) {
        error_setg(errp, "AioContext polling is not implemented on Windows");
    }
}