/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori <[email protected]>
 *  Paolo Bonzini <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"

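/*
 * One AioHandler is kept on ctx->aio_handlers for every registered source
 * of events.  On Win32 a handler is either socket-based (io_read/io_write,
 * polled with select()) or HANDLE-based (io_notify, waited on with
 * WaitForMultipleObjects).
 */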
struct AioHandler {
    EventNotifier *e;
    IOHandler *io_read;
    IOHandler *io_write;
    EventNotifierHandler *io_notify;
    GPollFD pfd;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

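/*
 * Register, update or remove the read/write callbacks for a socket.
 * Passing NULL for both io_read and io_write removes the handler.  The
 * socket is tied to the context's event notifier with WSAEventSelect so
 * that socket activity wakes up the wait in aio_poll().
 */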
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque)
{
    /* fd is a SOCKET in our case */
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        HANDLE event;

        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
        }

        node->pfd.events = 0;
        if (node->io_read) {
            node->pfd.events |= G_IO_IN;
        }
        if (node->io_write) {
            node->pfd.events |= G_IO_OUT;
        }

        node->e = &ctx->notifier;

        /* Update handler with latest information */
        node->opaque = opaque;
        node->io_read = io_read;
        node->io_write = io_write;
        node->is_external = is_external;

        event = event_notifier_get_handle(&ctx->notifier);
        WSAEventSelect(node->pfd.fd, event,
                       FD_READ | FD_ACCEPT | FD_CLOSE |
                       FD_CONNECT | FD_WRITE | FD_OOB);
    }

    aio_notify(ctx);
}

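/*
 * Register, update or remove the callback for an EventNotifier.  Passing a
 * NULL io_notify removes the notifier.  The notifier's HANDLE is waited on
 * directly in aio_poll() and is also added to the GSource poll set.
 */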
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            bool is_external,
                            EventNotifierHandler *io_notify)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_notify) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->e = e;
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            node->is_external = is_external;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
    }

    aio_notify(ctx);
}

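/*
 * Do a zero-timeout select() over all registered sockets and record the
 * results in each handler's pfd.revents.  Returns true if any socket is
 * already readable or writable, so the following wait need not block.
 */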
bool aio_prepare(AioContext *ctx)
{
    static struct timeval tv0;
    AioHandler *node;
    bool have_select_revents = false;
    fd_set rfds, wfds;

    /* fill fd sets */
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->io_read) {
            FD_SET ((SOCKET)node->pfd.fd, &rfds);
        }
        if (node->io_write) {
            FD_SET ((SOCKET)node->pfd.fd, &wfds);
        }
    }

    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
            node->pfd.revents = 0;
            if (FD_ISSET(node->pfd.fd, &rfds)) {
                node->pfd.revents |= G_IO_IN;
                have_select_revents = true;
            }

            if (FD_ISSET(node->pfd.fd, &wfds)) {
                node->pfd.revents |= G_IO_OUT;
                have_select_revents = true;
            }
        }
    }

    return have_select_revents;
}

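/* Return true if at least one handler has events that would be dispatched. */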
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.revents && node->io_notify) {
            return true;
        }

        if ((node->pfd.revents & G_IO_IN) && node->io_read) {
            return true;
        }
        if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
            return true;
        }
    }

    return false;
}

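/*
 * Walk the handler list and invoke the callbacks of every handler whose
 * event HANDLE was signaled or whose socket has pending revents.  The walk
 * is guarded by walking_handlers, so aio_set_fd_handler() called from a
 * callback only marks nodes as deleted; they are freed here once no walk
 * is in progress.
 */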
static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;
        int revents = node->pfd.revents;

        ctx->walking_handlers++;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    return progress;
}

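/*
 * Run pending bottom halves, dispatch handlers that have pending revents
 * (no HANDLE is signaled here), and run expired timers.  Returns true if
 * any of these made progress.
 */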
bool aio_dispatch(AioContext *ctx)
{
    bool progress;

    progress = aio_bh_poll(ctx);
    progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    progress |= timerlistgroup_run_timers(&ctx->tlg);
    return progress;
}

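/*
 * Run one iteration of the event loop: optionally block until an event
 * HANDLE is signaled or a timer expires, then dispatch bottom halves,
 * handlers and timers.  Returns true if progress was made.
 */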
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool progress, have_select_revents, first;
    int count;
    int timeout;

    aio_context_acquire(ctx);
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    have_select_revents = aio_prepare(ctx);

    ctx->walking_handlers++;

    /* fill fd sets */
    count = 0;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify
            && aio_node_check(ctx, node->is_external)) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    ctx->walking_handlers--;
    first = true;

    /* ctx->notifier is always registered.  */
    assert(count > 0);

    /* Multiple iterations, all of them non-blocking except the first,
     * may be necessary to process all pending events.  After the first
     * WaitForMultipleObjects call ctx->notify_me will be decremented.
     */
    do {
        HANDLE event;
        int ret;

        timeout = blocking && !have_select_revents
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        if (timeout) {
            aio_context_release(ctx);
        }
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        if (blocking) {
            assert(first);
            atomic_sub(&ctx->notify_me, 2);
        }
        if (timeout) {
            aio_context_acquire(ctx);
        }

        if (first) {
            aio_notify_accept(ctx);
            progress |= aio_bh_poll(ctx);
            first = false;
        }

        /* if we have any signaled events, dispatch event */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            event = events[ret - WAIT_OBJECT_0];
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            break;
        }

        have_select_revents = false;
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    } while (count > 0);

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    aio_context_release(ctx);
    return progress;
}

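/* No platform-specific setup is needed for an AioContext on Win32. */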
void aio_context_setup(AioContext *ctx, Error **errp)
{
}