/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "qemu/rcu_queue.h"
#include "qemu/sockets.h"
#include "qemu/cutils.h"
#include "trace.h"
#ifdef CONFIG_EPOLL_CREATE1
#include <sys/epoll.h>
#endif

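/*
 * Lifetime note (added commentary summarising the code below): a handler
 * that is removed while another thread is walking ctx->aio_handlers cannot
 * be freed immediately; it is only marked ->deleted and reaped later, once
 * ctx->list_lock shows no concurrent readers (see aio_remove_fd_handler()
 * and aio_dispatch_handlers()).
 */
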
struct AioHandler
{
    GPollFD pfd;
    IOHandler *io_read;
    IOHandler *io_write;
    AioPollFn *io_poll;
    IOHandler *io_poll_begin;
    IOHandler *io_poll_end;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

#ifdef CONFIG_EPOLL_CREATE1

/* The fd number threshold to switch to epoll */
#define EPOLL_ENABLE_THRESHOLD 64

static void aio_epoll_disable(AioContext *ctx)
{
    ctx->epoll_enabled = false;
    if (!ctx->epoll_available) {
        return;
    }
    ctx->epoll_available = false;
    close(ctx->epollfd);
}

static inline int epoll_events_from_pfd(int pfd_events)
{
    return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
           (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
           (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
           (pfd_events & G_IO_ERR ? EPOLLERR : 0);
}

static bool aio_epoll_try_enable(AioContext *ctx)
{
    AioHandler *node;
    struct epoll_event event;

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int r;
        if (node->deleted || !node->pfd.events) {
            continue;
        }
        event.events = epoll_events_from_pfd(node->pfd.events);
        event.data.ptr = node;
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
        if (r) {
            return false;
        }
    }
    ctx->epoll_enabled = true;
    return true;
}

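/*
 * Note (added commentary): if epoll_ctl() fails partway through the loop in
 * aio_epoll_try_enable(), some fds have already been added to the epoll set.
 * The caller, aio_epoll_check_poll(), handles that by calling
 * aio_epoll_disable(), which closes the epoll fd and discards the whole set.
 */
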
static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
    struct epoll_event event;
    int r;
    int ctl;

    if (!ctx->epoll_enabled) {
        return;
    }
    if (!node->pfd.events) {
        ctl = EPOLL_CTL_DEL;
    } else {
        event.data.ptr = node;
        event.events = epoll_events_from_pfd(node->pfd.events);
        ctl = is_new ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
    }

    r = epoll_ctl(ctx->epollfd, ctl, node->pfd.fd, &event);
    if (r) {
        aio_epoll_disable(ctx);
    }
}

static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    AioHandler *node;
    int i, ret = 0;
    struct epoll_event events[128];

    assert(npfd == 1);
    assert(pfds[0].fd == ctx->epollfd);
    if (timeout > 0) {
        ret = qemu_poll_ns(pfds, npfd, timeout);
    }
    if (timeout <= 0 || ret > 0) {
        ret = epoll_wait(ctx->epollfd, events,
                         ARRAY_SIZE(events),
                         timeout);
        if (ret <= 0) {
            goto out;
        }
        for (i = 0; i < ret; i++) {
            int ev = events[i].events;
            node = events[i].data.ptr;
            node->pfd.revents = (ev & EPOLLIN ? G_IO_IN : 0) |
                (ev & EPOLLOUT ? G_IO_OUT : 0) |
                (ev & EPOLLHUP ? G_IO_HUP : 0) |
                (ev & EPOLLERR ? G_IO_ERR : 0);
        }
    }
out:
    return ret;
}

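/*
 * Note (added commentary): epoll_wait() only has millisecond resolution, so
 * for a positive timeout aio_epoll() first blocks in qemu_poll_ns() on the
 * epoll fd itself (nanosecond resolution) and then calls epoll_wait() to
 * harvest the ready events.
 */
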
static bool aio_epoll_enabled(AioContext *ctx)
{
    /* Fall back to ppoll when external clients are disabled. */
    return !aio_external_disabled(ctx) && ctx->epoll_enabled;
}

static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    if (!ctx->epoll_available) {
        return false;
    }
    if (aio_epoll_enabled(ctx)) {
        return true;
    }
    if (npfd >= EPOLL_ENABLE_THRESHOLD) {
        if (aio_epoll_try_enable(ctx)) {
            return true;
        } else {
            aio_epoll_disable(ctx);
        }
    }
    return false;
}

#else

static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
}

static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    assert(false);
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    return false;
}

static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    return false;
}

#endif

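/*
 * Note (added commentary): the assert(false) stub above is unreachable
 * because aio_epoll_check_poll() always returns false when
 * CONFIG_EPOLL_CREATE1 is not defined, so aio_poll() never calls aio_epoll().
 */
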
static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            return node;
        }
    }

    return NULL;
}

static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
{
    /* If the GSource is in the process of being destroyed then
     * g_source_remove_poll() causes an assertion failure.  Skip
     * removal in that case, because glib cleans up its state during
     * destruction anyway.
     */
    if (!g_source_is_destroyed(&ctx->source)) {
        g_source_remove_poll(&ctx->source, &node->pfd);
    }

    /* If a read is in progress, just mark the node as deleted */
    if (qemu_lockcnt_count(&ctx->list_lock)) {
        node->deleted = 1;
        node->pfd.revents = 0;
        return false;
    }
    /* Otherwise, delete it for real.  We can't just mark it as
     * deleted because deleted nodes are only cleaned up while
     * no one is walking the handlers list.
     */
    QLIST_REMOVE(node, node);
    return true;
}

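/*
 * Note (added commentary): qemu_lockcnt_count() above returns the number of
 * threads currently inside a lockcnt read section, i.e. walking the handler
 * list.  A non-zero count means freeing the node now could leave a walker
 * with a dangling pointer, hence the deferred ->deleted path.
 */
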
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    AioHandler *node;
    AioHandler *new_node = NULL;
    bool is_new = false;
    bool deleted = false;
    int poll_disable_change;

    qemu_lockcnt_lock(&ctx->list_lock);

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write && !io_poll) {
        if (node == NULL) {
            qemu_lockcnt_unlock(&ctx->list_lock);
            return;
        }
        /* Clean events in order to unregister fd from the ctx epoll. */
        node->pfd.events = 0;

        poll_disable_change = -!node->io_poll;
    } else {
        poll_disable_change = !io_poll - (node && !node->io_poll);
        if (node == NULL) {
            is_new = true;
        }
        /* Alloc and insert if it's not already there */
        new_node = g_new0(AioHandler, 1);

        /* Update handler with latest information */
        new_node->io_read = io_read;
        new_node->io_write = io_write;
        new_node->io_poll = io_poll;
        new_node->opaque = opaque;
        new_node->is_external = is_external;

        if (is_new) {
            new_node->pfd.fd = fd;
        } else {
            new_node->pfd = node->pfd;
        }
        g_source_add_poll(&ctx->source, &new_node->pfd);

        new_node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        new_node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);

        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, new_node, node);
    }
    if (node) {
        deleted = aio_remove_fd_handler(ctx, node);
    }

    /* No need to order poll_disable_cnt writes against other updates;
     * the counter is only used to avoid wasting time and latency on
     * iterated polling when the system call will be ultimately necessary.
     * Changing handlers is a rare event, and a little wasted polling until
     * the aio_notify below is not an issue.
     */
    atomic_set(&ctx->poll_disable_cnt,
               atomic_read(&ctx->poll_disable_cnt) + poll_disable_change);

    if (new_node) {
        aio_epoll_update(ctx, new_node, is_new);
    } else if (node) {
        /* Unregister deleted fd_handler */
        aio_epoll_update(ctx, node, false);
    }
    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);

    if (deleted) {
        g_free(node);
    }
}

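/*
 * Usage sketch (added commentary; read_ready_cb and opaque are hypothetical
 * names):
 *
 *     aio_set_fd_handler(ctx, fd, true, read_ready_cb, NULL, NULL, opaque);
 *     ...
 *     aio_set_fd_handler(ctx, fd, true, NULL, NULL, NULL, NULL);
 *
 * Passing NULL for all three callbacks unregisters the fd, as handled by the
 * deletion branch above.
 */
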
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    AioHandler *node = find_aio_handler(ctx, fd);

    if (!node) {
        return;
    }

    node->io_poll_begin = io_poll_begin;
    node->io_poll_end = io_poll_end;
}

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external,
                       (IOHandler *)io_read, NULL, io_poll, notifier);
}

void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    aio_set_fd_poll(ctx, event_notifier_get_fd(notifier),
                    (IOHandler *)io_poll_begin,
                    (IOHandler *)io_poll_end);
}

static void poll_set_started(AioContext *ctx, bool started)
{
    AioHandler *node;

    if (started == ctx->poll_started) {
        return;
    }

    ctx->poll_started = started;

    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        IOHandler *fn;

        if (node->deleted) {
            continue;
        }

        if (started) {
            fn = node->io_poll_begin;
        } else {
            fn = node->io_poll_end;
        }

        if (fn) {
            fn(node->opaque);
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);
}

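/*
 * Note (added commentary): io_poll_begin/io_poll_end let a handler
 * reconfigure itself around a polling phase; for example, virtio can
 * suppress ioeventfd notifications while the ring is being polled directly
 * and restore them when polling stops.
 */
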
bool aio_prepare(AioContext *ctx)
{
    /* Poll mode cannot be used with glib's event loop, disable it. */
    poll_set_started(ctx, false);

    return false;
}

bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read &&
            aio_node_check(ctx, node->is_external)) {
            result = true;
            break;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write &&
            aio_node_check(ctx, node->is_external)) {
            result = true;
            break;
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);

    return result;
}

static bool aio_dispatch_handlers(AioContext *ctx)
{
    AioHandler *node, *tmp;
    bool progress = false;

    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;

        if (!node->deleted &&
            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
            aio_node_check(ctx, node->is_external) &&
            node->io_read) {
            node->io_read(node->opaque);

            /* aio_notify() does not count as progress */
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }
        if (!node->deleted &&
            (revents & (G_IO_OUT | G_IO_ERR)) &&
            aio_node_check(ctx, node->is_external) &&
            node->io_write) {
            node->io_write(node->opaque);
            progress = true;
        }

        if (node->deleted) {
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    return progress;
}

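/*
 * Note (added commentary): qemu_lockcnt_dec_if_lock() above only succeeds,
 * taking the lock, when the caller is the last remaining walker of the list,
 * so a deleted node is freed exactly when no other thread can still hold a
 * pointer to it.
 */
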
void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx);
    qemu_lockcnt_dec(&ctx->list_lock);

    timerlistgroup_run_timers(&ctx->tlg);
}

/* These thread-local variables are used only in a small part of aio_poll
 * around the call to the poll() system call.  In particular they are not
 * used while aio_poll is performing callbacks, which makes it much easier
 * to think about reentrancy!
 *
 * Stack-allocated arrays would be perfect but they have size limitations;
 * heap allocation is expensive enough that we want to reuse arrays across
 * calls to aio_poll().  And because poll() has to be called without holding
 * any lock, the arrays cannot be stored in AioContext.  Thread-local data
 * has none of the disadvantages of these three options.
 */
static __thread GPollFD *pollfds;
static __thread AioHandler **nodes;
static __thread unsigned npfd, nalloc;
static __thread Notifier pollfds_cleanup_notifier;

static void pollfds_cleanup(Notifier *n, void *unused)
{
    g_assert(npfd == 0);
    g_free(pollfds);
    g_free(nodes);
    nalloc = 0;
}

static void add_pollfd(AioHandler *node)
{
    if (npfd == nalloc) {
        if (nalloc == 0) {
            pollfds_cleanup_notifier.notify = pollfds_cleanup;
            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
            nalloc = 8;
        } else {
            g_assert(nalloc <= INT_MAX);
            nalloc *= 2;
        }
        pollfds = g_renew(GPollFD, pollfds, nalloc);
        nodes = g_renew(AioHandler *, nodes, nalloc);
    }
    nodes[npfd] = node;
    pollfds[npfd] = (GPollFD) {
        .fd = node->pfd.fd,
        .events = node->pfd.events,
    };
    npfd++;
}

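/*
 * Note (added commentary): add_pollfd() doubles nalloc when the arrays are
 * full, so appends are amortised O(1); pollfds_cleanup_notifier is
 * registered once per thread so the arrays are reclaimed at thread exit.
 */
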
static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
{
    bool progress = false;
    AioHandler *node;

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_poll &&
            aio_node_check(ctx, node->is_external) &&
            node->io_poll(node->opaque)) {
            /*
             * Polling was successful, exit try_poll_mode immediately
             * to adjust the next polling time.
             */
            *timeout = 0;
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }

        /* Caller handles freeing deleted nodes.  Don't do it here. */
    }

    return progress;
}

/* run_poll_handlers:
 * @ctx: the AioContext
 * @max_ns: maximum time to poll for, in nanoseconds
 *
 * Polls for a given time.
 *
 * Note that ctx->notify_me must be non-zero so this function can detect
 * aio_notify().
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
{
    bool progress;
    int64_t start_time, elapsed_time;

    assert(ctx->notify_me);
    assert(qemu_lockcnt_count(&ctx->list_lock) > 0);

    trace_run_poll_handlers_begin(ctx, max_ns, *timeout);

    start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    do {
        progress = run_poll_handlers_once(ctx, timeout);
        elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
        max_ns = qemu_soonest_timeout(*timeout, max_ns);
        assert(!(max_ns && progress));
    } while (elapsed_time < max_ns && !atomic_read(&ctx->poll_disable_cnt));

    /* If time has passed with no successful polling, adjust *timeout to
     * keep the same ending time.
     */
    if (*timeout != -1) {
        *timeout -= MIN(*timeout, elapsed_time);
    }

    trace_run_poll_handlers_end(ctx, progress, *timeout);
    return progress;
}

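/*
 * Example of the *timeout adjustment above (added commentary): with
 * *timeout == 500000ns and elapsed_time == 200000ns, the caller's blocking
 * wait shrinks to 300000ns, so busy-polling never extends the overall
 * deadline.
 */
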
/* try_poll_mode:
 * @ctx: the AioContext
 * @timeout: timeout for blocking wait, computed by the caller and updated if
 *           polling succeeds.
 *
 * ctx->notify_me must be non-zero so this function can detect aio_notify().
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
{
    int64_t max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);

    if (max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
        poll_set_started(ctx, true);

        if (run_poll_handlers(ctx, max_ns, timeout)) {
            return true;
        }
    }

    poll_set_started(ctx, false);

    /* Even if we don't run busy polling, try polling once in case it can make
     * progress and the caller will be able to avoid ppoll(2)/epoll_wait(2).
     */
    return run_poll_handlers_once(ctx, timeout);
}

bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    int i;
    int ret = 0;
    bool progress;
    int64_t timeout;
    int64_t start = 0;

    assert(in_aio_context_home_thread(ctx));

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    qemu_lockcnt_inc(&ctx->list_lock);

    if (ctx->poll_max_ns) {
        start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

    timeout = blocking ? aio_compute_timeout(ctx) : 0;
    progress = try_poll_mode(ctx, &timeout);
    assert(!(timeout && progress));

    /* If polling is allowed, non-blocking aio_poll does not need the
     * system call---a single round of run_poll_handlers_once suffices.
     */
    if (timeout || atomic_read(&ctx->poll_disable_cnt)) {
        assert(npfd == 0);

        /* fill pollfds */

        if (!aio_epoll_enabled(ctx)) {
            QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
                if (!node->deleted && node->pfd.events
                    && aio_node_check(ctx, node->is_external)) {
                    add_pollfd(node);
                }
            }
        }

        /* wait until next event */
        if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
            AioHandler epoll_handler;

            epoll_handler.pfd.fd = ctx->epollfd;
            epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR;
            npfd = 0;
            add_pollfd(&epoll_handler);
            ret = aio_epoll(ctx, pollfds, npfd, timeout);
        } else {
            ret = qemu_poll_ns(pollfds, npfd, timeout);
        }
    }

    if (blocking) {
        atomic_sub(&ctx->notify_me, 2);
        aio_notify_accept(ctx);
    }

    /* Adjust polling time */
    if (ctx->poll_max_ns) {
        int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;

        if (block_ns <= ctx->poll_ns) {
            /* This is the sweet spot, no adjustment needed */
        } else if (block_ns > ctx->poll_max_ns) {
            /* We'd have to poll for too long, poll less */
            int64_t old = ctx->poll_ns;

            if (ctx->poll_shrink) {
                ctx->poll_ns /= ctx->poll_shrink;
            } else {
                ctx->poll_ns = 0;
            }

            trace_poll_shrink(ctx, old, ctx->poll_ns);
        } else if (ctx->poll_ns < ctx->poll_max_ns &&
                   block_ns < ctx->poll_max_ns) {
            /* There is room to grow, poll longer */
            int64_t old = ctx->poll_ns;
            int64_t grow = ctx->poll_grow;

            if (grow == 0) {
                grow = 2;
            }

            if (ctx->poll_ns) {
                ctx->poll_ns *= grow;
            } else {
                ctx->poll_ns = 4000; /* start polling at 4 microseconds */
            }

            if (ctx->poll_ns > ctx->poll_max_ns) {
                ctx->poll_ns = ctx->poll_max_ns;
            }

            trace_poll_grow(ctx, old, ctx->poll_ns);
        }
    }

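    /*
     * Worked example of the adaptation above (added commentary, with the
     * default poll_grow/poll_shrink of 0): poll_ns starts at 0 and grows
     * 0 -> 4000 -> 8000 -> ... whenever the blocking wait outlasts it while
     * staying under poll_max_ns, is capped at poll_max_ns, and resets to 0
     * once a wait exceeds poll_max_ns.
     */
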
    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        for (i = 0; i < npfd; i++) {
            nodes[i]->pfd.revents = pollfds[i].revents;
        }
    }

    npfd = 0;

    progress |= aio_bh_poll(ctx);

    if (ret > 0) {
        progress |= aio_dispatch_handlers(ctx);
    }

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}

void aio_context_setup(AioContext *ctx)
{
#ifdef CONFIG_EPOLL_CREATE1
    assert(!ctx->epollfd);
    ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
    if (ctx->epollfd == -1) {
        fprintf(stderr, "Failed to create epoll instance: %s\n",
                strerror(errno));
        ctx->epoll_available = false;
    } else {
        ctx->epoll_available = true;
    }
#endif
}

void aio_context_destroy(AioContext *ctx)
{
#ifdef CONFIG_EPOLL_CREATE1
    aio_epoll_disable(ctx);
#endif
}

void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    /* No thread synchronization here, it doesn't matter if an incorrect value
     * is used once.
     */
    ctx->poll_max_ns = max_ns;
    ctx->poll_ns = 0;
    ctx->poll_grow = grow;
    ctx->poll_shrink = shrink;

    aio_notify(ctx);
}
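
/*
 * Usage sketch (added commentary; the values are illustrative): enable
 * adaptive polling up to 32us, doubling on growth and halving on shrink:
 *
 *     aio_context_set_poll_params(ctx, 32768, 2, 2, &error_abort);
 *
 * Passing max_ns == 0 disables busy-polling: aio_poll() skips the adaptation
 * block and try_poll_mode() falls back to the single
 * run_poll_handlers_once() pass.
 */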