/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori <[email protected]>
 *  Paolo Bonzini <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
/* One registered event source in an AioContext.  On this (Windows) backend
 * a handler is keyed by its EventNotifier rather than a file descriptor. */
struct AioHandler {
    EventNotifier *e;                     /* notifier this handler watches */
    EventNotifierHandler *io_notify;      /* callback run when e is signaled */
    AioFlushEventNotifierHandler *io_flush; /* returns nonzero while requests are in flight */
    GPollFD pfd;                          /* poll record shared with the GSource machinery */
    int deleted;                          /* set instead of freeing while handlers are being walked */
    QLIST_ENTRY(AioHandler) node;         /* linkage in ctx->aio_handlers */
};
/* Register, update, or remove the handler attached to @e in @ctx.
 *
 * Passing a NULL @io_notify removes the handler; otherwise an existing
 * node for @e is updated in place, or a new one is allocated and hooked
 * into both the handler list and the context's GSource.  Always kicks
 * the event loop via aio_notify() so a blocked aio_poll() re-evaluates
 * its handler set.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            EventNotifierHandler *io_notify,
                            AioFlushEventNotifierHandler *io_flush)
{
    AioHandler *node;

    /* Find a live (non-deleted) node already registered for this notifier. */
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_notify) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_malloc0(sizeof(AioHandler));
            node->e = e;
            /* GPollFD carries the Win32 HANDLE squeezed into its fd field. */
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
        node->io_flush = io_flush;
    }

    /* Wake up any aio_poll() so it picks up the new handler set. */
    aio_notify(ctx);
}
cd9ba1eb PB |
82 | bool aio_pending(AioContext *ctx) |
83 | { | |
84 | AioHandler *node; | |
85 | ||
86 | QLIST_FOREACH(node, &ctx->aio_handlers, node) { | |
f42b2207 | 87 | if (node->pfd.revents && node->io_notify) { |
cd9ba1eb PB |
88 | return true; |
89 | } | |
90 | } | |
91 | ||
92 | return false; | |
93 | } | |
94 | ||
/* Run one iteration of the AioContext event loop (Windows backend).
 *
 * Order of work: run queued bottom halves, dispatch handlers whose
 * revents are already set, poll io_flush callbacks to decide whether any
 * AIO is outstanding, then WaitForMultipleObjects on the registered
 * event-notifier handles and dispatch whichever one fires.  If @blocking
 * is true the wait uses an INFINITE timeout (only on the first pass);
 * otherwise it is a non-blocking poll.
 *
 * Returns true if any callback made progress (or busy AIO exists);
 * returns @progress early when there was nothing to wait for.
 */
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool busy, progress;
    int count;

    progress = false;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for qemu_aio_wait loops).
     */
    if (aio_bh_poll(ctx)) {
        blocking = false;
        progress = true;
    }

    /*
     * Then dispatch any pending callbacks from the GSource.
     *
     * We have to walk very carefully in case qemu_aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;

        /* Hold the "lock" so concurrent removals only mark nodes deleted. */
        ctx->walking_handlers++;

        if (node->pfd.revents && node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);
            progress = true;
        }

        /* Advance before possibly freeing the current node. */
        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        /* Deferred cleanup: only safe once no walker remains. */
        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    if (progress && !blocking) {
        return true;
    }

    ctx->walking_handlers++;

    /* fill fd sets */
    busy = false;
    count = 0;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        /* If there aren't pending AIO operations, don't invoke callbacks.
         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
         * wait indefinitely.
         */
        if (!node->deleted && node->io_flush) {
            if (node->io_flush(node->e) == 0) {
                continue;
            }
            busy = true;
        }
        if (!node->deleted && node->io_notify) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    ctx->walking_handlers--;

    /* No AIO operations?  Get us out of here */
    if (!busy) {
        return progress;
    }

    /* wait until next event */
    while (count > 0) {
        int timeout = blocking ? INFINITE : 0;
        int ret = WaitForMultipleObjects(count, events, FALSE, timeout);

        /* if we have any signaled events, dispatch event */
        /* The unsigned comparison also catches WAIT_TIMEOUT/WAIT_FAILED,
         * which fall outside [WAIT_OBJECT_0, WAIT_OBJECT_0 + count). */
        if ((DWORD) (ret - WAIT_OBJECT_0) >= count) {
            break;
        }

        /* Only the first wait may block; subsequent passes just drain. */
        blocking = false;

        /* we have to walk very carefully in case
         * qemu_aio_set_fd_handler is called while we're walking */
        node = QLIST_FIRST(&ctx->aio_handlers);
        while (node) {
            AioHandler *tmp;

            ctx->walking_handlers++;

            /* Match the signaled handle back to its handler. */
            if (!node->deleted &&
                event_notifier_get_handle(node->e) == events[ret - WAIT_OBJECT_0] &&
                node->io_notify) {
                node->io_notify(node->e);
                progress = true;
            }

            tmp = node;
            node = QLIST_NEXT(node, node);

            ctx->walking_handlers--;

            if (!ctx->walking_handlers && tmp->deleted) {
                QLIST_REMOVE(tmp, node);
                g_free(tmp);
            }
        }

        /* Try again, but only call each handler once: drop the handled
         * event from the array by swapping in the last entry. */
        events[ret - WAIT_OBJECT_0] = events[--count];
    }

    assert(progress || busy);
    return true;
}