/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori <[email protected]>
 *  Paolo Bonzini <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block.h"
#include "qemu-queue.h"
#include "qemu_socket.h"

f42b2207 PB |
23 | struct AioHandler { |
24 | EventNotifier *e; | |
25 | EventNotifierHandler *io_notify; | |
26 | AioFlushEventNotifierHandler *io_flush; | |
cd9ba1eb | 27 | GPollFD pfd; |
a76bab49 | 28 | int deleted; |
72cf2d4f | 29 | QLIST_ENTRY(AioHandler) node; |
a76bab49 AL |
30 | }; |
31 | ||
f42b2207 PB |
32 | void aio_set_event_notifier(AioContext *ctx, |
33 | EventNotifier *e, | |
34 | EventNotifierHandler *io_notify, | |
35 | AioFlushEventNotifierHandler *io_flush) | |
a76bab49 AL |
36 | { |
37 | AioHandler *node; | |
38 | ||
a915f4bc | 39 | QLIST_FOREACH(node, &ctx->aio_handlers, node) { |
f42b2207 PB |
40 | if (node->e == e && !node->deleted) { |
41 | break; | |
42 | } | |
a76bab49 AL |
43 | } |
44 | ||
a76bab49 | 45 | /* Are we deleting the fd handler? */ |
f42b2207 | 46 | if (!io_notify) { |
a76bab49 | 47 | if (node) { |
e3713e00 PB |
48 | g_source_remove_poll(&ctx->source, &node->pfd); |
49 | ||
a76bab49 | 50 | /* If the lock is held, just mark the node as deleted */ |
cd9ba1eb | 51 | if (ctx->walking_handlers) { |
a76bab49 | 52 | node->deleted = 1; |
cd9ba1eb PB |
53 | node->pfd.revents = 0; |
54 | } else { | |
a76bab49 AL |
55 | /* Otherwise, delete it for real. We can't just mark it as |
56 | * deleted because deleted nodes are only cleaned up after | |
57 | * releasing the walking_handlers lock. | |
58 | */ | |
72cf2d4f | 59 | QLIST_REMOVE(node, node); |
7267c094 | 60 | g_free(node); |
a76bab49 AL |
61 | } |
62 | } | |
63 | } else { | |
64 | if (node == NULL) { | |
65 | /* Alloc and insert if it's not already there */ | |
7267c094 | 66 | node = g_malloc0(sizeof(AioHandler)); |
f42b2207 PB |
67 | node->e = e; |
68 | node->pfd.fd = (uintptr_t)event_notifier_get_handle(e); | |
69 | node->pfd.events = G_IO_IN; | |
a915f4bc | 70 | QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node); |
e3713e00 PB |
71 | |
72 | g_source_add_poll(&ctx->source, &node->pfd); | |
a76bab49 AL |
73 | } |
74 | /* Update handler with latest information */ | |
f42b2207 | 75 | node->io_notify = io_notify; |
a76bab49 | 76 | node->io_flush = io_flush; |
a76bab49 | 77 | } |
9958c351 PB |
78 | } |
79 | ||
cd9ba1eb PB |
80 | bool aio_pending(AioContext *ctx) |
81 | { | |
82 | AioHandler *node; | |
83 | ||
84 | QLIST_FOREACH(node, &ctx->aio_handlers, node) { | |
f42b2207 | 85 | if (node->pfd.revents && node->io_notify) { |
cd9ba1eb PB |
86 | return true; |
87 | } | |
88 | } | |
89 | ||
90 | return false; | |
91 | } | |
92 | ||
7c0628b2 | 93 | bool aio_poll(AioContext *ctx, bool blocking) |
a76bab49 | 94 | { |
9eb0bfca | 95 | AioHandler *node; |
f42b2207 | 96 | HANDLE events[MAXIMUM_WAIT_OBJECTS + 1]; |
7c0628b2 | 97 | bool busy, progress; |
f42b2207 | 98 | int count; |
7c0628b2 PB |
99 | |
100 | progress = false; | |
a76bab49 | 101 | |
8febfa26 KW |
102 | /* |
103 | * If there are callbacks left that have been queued, we need to call then. | |
bcdc1857 PB |
104 | * Do not call select in this case, because it is possible that the caller |
105 | * does not need a complete flush (as is the case for qemu_aio_wait loops). | |
8febfa26 | 106 | */ |
a915f4bc | 107 | if (aio_bh_poll(ctx)) { |
7c0628b2 PB |
108 | blocking = false; |
109 | progress = true; | |
110 | } | |
111 | ||
cd9ba1eb PB |
112 | /* |
113 | * Then dispatch any pending callbacks from the GSource. | |
114 | * | |
115 | * We have to walk very carefully in case qemu_aio_set_fd_handler is | |
116 | * called while we're walking. | |
117 | */ | |
118 | node = QLIST_FIRST(&ctx->aio_handlers); | |
119 | while (node) { | |
120 | AioHandler *tmp; | |
cd9ba1eb PB |
121 | |
122 | ctx->walking_handlers++; | |
123 | ||
f42b2207 PB |
124 | if (node->pfd.revents && node->io_notify) { |
125 | node->pfd.revents = 0; | |
126 | node->io_notify(node->e); | |
cd9ba1eb PB |
127 | progress = true; |
128 | } | |
129 | ||
130 | tmp = node; | |
131 | node = QLIST_NEXT(node, node); | |
132 | ||
133 | ctx->walking_handlers--; | |
134 | ||
135 | if (!ctx->walking_handlers && tmp->deleted) { | |
136 | QLIST_REMOVE(tmp, node); | |
137 | g_free(tmp); | |
138 | } | |
139 | } | |
140 | ||
7c0628b2 | 141 | if (progress && !blocking) { |
bcdc1857 | 142 | return true; |
bafbd6a1 | 143 | } |
8febfa26 | 144 | |
a915f4bc | 145 | ctx->walking_handlers++; |
a76bab49 | 146 | |
9eb0bfca PB |
147 | /* fill fd sets */ |
148 | busy = false; | |
f42b2207 | 149 | count = 0; |
a915f4bc | 150 | QLIST_FOREACH(node, &ctx->aio_handlers, node) { |
9eb0bfca PB |
151 | /* If there aren't pending AIO operations, don't invoke callbacks. |
152 | * Otherwise, if there are no AIO requests, qemu_aio_wait() would | |
153 | * wait indefinitely. | |
154 | */ | |
4231c88d | 155 | if (!node->deleted && node->io_flush) { |
f42b2207 | 156 | if (node->io_flush(node->e) == 0) { |
9eb0bfca | 157 | continue; |
a76bab49 | 158 | } |
9eb0bfca PB |
159 | busy = true; |
160 | } | |
f42b2207 PB |
161 | if (!node->deleted && node->io_notify) { |
162 | events[count++] = event_notifier_get_handle(node->e); | |
9eb0bfca PB |
163 | } |
164 | } | |
a76bab49 | 165 | |
a915f4bc | 166 | ctx->walking_handlers--; |
a76bab49 | 167 | |
9eb0bfca PB |
168 | /* No AIO operations? Get us out of here */ |
169 | if (!busy) { | |
7c0628b2 | 170 | return progress; |
9eb0bfca | 171 | } |
a76bab49 | 172 | |
9eb0bfca | 173 | /* wait until next event */ |
f42b2207 PB |
174 | for (;;) { |
175 | int timeout = blocking ? INFINITE : 0; | |
176 | int ret = WaitForMultipleObjects(count, events, FALSE, timeout); | |
177 | ||
178 | /* if we have any signaled events, dispatch event */ | |
179 | if ((DWORD) (ret - WAIT_OBJECT_0) >= count) { | |
180 | break; | |
181 | } | |
182 | ||
183 | blocking = false; | |
9eb0bfca | 184 | |
9eb0bfca PB |
185 | /* we have to walk very carefully in case |
186 | * qemu_aio_set_fd_handler is called while we're walking */ | |
a915f4bc | 187 | node = QLIST_FIRST(&ctx->aio_handlers); |
9eb0bfca PB |
188 | while (node) { |
189 | AioHandler *tmp; | |
190 | ||
a915f4bc | 191 | ctx->walking_handlers++; |
2db2bfc0 | 192 | |
9eb0bfca | 193 | if (!node->deleted && |
f42b2207 PB |
194 | event_notifier_get_handle(node->e) == events[ret - WAIT_OBJECT_0] && |
195 | node->io_notify) { | |
196 | node->io_notify(node->e); | |
cd9ba1eb | 197 | progress = true; |
a76bab49 AL |
198 | } |
199 | ||
9eb0bfca PB |
200 | tmp = node; |
201 | node = QLIST_NEXT(node, node); | |
202 | ||
a915f4bc | 203 | ctx->walking_handlers--; |
2db2bfc0 | 204 | |
a915f4bc | 205 | if (!ctx->walking_handlers && tmp->deleted) { |
9eb0bfca PB |
206 | QLIST_REMOVE(tmp, node); |
207 | g_free(tmp); | |
208 | } | |
a76bab49 | 209 | } |
9eb0bfca | 210 | } |
bcdc1857 | 211 | |
7c0628b2 | 212 | return progress; |
a76bab49 | 213 | } |