/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "block/raw-aio.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

struct QEMUBH {
    AioContext *ctx;
    QEMUBHFunc *cb;
    void *opaque;
    QEMUBH *next;
    bool scheduled;
    bool idle;
    bool deleted;
};
46 | ||
47 | void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque) | |
48 | { | |
49 | QEMUBH *bh; | |
50 | bh = g_new(QEMUBH, 1); | |
51 | *bh = (QEMUBH){ | |
52 | .ctx = ctx, | |
53 | .cb = cb, | |
54 | .opaque = opaque, | |
55 | }; | |
56 | qemu_mutex_lock(&ctx->bh_lock); | |
57 | bh->next = ctx->first_bh; | |
58 | bh->scheduled = 1; | |
59 | bh->deleted = 1; | |
60 | /* Make sure that the members are ready before putting bh into list */ | |
61 | smp_wmb(); | |
62 | ctx->first_bh = bh; | |
63 | qemu_mutex_unlock(&ctx->bh_lock); | |
64 | aio_notify(ctx); | |
65 | } | |
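
/*
 * Note: aio_bh_schedule_oneshot creates the BH with both 'scheduled' and
 * 'deleted' set, so it runs exactly once and is then reclaimed by the
 * deferred-deletion pass in aio_bh_poll; the caller never sees or frees
 * the QEMUBH.
 */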
66 | ||
67 | QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque) | |
68 | { | |
69 | QEMUBH *bh; | |
70 | bh = g_new(QEMUBH, 1); | |
71 | *bh = (QEMUBH){ | |
72 | .ctx = ctx, | |
73 | .cb = cb, | |
74 | .opaque = opaque, | |
75 | }; | |
76 | qemu_mutex_lock(&ctx->bh_lock); | |
77 | bh->next = ctx->first_bh; | |
78 | /* Make sure that the members are ready before putting bh into list */ | |
79 | smp_wmb(); | |
80 | ctx->first_bh = bh; | |
81 | qemu_mutex_unlock(&ctx->bh_lock); | |
82 | return bh; | |
83 | } | |
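
/*
 * Usage sketch (illustrative only; my_cb and my_state are placeholder
 * names, not part of this file):
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, my_state);
 *     qemu_bh_schedule(bh);    /- my_cb(my_state) runs from aio_bh_poll
 *     ...
 *     qemu_bh_delete(bh);      /- freed later by the cleanup pass
 */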
84 | ||
85 | void aio_bh_call(QEMUBH *bh) | |
86 | { | |
87 | bh->cb(bh->opaque); | |
88 | } | |
89 | ||
/* Multiple invocations of aio_bh_poll must not run concurrently on the same
 * AioContext */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    ctx->walking_bh++;

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        next = bh->next;
        /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
         * implicit memory barrier ensures that the callback sees all writes
         * done by the scheduling thread.  It also ensures that the scheduling
         * thread sees the zero before bh->cb has run, and thus will call
         * aio_notify again if necessary.
         */
        if (atomic_xchg(&bh->scheduled, 0)) {
            /* Idle BHs don't count as progress */
            if (!bh->idle) {
                ret = 1;
            }
            bh->idle = 0;
            aio_bh_call(bh);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted && !bh->scheduled) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
    }

    return ret;
}
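
/*
 * Design note: deletion is deferred while ctx->walking_bh is non-zero, so
 * a callback may safely call qemu_bh_delete on its own BH (or any other)
 * while aio_bh_poll is iterating; the node is only unlinked and freed once
 * no traversal is in progress.
 */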
139 | ||
140 | void qemu_bh_schedule_idle(QEMUBH *bh) | |
141 | { | |
142 | bh->idle = 1; | |
143 | /* Make sure that idle & any writes needed by the callback are done | |
144 | * before the locations are read in the aio_bh_poll. | |
145 | */ | |
146 | atomic_mb_set(&bh->scheduled, 1); | |
147 | } | |
148 | ||
void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    ctx = bh->ctx;
    bh->idle = 0;
    /* The memory barrier implicit in atomic_xchg makes sure that:
     * 1. idle and any writes needed by the callback are done before the
     *    locations are read in aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    if (atomic_xchg(&bh->scheduled, 1) == 0) {
        aio_notify(ctx);
    }
}
165 | ||
166 | ||
167 | /* This func is async. | |
168 | */ | |
169 | void qemu_bh_cancel(QEMUBH *bh) | |
170 | { | |
171 | bh->scheduled = 0; | |
172 | } | |
173 | ||
174 | /* This func is async.The bottom half will do the delete action at the finial | |
175 | * end. | |
176 | */ | |
177 | void qemu_bh_delete(QEMUBH *bh) | |
178 | { | |
179 | bh->scheduled = 0; | |
180 | bh->deleted = 1; | |
181 | } | |
182 | ||
183 | int64_t | |
184 | aio_compute_timeout(AioContext *ctx) | |
185 | { | |
186 | int64_t deadline; | |
187 | int timeout = -1; | |
188 | QEMUBH *bh; | |
189 | ||
190 | for (bh = ctx->first_bh; bh; bh = bh->next) { | |
191 | if (bh->scheduled) { | |
192 | if (bh->idle) { | |
193 | /* idle bottom halves will be polled at least | |
194 | * every 10ms */ | |
195 | timeout = 10000000; | |
196 | } else { | |
197 | /* non-idle bottom halves will be executed | |
198 | * immediately */ | |
199 | return 0; | |
200 | } | |
201 | } | |
202 | } | |
203 | ||
204 | deadline = timerlistgroup_deadline_ns(&ctx->tlg); | |
205 | if (deadline == 0) { | |
206 | return 0; | |
207 | } else { | |
208 | return qemu_soonest_timeout(timeout, deadline); | |
209 | } | |
210 | } | |
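
/*
 * Worked example: with one idle BH pending and no timers, the function
 * returns 10000000 ns (10 ms); with any non-idle BH pending it returns 0,
 * so the poll must not block; otherwise the result is the nearest timer
 * deadline from ctx->tlg, combined via qemu_soonest_timeout.
 */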
211 | ||
212 | static gboolean | |
213 | aio_ctx_prepare(GSource *source, gint *timeout) | |
214 | { | |
215 | AioContext *ctx = (AioContext *) source; | |
216 | ||
217 | atomic_or(&ctx->notify_me, 1); | |
218 | ||
219 | /* We assume there is no timeout already supplied */ | |
220 | *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)); | |
221 | ||
222 | if (aio_prepare(ctx)) { | |
223 | *timeout = 0; | |
224 | } | |
225 | ||
226 | return *timeout == 0; | |
227 | } | |
228 | ||
229 | static gboolean | |
230 | aio_ctx_check(GSource *source) | |
231 | { | |
232 | AioContext *ctx = (AioContext *) source; | |
233 | QEMUBH *bh; | |
234 | ||
235 | atomic_and(&ctx->notify_me, ~1); | |
236 | aio_notify_accept(ctx); | |
237 | ||
238 | for (bh = ctx->first_bh; bh; bh = bh->next) { | |
239 | if (bh->scheduled) { | |
240 | return true; | |
241 | } | |
242 | } | |
243 | return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0); | |
244 | } | |
245 | ||
246 | static gboolean | |
247 | aio_ctx_dispatch(GSource *source, | |
248 | GSourceFunc callback, | |
249 | gpointer user_data) | |
250 | { | |
251 | AioContext *ctx = (AioContext *) source; | |
252 | ||
253 | assert(callback == NULL); | |
254 | aio_dispatch(ctx, true); | |
255 | return true; | |
256 | } | |
257 | ||
258 | static void | |
259 | aio_ctx_finalize(GSource *source) | |
260 | { | |
261 | AioContext *ctx = (AioContext *) source; | |
262 | ||
263 | thread_pool_free(ctx->thread_pool); | |
264 | ||
265 | #ifdef CONFIG_LINUX_AIO | |
266 | if (ctx->linux_aio) { | |
267 | laio_detach_aio_context(ctx->linux_aio, ctx); | |
268 | laio_cleanup(ctx->linux_aio); | |
269 | ctx->linux_aio = NULL; | |
270 | } | |
271 | #endif | |
272 | ||
273 | qemu_mutex_lock(&ctx->bh_lock); | |
274 | while (ctx->first_bh) { | |
275 | QEMUBH *next = ctx->first_bh->next; | |
276 | ||
277 | /* qemu_bh_delete() must have been called on BHs in this AioContext */ | |
278 | assert(ctx->first_bh->deleted); | |
279 | ||
280 | g_free(ctx->first_bh); | |
281 | ctx->first_bh = next; | |
282 | } | |
283 | qemu_mutex_unlock(&ctx->bh_lock); | |
284 | ||
285 | aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL); | |
286 | event_notifier_cleanup(&ctx->notifier); | |
287 | qemu_rec_mutex_destroy(&ctx->lock); | |
288 | qemu_mutex_destroy(&ctx->bh_lock); | |
289 | timerlistgroup_deinit(&ctx->tlg); | |
290 | } | |
291 | ||
292 | static GSourceFuncs aio_source_funcs = { | |
293 | aio_ctx_prepare, | |
294 | aio_ctx_check, | |
295 | aio_ctx_dispatch, | |
296 | aio_ctx_finalize | |
297 | }; | |
298 | ||
299 | GSource *aio_get_g_source(AioContext *ctx) | |
300 | { | |
301 | g_source_ref(&ctx->source); | |
302 | return &ctx->source; | |
303 | } | |
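
/*
 * Usage sketch: the AioContext can be driven by a GLib main loop.  The
 * caller owns the reference returned above and must drop it after
 * attaching:
 *
 *     GSource *source = aio_get_g_source(ctx);
 *     g_source_attach(source, NULL);   /- NULL = default GMainContext
 *     g_source_unref(source);
 */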
304 | ||
305 | ThreadPool *aio_get_thread_pool(AioContext *ctx) | |
306 | { | |
307 | if (!ctx->thread_pool) { | |
308 | ctx->thread_pool = thread_pool_new(ctx); | |
309 | } | |
310 | return ctx->thread_pool; | |
311 | } | |
312 | ||
313 | #ifdef CONFIG_LINUX_AIO | |
314 | LinuxAioState *aio_get_linux_aio(AioContext *ctx) | |
315 | { | |
316 | if (!ctx->linux_aio) { | |
317 | ctx->linux_aio = laio_init(); | |
318 | laio_attach_aio_context(ctx->linux_aio, ctx); | |
319 | } | |
320 | return ctx->linux_aio; | |
321 | } | |
322 | #endif | |
323 | ||
324 | void aio_notify(AioContext *ctx) | |
325 | { | |
326 | /* Write e.g. bh->scheduled before reading ctx->notify_me. Pairs | |
327 | * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll. | |
328 | */ | |
329 | smp_mb(); | |
330 | if (ctx->notify_me) { | |
331 | event_notifier_set(&ctx->notifier); | |
332 | atomic_mb_set(&ctx->notified, true); | |
333 | } | |
334 | } | |
335 | ||
336 | void aio_notify_accept(AioContext *ctx) | |
337 | { | |
338 | if (atomic_xchg(&ctx->notified, false)) { | |
339 | event_notifier_test_and_clear(&ctx->notifier); | |
340 | } | |
341 | } | |
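
/*
 * aio_notify and aio_notify_accept form a pair: aio_notify sets
 * ctx->notified (and kicks the event notifier) only while some thread has
 * announced it may block by setting ctx->notify_me; aio_notify_accept
 * consumes the flag and clears the notifier so later polls do not see a
 * stale wakeup.  event_notifier_poll below reads ctx->notified directly,
 * letting polling mode detect a pending notification without touching the
 * notifier's file descriptor.
 */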
342 | ||
343 | static void aio_timerlist_notify(void *opaque) | |
344 | { | |
345 | aio_notify(opaque); | |
346 | } | |
347 | ||
348 | static void event_notifier_dummy_cb(EventNotifier *e) | |
349 | { | |
350 | } | |
351 | ||
352 | /* Returns true if aio_notify() was called (e.g. a BH was scheduled) */ | |
353 | static bool event_notifier_poll(void *opaque) | |
354 | { | |
355 | EventNotifier *e = opaque; | |
356 | AioContext *ctx = container_of(e, AioContext, notifier); | |
357 | ||
358 | return atomic_read(&ctx->notified); | |
359 | } | |
360 | ||
361 | AioContext *aio_context_new(Error **errp) | |
362 | { | |
363 | int ret; | |
364 | AioContext *ctx; | |
365 | ||
366 | ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext)); | |
367 | aio_context_setup(ctx); | |
368 | ||
369 | ret = event_notifier_init(&ctx->notifier, false); | |
370 | if (ret < 0) { | |
371 | error_setg_errno(errp, -ret, "Failed to initialize event notifier"); | |
372 | goto fail; | |
373 | } | |
374 | g_source_set_can_recurse(&ctx->source, true); | |
375 | aio_set_event_notifier(ctx, &ctx->notifier, | |
376 | false, | |
377 | (EventNotifierHandler *) | |
378 | event_notifier_dummy_cb, | |
379 | event_notifier_poll); | |
380 | #ifdef CONFIG_LINUX_AIO | |
381 | ctx->linux_aio = NULL; | |
382 | #endif | |
383 | ctx->thread_pool = NULL; | |
384 | qemu_mutex_init(&ctx->bh_lock); | |
385 | qemu_rec_mutex_init(&ctx->lock); | |
386 | timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx); | |
387 | ||
388 | ctx->poll_ns = 0; | |
389 | ctx->poll_max_ns = 0; | |
390 | ctx->poll_grow = 0; | |
391 | ctx->poll_shrink = 0; | |
392 | ||
393 | return ctx; | |
394 | fail: | |
395 | g_source_destroy(&ctx->source); | |
396 | return NULL; | |
397 | } | |
398 | ||
399 | void aio_context_ref(AioContext *ctx) | |
400 | { | |
401 | g_source_ref(&ctx->source); | |
402 | } | |
403 | ||
404 | void aio_context_unref(AioContext *ctx) | |
405 | { | |
406 | g_source_unref(&ctx->source); | |
407 | } | |
408 | ||
409 | void aio_context_acquire(AioContext *ctx) | |
410 | { | |
411 | qemu_rec_mutex_lock(&ctx->lock); | |
412 | } | |
413 | ||
414 | void aio_context_release(AioContext *ctx) | |
415 | { | |
416 | qemu_rec_mutex_unlock(&ctx->lock); | |
417 | } |
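
/*
 * Usage sketch (a convention, not enforced here): code touching an
 * AioContext from outside its home thread brackets the access with the
 * lock:
 *
 *     aio_context_acquire(ctx);
 *     ... operate on objects tied to ctx ...
 *     aio_context_release(ctx);
 *
 * Because ctx->lock is a QemuRecMutex, acquire/release pairs may nest.
 */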