/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"
#include "qemu/coroutine.h"

#include <libaio.h>

/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 * tunable by the guest.  If we get more outstanding requests at a time
 * than this, we will get EAGAIN from io_submit, which is communicated to
 * the guest as an I/O error.
 */
#define MAX_EVENTS 128

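/*
 * Per-request state.  Exactly one completion path is used: if @co is set,
 * the request came from laio_co_submit() and the coroutine is re-entered on
 * completion; otherwise common.cb is invoked and the ACB is unreferenced.
 */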
struct qemu_laiocb {
    BlockAIOCB common;
    Coroutine *co;
    LinuxAioState *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(qemu_laiocb) next;
};

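/*
 * Submission queue.  Requests accumulate on @pending and are handed to the
 * kernel in batches by ioq_submit(); @blocked is set while queued requests
 * could not all be pushed to the kernel (e.g. after EAGAIN from io_submit()).
 */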
typedef struct {
    int plugged;
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, qemu_laiocb) pending;
} LaioQueue;

struct LinuxAioState {
    AioContext *aio_context;

    io_context_t ctx;
    EventNotifier e;

    /* I/O queue for batched submission.  Protected by the AioContext lock. */
    LaioQueue io_q;

    /* I/O completion processing.  Only runs in the I/O thread. */
    QEMUBH *completion_bh;
    int event_idx;
    int event_max;
};

static void ioq_submit(LinuxAioState *s);

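/*
 * Recombine the two 32-bit result fields of an io_event into a single
 * ssize_t: the number of bytes transferred, or a negative errno.
 */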
static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}

/*
 * Completes an AIO request (calls the callback and frees the ACB).
 */
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                                  laiocb->qiov->size - ret);
            } else {
                ret = -ENOSPC;
            }
        }
    }

    laiocb->ret = ret;
    if (laiocb->co) {
        /* If the coroutine is already entered it must be in ioq_submit() and
         * will notice laiocb->ret has been filled in when it eventually runs
         * later.  Coroutines cannot be entered recursively so avoid doing
         * that!
         */
        if (!qemu_coroutine_entered(laiocb->co)) {
            aio_co_wake(laiocb->co);
        }
    } else {
        laiocb->common.cb(laiocb->common.opaque, ret);
        qemu_aio_unref(laiocb);
    }
}

/**
 * aio_ring buffer, which is shared between userspace and the kernel.
 *
 * This is copied from linux/fs/aio.c; a common header does not exist,
 * but AIO has existed for ages so we assume the ABI is stable.
 */
struct aio_ring {
    unsigned id;    /* kernel internal index number */
    unsigned nr;    /* number of io_events */
    unsigned head;  /* Written to by userland or by kernel. */
    unsigned tail;

    unsigned magic;
    unsigned compat_features;
    unsigned incompat_features;
    unsigned header_length; /* size of aio_ring */

    struct io_event io_events[0];
};

/**
 * io_getevents_peek:
 * @ctx: AIO context
 * @events: pointer to the events array, output value
 *
 * Returns the number of completed events and sets a pointer
 * to the events array.  This function does not update the internal
 * ring buffer; it only reads head and tail.  When @events has been
 * processed, io_getevents_commit() must be called.
 */
static inline unsigned int io_getevents_peek(io_context_t ctx,
                                             struct io_event **events)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;
    unsigned int head = ring->head, tail = ring->tail;
    unsigned int nr;

    nr = tail >= head ? tail - head : ring->nr - head;
    *events = ring->io_events + head;
    /* To avoid speculative loads of s->events[i] before observing tail.
       Paired with smp_wmb() inside linux/fs/aio.c: aio_complete(). */
    smp_rmb();

    return nr;
}

/**
 * io_getevents_commit:
 * @ctx: AIO context
 * @nr: the number of events by which head should be advanced
 *
 * Advances the head of the ring buffer.
 */
static inline void io_getevents_commit(io_context_t ctx, unsigned int nr)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;

    if (nr) {
        ring->head = (ring->head + nr) % ring->nr;
    }
}

/**
 * io_getevents_advance_and_peek:
 * @ctx: AIO context
 * @events: pointer to the events array, output value
 * @nr: the number of events by which head should be advanced
 *
 * Advances the head of the ring buffer and returns the number of elements
 * left.
 */
static inline unsigned int
io_getevents_advance_and_peek(io_context_t ctx,
                              struct io_event **events,
                              unsigned int nr)
{
    io_getevents_commit(ctx, nr);
    return io_getevents_peek(ctx, events);
}

/**
 * qemu_laio_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests and invokes their callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll().  In order to do this,
 * indices are kept in LinuxAioState.  The function schedules the completion
 * BH so that it can be called again from a nested event loop.  When there are
 * no events left to complete, the BH is canceled.
 */
static void qemu_laio_process_completions(LinuxAioState *s)
{
    struct io_event *events;

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    while ((s->event_max = io_getevents_advance_and_peek(s->ctx, &events,
                                                         s->event_idx))) {
        for (s->event_idx = 0; s->event_idx < s->event_max; ) {
            struct iocb *iocb = events[s->event_idx].obj;
            struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

            laiocb->ret = io_event_ret(&events[s->event_idx]);

            /* Change counters one-by-one because we can be nested. */
            s->io_q.in_flight--;
            s->event_idx++;
            qemu_laio_process_completion(laiocb);
        }
    }

    qemu_bh_cancel(s->completion_bh);

    /* If we are nested, we have to notify the level above that we are done
     * by setting event_max to zero; the upper level will then jump out of
     * its own `for` loop.  If we are the last level, all counters have
     * dropped to zero. */
    s->event_max = 0;
    s->event_idx = 0;
}

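/*
 * Process any completions that have arrived, then try to push queued
 * requests to the kernel; submission needs the AioContext lock, completion
 * processing does not.
 */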
static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
{
    qemu_laio_process_completions(s);

    aio_context_acquire(s->aio_context);
    if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
    aio_context_release(s->aio_context);
}

static void qemu_laio_completion_bh(void *opaque)
{
    LinuxAioState *s = opaque;

    qemu_laio_process_completions_and_submit(s);
}

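/* Called when the completion eventfd is signalled by the kernel. */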
static void qemu_laio_completion_cb(EventNotifier *e)
{
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_laio_process_completions_and_submit(s);
    }
}

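/*
 * Polling-mode callback: peek at the completion ring without touching the
 * eventfd and report whether any progress was made.
 */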
static bool qemu_laio_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    LinuxAioState *s = container_of(e, LinuxAioState, e);
    struct io_event *events;

    if (!io_getevents_peek(s->ctx, &events)) {
        return false;
    }

    qemu_laio_process_completions_and_submit(s);
    return true;
}

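/*
 * Try to cancel an in-flight request.  If the kernel cannot cancel it, the
 * callback will be invoked by the event loop when the request eventually
 * completes.
 */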
static void laio_cancel(BlockAIOCB *blockacb)
{
    struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
    struct io_event event;
    int ret;

    if (laiocb->ret != -EINPROGRESS) {
        return;
    }
    ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
    laiocb->ret = -ECANCELED;
    if (ret != 0) {
        /* iocb is not cancelled, cb will be called by the event loop later */
        return;
    }

    laiocb->common.cb(laiocb->common.opaque, laiocb->ret);
}

static const AIOCBInfo laio_aiocb_info = {
    .aiocb_size = sizeof(struct qemu_laiocb),
    .cancel_async = laio_cancel,
};

static void ioq_init(LaioQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->pending);
    io_q->plugged = 0;
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}

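/*
 * Push pending requests to the kernel, keeping at most MAX_EVENTS requests
 * in flight.  A batch that fails outright fails only its head request and
 * retries the rest; EAGAIN leaves the queue blocked until completions make
 * room.
 */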
static void ioq_submit(LinuxAioState *s)
{
    int ret, len;
    struct qemu_laiocb *aiocb;
    struct iocb *iocbs[MAX_EVENTS];
    QSIMPLEQ_HEAD(, qemu_laiocb) completed;

    do {
        if (s->io_q.in_flight >= MAX_EVENTS) {
            break;
        }
        len = 0;
        QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
            iocbs[len++] = &aiocb->iocb;
            if (s->io_q.in_flight + len >= MAX_EVENTS) {
                break;
            }
        }

        ret = io_submit(s->ctx, len, iocbs);
        if (ret == -EAGAIN) {
            break;
        }
        if (ret < 0) {
            /* Fail the first request, retry the rest */
            aiocb = QSIMPLEQ_FIRST(&s->io_q.pending);
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.pending, next);
            s->io_q.in_queue--;
            aiocb->ret = ret;
            qemu_laio_process_completion(aiocb);
            continue;
        }

        s->io_q.in_flight += ret;
        s->io_q.in_queue -= ret;
        aiocb = container_of(iocbs[ret - 1], struct qemu_laiocb, iocb);
        QSIMPLEQ_SPLIT_AFTER(&s->io_q.pending, aiocb, next, &completed);
    } while (ret == len && !QSIMPLEQ_EMPTY(&s->io_q.pending));
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /* We can try to complete something right away if there are
         * still requests in flight. */
        qemu_laio_process_completions(s);
        /*
         * Even if we have completed everything (in_flight == 0), the queue
         * can still have pending requests (in_queue > 0).  We do not attempt
         * to repeat submission to avoid an I/O hang.  The reason is simple:
         * s->e is still set and the completion callback will be called
         * shortly, and all pending requests will be submitted from there.
         */
    }
}

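/*
 * laio_io_plug()/laio_io_unplug() bracket a burst of submissions so that
 * requests queue up in io_q.pending and reach the kernel in one io_submit()
 * batch when the last unplug arrives.
 */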
void laio_io_plug(BlockDriverState *bs, LinuxAioState *s)
{
    s->io_q.plugged++;
}

void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s)
{
    assert(s->io_q.plugged);
    if (--s->io_q.plugged == 0 &&
        !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}

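/*
 * Prepare the iocb for @type, enqueue the request, and submit right away
 * unless submission is plugged or blocked; a full queue forces submission
 * even while plugged.
 */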
static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
                          int type)
{
    LinuxAioState *s = laiocb->ctx;
    struct iocb *iocbs = &laiocb->iocb;
    QEMUIOVector *qiov = laiocb->qiov;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    /* Currently the Linux kernel does not support other operations */
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                __func__, type);
        return -EIO;
    }
    io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));

    QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
    s->io_q.in_queue++;
    if (!s->io_q.blocked &&
        (!s->io_q.plugged ||
         s->io_q.in_flight + s->io_q.in_queue >= MAX_EVENTS)) {
        ioq_submit(s);
    }

    return 0;
}

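/*
 * Coroutine submission path: the request state lives on the coroutine's
 * stack, and the coroutine yields until the completion path wakes it up.
 */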
int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
                                uint64_t offset, QEMUIOVector *qiov, int type)
{
    int ret;
    struct qemu_laiocb laiocb = {
        .co         = qemu_coroutine_self(),
        .nbytes     = qiov->size,
        .ctx        = s,
        .ret        = -EINPROGRESS,
        .is_read    = (type == QEMU_AIO_READ),
        .qiov       = qiov,
    };

    ret = laio_do_submit(fd, &laiocb, offset, type);
    if (ret < 0) {
        return ret;
    }

    if (laiocb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return laiocb.ret;
}

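/*
 * Callback-based submission path: allocates an ACB that is completed through
 * @cb and freed by the completion handler.
 */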
BlockAIOCB *laio_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
                        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
                        BlockCompletionFunc *cb, void *opaque, int type)
{
    struct qemu_laiocb *laiocb;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;
    int ret;

    laiocb = qemu_aio_get(&laio_aiocb_info, bs, cb, opaque);
    laiocb->nbytes = nb_sectors * BDRV_SECTOR_SIZE;
    laiocb->ctx = s;
    laiocb->ret = -EINPROGRESS;
    laiocb->is_read = (type == QEMU_AIO_READ);
    laiocb->qiov = qiov;

    ret = laio_do_submit(fd, laiocb, offset, type);
    if (ret < 0) {
        qemu_aio_unref(laiocb);
        return NULL;
    }

    return &laiocb->common;
}

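/*
 * The detach/attach pair moves the completion event notifier and BH between
 * AioContexts, e.g. when a block device changes its AioContext.
 */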
void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
    aio_set_event_notifier(old_context, &s->e, false, NULL, NULL);
    qemu_bh_delete(s->completion_bh);
    s->aio_context = NULL;
}

void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
    aio_set_event_notifier(new_context, &s->e, false,
                           qemu_laio_completion_cb,
                           qemu_laio_poll_cb);
}

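/*
 * Create a LinuxAioState: an eventfd for completion notification plus a
 * kernel AIO context sized for MAX_EVENTS requests.  Returns NULL on failure.
 */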
LinuxAioState *laio_init(void)
{
    LinuxAioState *s;

    s = g_malloc0(sizeof(*s));
    if (event_notifier_init(&s->e, false) < 0) {
        goto out_free_state;
    }

    if (io_setup(MAX_EVENTS, &s->ctx) != 0) {
        goto out_close_efd;
    }

    ioq_init(&s->io_q);

    return s;

out_close_efd:
    event_notifier_cleanup(&s->e);
out_free_state:
    g_free(s);
    return NULL;
}

void laio_cleanup(LinuxAioState *s)
{
    event_notifier_cleanup(&s->e);

    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n",
                __func__, &s->ctx);
    }
    g_free(s);
}