/*
 * QEMU posix-aio emulation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/ioctl.h>
#include <sys/types.h>
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include <time.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>

#include "qemu-queue.h"
#include "osdep.h"
#include "sysemu.h"
#include "qemu-common.h"
#include "trace.h"
#include "block_int.h"
#include "iov.h"

#include "block/raw-posix-aio.h"

static void do_spawn_thread(void);

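/*
 * One queued asynchronous request.  The BlockDriverAIOCB 'common' part is
 * handed back to the caller; the remaining fields describe the operation
 * for the worker threads and carry the result back in 'ret'.
 */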
struct qemu_paiocb {
    BlockDriverAIOCB common;
    int aio_fildes;
    union {
        struct iovec *aio_iov;
        void *aio_ioctl_buf;
    };
    int aio_niov;
    size_t aio_nbytes;
#define aio_ioctl_cmd   aio_nbytes /* for QEMU_AIO_IOCTL */
    off_t aio_offset;

    QTAILQ_ENTRY(qemu_paiocb) node;
    int aio_type;
    ssize_t ret;
    int active;
    struct qemu_paiocb *next;
};

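/*
 * Global completion state: worker threads signal completion by writing a
 * byte to 'wfd'; the event loop watches 'rfd' and walks 'first_aio' to
 * deliver the callbacks.
 */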
typedef struct PosixAioState {
    int rfd, wfd;
    struct qemu_paiocb *first_aio;
} PosixAioState;

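/* Worker thread pool state; the thread counters and request_list below are
 * protected by 'lock'. */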
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static pthread_t thread_id;
static pthread_attr_t attr;
static int max_threads = 64;
static int cur_threads = 0;
static int idle_threads = 0;
static int new_threads = 0;     /* backlog of threads we need to create */
static int pending_threads = 0; /* threads created but not running yet */
static QEMUBH *new_thread_bh;
static QTAILQ_HEAD(, qemu_paiocb) request_list;

#ifdef CONFIG_PREADV
static int preadv_present = 1;
#else
static int preadv_present = 0;
#endif

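/* Thin wrappers around the pthread primitives that abort (via die2) on any
 * unexpected error. */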
static void die2(int err, const char *what)
{
    fprintf(stderr, "%s failed: %s\n", what, strerror(err));
    abort();
}

static void die(const char *what)
{
    die2(errno, what);
}

static void mutex_lock(pthread_mutex_t *mutex)
{
    int ret = pthread_mutex_lock(mutex);
    if (ret) die2(ret, "pthread_mutex_lock");
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
    int ret = pthread_mutex_unlock(mutex);
    if (ret) die2(ret, "pthread_mutex_unlock");
}

static int cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
                          struct timespec *ts)
{
    int ret = pthread_cond_timedwait(cond, mutex, ts);
    if (ret && ret != ETIMEDOUT) die2(ret, "pthread_cond_timedwait");
    return ret;
}

static void cond_signal(pthread_cond_t *cond)
{
    int ret = pthread_cond_signal(cond);
    if (ret) die2(ret, "pthread_cond_signal");
}

static void thread_create(pthread_t *thread, pthread_attr_t *attr,
                          void *(*start_routine)(void*), void *arg)
{
    int ret = pthread_create(thread, attr, start_routine, arg);
    if (ret) die2(ret, "pthread_create");
}

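/*
 * Execute a QEMU_AIO_IOCTL request synchronously on the worker thread.
 * Returns aio_nbytes (see the comment below) on success, -errno on failure.
 */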
static ssize_t handle_aiocb_ioctl(struct qemu_paiocb *aiocb)
{
    int ret;

    ret = ioctl(aiocb->aio_fildes, aiocb->aio_ioctl_cmd, aiocb->aio_ioctl_buf);
    if (ret == -1)
        return -errno;

    /*
     * This looks weird, but the aio code only considers a request
     * successful if it has written the full number of bytes.
     *
     * Now we overload aio_nbytes as aio_ioctl_cmd for the ioctl command,
     * so in fact we return the ioctl command here to make posix_aio_read()
     * happy..
     */
    return aiocb->aio_nbytes;
}

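/* Flush the host file to stable storage; returns 0 or -errno. */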
static ssize_t handle_aiocb_flush(struct qemu_paiocb *aiocb)
{
    int ret;

    ret = qemu_fdatasync(aiocb->aio_fildes);
    if (ret == -1)
        return -errno;
    return 0;
}

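/*
 * Wrappers for preadv/pwritev: when CONFIG_PREADV is not set the stubs
 * return -ENOSYS, so callers fall back to the linear (bounce buffer) path.
 */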
#ifdef CONFIG_PREADV

static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return preadv(fd, iov, nr_iov, offset);
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return pwritev(fd, iov, nr_iov, offset);
}

#else

static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

#endif

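/*
 * Submit the whole iovec with a single preadv/pwritev call, retrying on
 * EINTR.  Returns the byte count or -errno (-ENOSYS if preadv is absent).
 */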
static ssize_t handle_aiocb_rw_vector(struct qemu_paiocb *aiocb)
{
    ssize_t len;

    do {
        if (aiocb->aio_type & QEMU_AIO_WRITE)
            len = qemu_pwritev(aiocb->aio_fildes,
                               aiocb->aio_iov,
                               aiocb->aio_niov,
                               aiocb->aio_offset);
        else
            len = qemu_preadv(aiocb->aio_fildes,
                              aiocb->aio_iov,
                              aiocb->aio_niov,
                              aiocb->aio_offset);
    } while (len == -1 && errno == EINTR);

    if (len == -1)
        return -errno;
    return len;
}

/*
 * Reads/writes the data to/from a given linear buffer.
 *
 * Returns the number of bytes handled or -errno in case of an error. Short
 * reads are only returned if the end of the file is reached.
 */
static ssize_t handle_aiocb_rw_linear(struct qemu_paiocb *aiocb, char *buf)
{
    ssize_t offset = 0;
    ssize_t len;

    while (offset < aiocb->aio_nbytes) {
        if (aiocb->aio_type & QEMU_AIO_WRITE)
            len = pwrite(aiocb->aio_fildes,
                         (const char *)buf + offset,
                         aiocb->aio_nbytes - offset,
                         aiocb->aio_offset + offset);
        else
            len = pread(aiocb->aio_fildes,
                        buf + offset,
                        aiocb->aio_nbytes - offset,
                        aiocb->aio_offset + offset);

        if (len == -1 && errno == EINTR)
            continue;
        else if (len == -1) {
            offset = -errno;
            break;
        } else if (len == 0)
            break;

        offset += len;
    }

    return offset;
}

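/*
 * Main read/write dispatcher: use plain pread/pwrite for a single aligned
 * buffer, try preadv/pwritev for aligned vectors, and otherwise bounce
 * through a single aligned buffer allocated with qemu_blockalign().
 */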
static ssize_t handle_aiocb_rw(struct qemu_paiocb *aiocb)
{
    ssize_t nbytes;
    char *buf;

    if (!(aiocb->aio_type & QEMU_AIO_MISALIGNED)) {
        /*
         * If there is just a single buffer, and it is properly aligned
         * we can just use plain pread/pwrite without any problems.
         */
        if (aiocb->aio_niov == 1)
            return handle_aiocb_rw_linear(aiocb, aiocb->aio_iov->iov_base);

        /*
         * We have more than one iovec, and all are properly aligned.
         *
         * Try preadv/pwritev first and fall back to linearizing the
         * buffer if it's not supported.
         */
        if (preadv_present) {
            nbytes = handle_aiocb_rw_vector(aiocb);
            if (nbytes == aiocb->aio_nbytes)
                return nbytes;
            if (nbytes < 0 && nbytes != -ENOSYS)
                return nbytes;
            preadv_present = 0;
        }

        /*
         * XXX(hch): short read/write.  no easy way to handle the remainder
         *           using these interfaces.  For now retry using plain
         *           pread/pwrite?
         */
    }

    /*
     * Ok, we have to do it the hard way, copy all segments into
     * a single aligned buffer.
     */
    buf = qemu_blockalign(aiocb->common.bs, aiocb->aio_nbytes);
    if (aiocb->aio_type & QEMU_AIO_WRITE) {
        char *p = buf;
        int i;

        for (i = 0; i < aiocb->aio_niov; ++i) {
            memcpy(p, aiocb->aio_iov[i].iov_base, aiocb->aio_iov[i].iov_len);
            p += aiocb->aio_iov[i].iov_len;
        }
    }

    nbytes = handle_aiocb_rw_linear(aiocb, buf);
    if (!(aiocb->aio_type & QEMU_AIO_WRITE)) {
        char *p = buf;
        size_t count = aiocb->aio_nbytes, copy;
        int i;

        for (i = 0; i < aiocb->aio_niov && count; ++i) {
            copy = count;
            if (copy > aiocb->aio_iov[i].iov_len)
                copy = aiocb->aio_iov[i].iov_len;
            memcpy(aiocb->aio_iov[i].iov_base, p, copy);
            p += copy;
            count -= copy;
        }
    }
    qemu_vfree(buf);

    return nbytes;
}

static void posix_aio_notify_event(void);

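/*
 * Worker thread main loop: pull requests off request_list, execute them,
 * store the result and notify the event loop.  A thread that stays idle
 * for 10 seconds exits.
 */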
static void *aio_thread(void *unused)
{
    mutex_lock(&lock);
    pending_threads--;
    mutex_unlock(&lock);
    do_spawn_thread();

    while (1) {
        struct qemu_paiocb *aiocb;
        ssize_t ret = 0;
        qemu_timeval tv;
        struct timespec ts;

        qemu_gettimeofday(&tv);
        ts.tv_sec = tv.tv_sec + 10;
        ts.tv_nsec = 0;

        mutex_lock(&lock);

        while (QTAILQ_EMPTY(&request_list) &&
               !(ret == ETIMEDOUT)) {
            idle_threads++;
            ret = cond_timedwait(&cond, &lock, &ts);
            idle_threads--;
        }

        if (QTAILQ_EMPTY(&request_list))
            break;

        aiocb = QTAILQ_FIRST(&request_list);
        QTAILQ_REMOVE(&request_list, aiocb, node);
        aiocb->active = 1;
        mutex_unlock(&lock);

        switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) {
        case QEMU_AIO_READ:
            ret = handle_aiocb_rw(aiocb);
            if (ret >= 0 && ret < aiocb->aio_nbytes && aiocb->common.bs->growable) {
                /* A short read means that we have reached EOF. Pad the buffer
                 * with zeros for bytes after EOF. */
                iov_memset(aiocb->aio_iov, aiocb->aio_niov, ret,
                           0, aiocb->aio_nbytes - ret);

                ret = aiocb->aio_nbytes;
            }
            break;
        case QEMU_AIO_WRITE:
            ret = handle_aiocb_rw(aiocb);
            break;
        case QEMU_AIO_FLUSH:
            ret = handle_aiocb_flush(aiocb);
            break;
        case QEMU_AIO_IOCTL:
            ret = handle_aiocb_ioctl(aiocb);
            break;
        default:
            fprintf(stderr, "invalid aio request (0x%x)\n", aiocb->aio_type);
            ret = -EINVAL;
            break;
        }

        mutex_lock(&lock);
        aiocb->ret = ret;
        mutex_unlock(&lock);

        posix_aio_notify_event();
    }

    cur_threads--;
    mutex_unlock(&lock);

    return NULL;
}

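/*
 * Actually create one pending worker thread, blocking all signals around
 * pthread_create() so the worker starts with all signals blocked.
 */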
static void do_spawn_thread(void)
{
    sigset_t set, oldset;

    mutex_lock(&lock);
    if (!new_threads) {
        mutex_unlock(&lock);
        return;
    }

    new_threads--;
    pending_threads++;

    mutex_unlock(&lock);

    /* block all signals */
    if (sigfillset(&set)) die("sigfillset");
    if (sigprocmask(SIG_SETMASK, &set, &oldset)) die("sigprocmask");

    thread_create(&thread_id, &attr, aio_thread, NULL);

    if (sigprocmask(SIG_SETMASK, &oldset, NULL)) die("sigprocmask restore");
}

static void spawn_thread_bh_fn(void *opaque)
{
    do_spawn_thread();
}

static void spawn_thread(void)
{
    cur_threads++;
    new_threads++;
    /* If there are threads being created, they will spawn new workers, so
     * we don't spend time creating many threads in a loop holding a mutex or
     * starving the current vcpu.
     *
     * If there are no idle threads, ask the main thread to create one, so we
     * inherit the correct affinity instead of the vcpu affinity.
     */
    if (!pending_threads) {
        qemu_bh_schedule(new_thread_bh);
    }
}

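/* Queue a request for the worker pool, growing the pool if necessary,
 * and wake one worker. */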
static void qemu_paio_submit(struct qemu_paiocb *aiocb)
{
    aiocb->ret = -EINPROGRESS;
    aiocb->active = 0;
    mutex_lock(&lock);
    if (idle_threads == 0 && cur_threads < max_threads)
        spawn_thread();
    QTAILQ_INSERT_TAIL(&request_list, aiocb, node);
    mutex_unlock(&lock);
    cond_signal(&cond);
}

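/* Read back a request's result under the lock; qemu_paio_error() converts it
 * to a positive errno value (EINPROGRESS while still pending, 0 on success). */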
static ssize_t qemu_paio_return(struct qemu_paiocb *aiocb)
{
    ssize_t ret;

    mutex_lock(&lock);
    ret = aiocb->ret;
    mutex_unlock(&lock);

    return ret;
}

static int qemu_paio_error(struct qemu_paiocb *aiocb)
{
    ssize_t ret = qemu_paio_return(aiocb);

    if (ret < 0)
        ret = -ret;
    else
        ret = 0;

    return ret;
}

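/*
 * Completion handler run from the event loop: drain the notification pipe,
 * then walk the list of outstanding requests and invoke the callbacks of
 * those that have finished or been cancelled.
 */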
static void posix_aio_read(void *opaque)
{
    PosixAioState *s = opaque;
    struct qemu_paiocb *acb, **pacb;
    int ret;
    ssize_t len;

    /* read all bytes from signal pipe */
    for (;;) {
        char bytes[16];

        len = read(s->rfd, bytes, sizeof(bytes));
        if (len == -1 && errno == EINTR)
            continue; /* try again */
        if (len == sizeof(bytes))
            continue; /* more to read */
        break;
    }

    for(;;) {
        pacb = &s->first_aio;
        for(;;) {
            acb = *pacb;
            if (!acb)
                return;

            ret = qemu_paio_error(acb);
            if (ret == ECANCELED) {
                /* remove the request */
                *pacb = acb->next;
                qemu_aio_release(acb);
            } else if (ret != EINPROGRESS) {
                /* end of aio */
                if (ret == 0) {
                    ret = qemu_paio_return(acb);
                    if (ret == acb->aio_nbytes)
                        ret = 0;
                    else
                        ret = -EINVAL;
                } else {
                    ret = -ret;
                }

                trace_paio_complete(acb, acb->common.opaque, ret);

                /* remove the request */
                *pacb = acb->next;
                /* call the callback */
                acb->common.cb(acb->common.opaque, ret);
                qemu_aio_release(acb);
                break;
            } else {
                pacb = &acb->next;
            }
        }
    }
}

static int posix_aio_flush(void *opaque)
{
    PosixAioState *s = opaque;
    return !!s->first_aio;
}

static PosixAioState *posix_aio_state;

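/* Wake the event loop by writing one byte to the notification pipe; EAGAIN
 * (pipe already full) is fine because the reader drains it completely. */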
static void posix_aio_notify_event(void)
{
    char byte = 0;
    ssize_t ret;

    ret = write(posix_aio_state->wfd, &byte, sizeof(byte));
    if (ret < 0 && errno != EAGAIN)
        die("write()");
}

static void paio_remove(struct qemu_paiocb *acb)
{
    struct qemu_paiocb **pacb;

    /* remove the callback from the queue */
    pacb = &posix_aio_state->first_aio;
    for(;;) {
        if (*pacb == NULL) {
            fprintf(stderr, "paio_remove: aio request not found!\n");
            break;
        } else if (*pacb == acb) {
            *pacb = acb->next;
            qemu_aio_release(acb);
            break;
        }
        pacb = &(*pacb)->next;
    }
}

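/*
 * Cancel callback for the AIO pool: dequeue the request if no worker has
 * picked it up yet, otherwise busy-wait for the in-flight request to finish.
 */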
static void paio_cancel(BlockDriverAIOCB *blockacb)
{
    struct qemu_paiocb *acb = (struct qemu_paiocb *)blockacb;
    int active = 0;

    trace_paio_cancel(acb, acb->common.opaque);

    mutex_lock(&lock);
    if (!acb->active) {
        QTAILQ_REMOVE(&request_list, acb, node);
        acb->ret = -ECANCELED;
    } else if (acb->ret == -EINPROGRESS) {
        active = 1;
    }
    mutex_unlock(&lock);

    if (active) {
        /* fail safe: if the aio could not be canceled, we wait for
           it */
        while (qemu_paio_error(acb) == EINPROGRESS)
            ;
    }

    paio_remove(acb);
}

static AIOPool raw_aio_pool = {
    .aiocb_size         = sizeof(struct qemu_paiocb),
    .cancel             = paio_cancel,
};

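/*
 * Public entry point (declared in block/raw-posix-aio.h): allocate an AIOCB,
 * fill in the request (offset and size given in 512-byte sectors) and hand
 * it to the worker pool.
 */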
BlockDriverAIOCB *paio_submit(BlockDriverState *bs, int fd,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque, int type)
{
    struct qemu_paiocb *acb;

    acb = qemu_aio_get(&raw_aio_pool, bs, cb, opaque);
    acb->aio_type = type;
    acb->aio_fildes = fd;

    if (qiov) {
        acb->aio_iov = qiov->iov;
        acb->aio_niov = qiov->niov;
    }
    acb->aio_nbytes = nb_sectors * 512;
    acb->aio_offset = sector_num * 512;

    acb->next = posix_aio_state->first_aio;
    posix_aio_state->first_aio = acb;

    trace_paio_submit(acb, opaque, sector_num, nb_sectors, type);
    qemu_paio_submit(acb);
    return &acb->common;
}

BlockDriverAIOCB *paio_ioctl(BlockDriverState *bs, int fd,
        unsigned long int req, void *buf,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    struct qemu_paiocb *acb;

    acb = qemu_aio_get(&raw_aio_pool, bs, cb, opaque);
    acb->aio_type = QEMU_AIO_IOCTL;
    acb->aio_fildes = fd;
    acb->aio_offset = 0;
    acb->aio_ioctl_buf = buf;
    acb->aio_ioctl_cmd = req;

    acb->next = posix_aio_state->first_aio;
    posix_aio_state->first_aio = acb;

    qemu_paio_submit(acb);
    return &acb->common;
}

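/*
 * One-time initialisation: create the non-blocking notification pipe,
 * register it with the event loop and prepare the detached worker thread
 * attributes.  Safe to call more than once.
 */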
int paio_init(void)
{
    PosixAioState *s;
    int fds[2];
    int ret;

    if (posix_aio_state)
        return 0;

    s = g_malloc(sizeof(PosixAioState));

    s->first_aio = NULL;
    if (qemu_pipe(fds) == -1) {
        fprintf(stderr, "failed to create pipe\n");
        g_free(s);
        return -1;
    }

    s->rfd = fds[0];
    s->wfd = fds[1];

    fcntl(s->rfd, F_SETFL, O_NONBLOCK);
    fcntl(s->wfd, F_SETFL, O_NONBLOCK);

    qemu_aio_set_fd_handler(s->rfd, posix_aio_read, NULL, posix_aio_flush, s);

    ret = pthread_attr_init(&attr);
    if (ret)
        die2(ret, "pthread_attr_init");

    ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    if (ret)
        die2(ret, "pthread_attr_setdetachstate");

    QTAILQ_INIT(&request_list);
    new_thread_bh = qemu_bh_new(spawn_thread_bh_fn, NULL);

    posix_aio_state = s;
    return 0;
}