/*
 * QEMU posix-aio emulation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <sys/ioctl.h>
#include <sys/types.h>
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include <time.h>
#include <signal.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>

#include "sys-queue.h"
#include "osdep.h"
#include "qemu-common.h"
#include "block_int.h"

#include "block/raw-posix-aio.h"


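/*
 * One queued asynchronous request, handed from the emulated-AIO layer to a
 * worker thread.  aio_nbytes doubles as the ioctl command for
 * QEMU_AIO_IOCTL requests (see the aio_ioctl_cmd define below).
 */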
struct qemu_paiocb {
    BlockDriverAIOCB common;
    int aio_fildes;
    union {
        struct iovec *aio_iov;
        void *aio_ioctl_buf;
    };
    int aio_niov;
    size_t aio_nbytes;
#define aio_ioctl_cmd   aio_nbytes /* for QEMU_AIO_IOCTL */
    int ev_signo;
    off_t aio_offset;

    TAILQ_ENTRY(qemu_paiocb) node;
    int aio_type;
    ssize_t ret;
    int active;
    struct qemu_paiocb *next;
};

typedef struct PosixAioState {
    int rfd, wfd;
    struct qemu_paiocb *first_aio;
} PosixAioState;


static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static pthread_t thread_id;
static pthread_attr_t attr;
static int max_threads = 64;
static int cur_threads = 0;
static int idle_threads = 0;
static TAILQ_HEAD(, qemu_paiocb) request_list;

#ifdef CONFIG_PREADV
static int preadv_present = 1;
#else
static int preadv_present = 0;
#endif

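/*
 * Thin wrappers around the pthread primitives and common error paths:
 * any unexpected failure is treated as fatal and aborts the process.
 */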
static void die2(int err, const char *what)
{
    fprintf(stderr, "%s failed: %s\n", what, strerror(err));
    abort();
}

static void die(const char *what)
{
    die2(errno, what);
}

static void mutex_lock(pthread_mutex_t *mutex)
{
    int ret = pthread_mutex_lock(mutex);
    if (ret) die2(ret, "pthread_mutex_lock");
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
    int ret = pthread_mutex_unlock(mutex);
    if (ret) die2(ret, "pthread_mutex_unlock");
}

static int cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
                          struct timespec *ts)
{
    int ret = pthread_cond_timedwait(cond, mutex, ts);
    if (ret && ret != ETIMEDOUT) die2(ret, "pthread_cond_timedwait");
    return ret;
}

static void cond_signal(pthread_cond_t *cond)
{
    int ret = pthread_cond_signal(cond);
    if (ret) die2(ret, "pthread_cond_signal");
}

static void thread_create(pthread_t *thread, pthread_attr_t *attr,
                          void *(*start_routine)(void*), void *arg)
{
    int ret = pthread_create(thread, attr, start_routine, arg);
    if (ret) die2(ret, "pthread_create");
}

static size_t handle_aiocb_ioctl(struct qemu_paiocb *aiocb)
{
    int ret;

    ret = ioctl(aiocb->aio_fildes, aiocb->aio_ioctl_cmd, aiocb->aio_ioctl_buf);
    if (ret == -1)
        return -errno;

    /*
     * This looks weird, but the aio code only considers a request
     * successful if it has written the full number of bytes.
     *
     * Now we overload aio_nbytes as aio_ioctl_cmd for the ioctl command,
     * so in fact we return the ioctl command here to make posix_aio_read()
     * happy..
     */
    return aiocb->aio_nbytes;
}

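/*
 * Wrappers for preadv()/pwritev(): the real syscalls are only used when
 * CONFIG_PREADV was detected at configure time; otherwise the stubs return
 * -ENOSYS and preadv_present stays 0, so the vectored path is never taken.
 */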
#ifdef CONFIG_PREADV

static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return preadv(fd, iov, nr_iov, offset);
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return pwritev(fd, iov, nr_iov, offset);
}

#else

static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

#endif

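/*
 * Issue a single vectored read or write for the whole request, retrying
 * on EINTR.  Returns the byte count from preadv/pwritev, or -errno.
 */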
static size_t handle_aiocb_rw_vector(struct qemu_paiocb *aiocb)
{
    size_t offset = 0;
    ssize_t len;

    do {
        if (aiocb->aio_type & QEMU_AIO_WRITE)
            len = qemu_pwritev(aiocb->aio_fildes,
                               aiocb->aio_iov,
                               aiocb->aio_niov,
                               aiocb->aio_offset + offset);
        else
            len = qemu_preadv(aiocb->aio_fildes,
                              aiocb->aio_iov,
                              aiocb->aio_niov,
                              aiocb->aio_offset + offset);
    } while (len == -1 && errno == EINTR);

    if (len == -1)
        return -errno;
    return len;
}

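/*
 * Read or write a contiguous buffer with pread/pwrite, looping until the
 * whole request is transferred, EOF is hit, or an error occurs.  Returns
 * the number of bytes transferred, or -errno on failure.
 */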
static size_t handle_aiocb_rw_linear(struct qemu_paiocb *aiocb, char *buf)
{
    size_t offset = 0;
    size_t len;

    while (offset < aiocb->aio_nbytes) {
        if (aiocb->aio_type & QEMU_AIO_WRITE)
            len = pwrite(aiocb->aio_fildes,
                         (const char *)buf + offset,
                         aiocb->aio_nbytes - offset,
                         aiocb->aio_offset + offset);
        else
            len = pread(aiocb->aio_fildes,
                        buf + offset,
                        aiocb->aio_nbytes - offset,
                        aiocb->aio_offset + offset);

        if (len == -1 && errno == EINTR)
            continue;
        else if (len == -1) {
            offset = -errno;
            break;
        } else if (len == 0)
            break;

        offset += len;
    }

    return offset;
}

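/*
 * Dispatch a read/write request: use pread/pwrite directly for a single
 * aligned buffer, try preadv/pwritev for aligned vectors, and otherwise
 * bounce the data through a single 512-byte-aligned buffer.
 */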
static size_t handle_aiocb_rw(struct qemu_paiocb *aiocb)
{
    size_t nbytes;
    char *buf;

    if (!(aiocb->aio_type & QEMU_AIO_MISALIGNED)) {
        /*
         * If there is just a single buffer, and it is properly aligned
         * we can just use plain pread/pwrite without any problems.
         */
        if (aiocb->aio_niov == 1)
             return handle_aiocb_rw_linear(aiocb, aiocb->aio_iov->iov_base);

        /*
         * We have more than one iovec, and all are properly aligned.
         *
         * Try preadv/pwritev first and fall back to linearizing the
         * buffer if it's not supported.
         */
        if (preadv_present) {
            nbytes = handle_aiocb_rw_vector(aiocb);
            if (nbytes == aiocb->aio_nbytes)
                return nbytes;
            if (nbytes < 0 && nbytes != -ENOSYS)
                return nbytes;
            preadv_present = 0;
        }

        /*
         * XXX(hch): short read/write.  no easy way to handle the remainder
         *           using these interfaces.  For now retry using plain
         *           pread/pwrite?
         */
    }

    /*
     * Ok, we have to do it the hard way, copy all segments into
     * a single aligned buffer.
     */
    buf = qemu_memalign(512, aiocb->aio_nbytes);
    if (aiocb->aio_type & QEMU_AIO_WRITE) {
        char *p = buf;
        int i;

        for (i = 0; i < aiocb->aio_niov; ++i) {
            memcpy(p, aiocb->aio_iov[i].iov_base, aiocb->aio_iov[i].iov_len);
            p += aiocb->aio_iov[i].iov_len;
        }
    }

    nbytes = handle_aiocb_rw_linear(aiocb, buf);
    if (!(aiocb->aio_type & QEMU_AIO_WRITE)) {
        char *p = buf;
        size_t count = aiocb->aio_nbytes, copy;
        int i;

        for (i = 0; i < aiocb->aio_niov && count; ++i) {
            copy = count;
            if (copy > aiocb->aio_iov[i].iov_len)
                copy = aiocb->aio_iov[i].iov_len;
            memcpy(aiocb->aio_iov[i].iov_base, p, copy);
            p += copy;
            count -= copy;
        }
    }
    qemu_vfree(buf);

    return nbytes;
}

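/*
 * Worker thread body: block all signals, then loop pulling requests off
 * request_list.  A thread that finds the list empty for about ten seconds
 * exits.  Completion is reported by storing the result in aiocb->ret and
 * signalling the process with the per-request signal (SIGUSR2).
 */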
static void *aio_thread(void *unused)
{
    pid_t pid;
    sigset_t set;

    pid = getpid();

    /* block all signals */
    if (sigfillset(&set)) die("sigfillset");
    if (sigprocmask(SIG_BLOCK, &set, NULL)) die("sigprocmask");

    while (1) {
        struct qemu_paiocb *aiocb;
        size_t ret = 0;
        qemu_timeval tv;
        struct timespec ts;

        qemu_gettimeofday(&tv);
        ts.tv_sec = tv.tv_sec + 10;
        ts.tv_nsec = 0;

        mutex_lock(&lock);

        while (TAILQ_EMPTY(&request_list) &&
               !(ret == ETIMEDOUT)) {
            ret = cond_timedwait(&cond, &lock, &ts);
        }

        if (TAILQ_EMPTY(&request_list))
            break;

        aiocb = TAILQ_FIRST(&request_list);
        TAILQ_REMOVE(&request_list, aiocb, node);
        aiocb->active = 1;
        idle_threads--;
        mutex_unlock(&lock);

        switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) {
        case QEMU_AIO_READ:
        case QEMU_AIO_WRITE:
            ret = handle_aiocb_rw(aiocb);
            break;
        case QEMU_AIO_IOCTL:
            ret = handle_aiocb_ioctl(aiocb);
            break;
        default:
            fprintf(stderr, "invalid aio request (0x%x)\n", aiocb->aio_type);
            ret = -EINVAL;
            break;
        }

        mutex_lock(&lock);
        aiocb->ret = ret;
        idle_threads++;
        mutex_unlock(&lock);

        if (kill(pid, aiocb->ev_signo)) die("kill failed");
    }

    idle_threads--;
    cur_threads--;
    mutex_unlock(&lock);

    return NULL;
}

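/* Start one more worker thread; called with the lock held. */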
static void spawn_thread(void)
{
    cur_threads++;
    idle_threads++;
    thread_create(&thread_id, &attr, aio_thread, NULL);
}

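/*
 * Queue a prepared request for the worker pool, spawning a new thread if
 * none is idle, and wake up a waiting worker.
 */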
static void qemu_paio_submit(struct qemu_paiocb *aiocb)
{
    aiocb->ret = -EINPROGRESS;
    aiocb->active = 0;
    mutex_lock(&lock);
    if (idle_threads == 0 && cur_threads < max_threads)
        spawn_thread();
    TAILQ_INSERT_TAIL(&request_list, aiocb, node);
    mutex_unlock(&lock);
    cond_signal(&cond);
}

static ssize_t qemu_paio_return(struct qemu_paiocb *aiocb)
{
    ssize_t ret;

    mutex_lock(&lock);
    ret = aiocb->ret;
    mutex_unlock(&lock);

    return ret;
}

static int qemu_paio_error(struct qemu_paiocb *aiocb)
{
    ssize_t ret = qemu_paio_return(aiocb);

    if (ret < 0)
        ret = -ret;
    else
        ret = 0;

    return ret;
}

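/*
 * fd handler for the read end of the completion pipe: drain the pipe,
 * then walk the list of outstanding requests and complete every request
 * that has finished, releasing cancelled ones without invoking their
 * callback.
 */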
static void posix_aio_read(void *opaque)
{
    PosixAioState *s = opaque;
    struct qemu_paiocb *acb, **pacb;
    int ret;
    ssize_t len;

    /* read all bytes from signal pipe */
    for (;;) {
        char bytes[16];

        len = read(s->rfd, bytes, sizeof(bytes));
        if (len == -1 && errno == EINTR)
            continue; /* try again */
        if (len == sizeof(bytes))
            continue; /* more to read */
        break;
    }

    for(;;) {
        pacb = &s->first_aio;
        for(;;) {
            acb = *pacb;
            if (!acb)
                goto the_end;
            ret = qemu_paio_error(acb);
            if (ret == ECANCELED) {
                /* remove the request */
                *pacb = acb->next;
                qemu_aio_release(acb);
            } else if (ret != EINPROGRESS) {
                /* end of aio */
                if (ret == 0) {
                    ret = qemu_paio_return(acb);
                    if (ret == acb->aio_nbytes)
                        ret = 0;
                    else
                        ret = -EINVAL;
                } else {
                    ret = -ret;
                }
                /* remove the request */
                *pacb = acb->next;
                /* call the callback */
                acb->common.cb(acb->common.opaque, ret);
                qemu_aio_release(acb);
                break;
            } else {
                pacb = &acb->next;
            }
        }
    }
 the_end: ;
}

static int posix_aio_flush(void *opaque)
{
    PosixAioState *s = opaque;
    return !!s->first_aio;
}

static PosixAioState *posix_aio_state;

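/*
 * Signal handler for the per-request completion signal (SIGUSR2): write a
 * byte to the completion pipe so the main loop notices finished requests,
 * then call qemu_service_io().
 */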
static void aio_signal_handler(int signum)
{
    if (posix_aio_state) {
        char byte = 0;

        write(posix_aio_state->wfd, &byte, sizeof(byte));
    }

    qemu_service_io();
}

static void paio_remove(struct qemu_paiocb *acb)
{
    struct qemu_paiocb **pacb;

    /* remove the callback from the queue */
    pacb = &posix_aio_state->first_aio;
    for(;;) {
        if (*pacb == NULL) {
            fprintf(stderr, "paio_remove: aio request not found!\n");
            break;
        } else if (*pacb == acb) {
            *pacb = acb->next;
            qemu_aio_release(acb);
            break;
        }
        pacb = &(*pacb)->next;
    }
}

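/*
 * BlockDriver cancel hook: requests still sitting in request_list are
 * cancelled outright; a request already picked up by a worker is waited
 * for by polling its status before it is removed from the list.
 */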
static void paio_cancel(BlockDriverAIOCB *blockacb)
{
    struct qemu_paiocb *acb = (struct qemu_paiocb *)blockacb;
    int active = 0;

    mutex_lock(&lock);
    if (!acb->active) {
        TAILQ_REMOVE(&request_list, acb, node);
        acb->ret = -ECANCELED;
    } else if (acb->ret == -EINPROGRESS) {
        active = 1;
    }
    mutex_unlock(&lock);

    if (active) {
        /* fail safe: if the aio could not be canceled, we wait for
           it */
        while (qemu_paio_error(acb) == EINPROGRESS)
            ;
    }

    paio_remove(acb);
}

static AIOPool raw_aio_pool = {
    .aiocb_size         = sizeof(struct qemu_paiocb),
    .cancel             = paio_cancel,
};

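/*
 * Entry point used by the raw POSIX block driver: allocate an AIOCB from
 * raw_aio_pool, fill it in for a 512-byte-sector request and hand it to
 * the worker pool.  A hypothetical caller (sketch, not taken from this
 * file; aio_ctx, cb and opaque are whatever the caller already has) would
 * look roughly like:
 *
 *     acb = paio_submit(bs, aio_ctx, fd, sector_num, qiov, nb_sectors,
 *                       cb, opaque, QEMU_AIO_READ);
 *     if (!acb)
 *         ...handle allocation failure...
 */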
BlockDriverAIOCB *paio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque, int type)
{
    struct qemu_paiocb *acb;

    acb = qemu_aio_get(&raw_aio_pool, bs, cb, opaque);
    if (!acb)
        return NULL;
    acb->aio_type = type;
    acb->aio_fildes = fd;
    acb->ev_signo = SIGUSR2;
    acb->aio_iov = qiov->iov;
    acb->aio_niov = qiov->niov;
    acb->aio_nbytes = nb_sectors * 512;
    acb->aio_offset = sector_num * 512;

    acb->next = posix_aio_state->first_aio;
    posix_aio_state->first_aio = acb;

    qemu_paio_submit(acb);
    return &acb->common;
}

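/*
 * Same as paio_submit() but for an asynchronous ioctl; aio_nbytes carries
 * the ioctl request code via the aio_ioctl_cmd alias.
 */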
BlockDriverAIOCB *paio_ioctl(BlockDriverState *bs, int fd,
        unsigned long int req, void *buf,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    struct qemu_paiocb *acb;

    acb = qemu_aio_get(&raw_aio_pool, bs, cb, opaque);
    if (!acb)
        return NULL;
    acb->aio_type = QEMU_AIO_IOCTL;
    acb->aio_fildes = fd;
    acb->ev_signo = SIGUSR2;
    acb->aio_offset = 0;
    acb->aio_ioctl_buf = buf;
    acb->aio_ioctl_cmd = req;

    acb->next = posix_aio_state->first_aio;
    posix_aio_state->first_aio = acb;

    qemu_paio_submit(acb);
    return &acb->common;
}

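/*
 * One-time setup: install the SIGUSR2 handler, create the non-blocking
 * completion pipe, register its read end with the AIO fd handler, and
 * prepare the detached-thread attributes and the request list.  Safe to
 * call more than once; subsequent calls return the existing state.
 */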
void *paio_init(void)
{
    struct sigaction act;
    PosixAioState *s;
    int fds[2];
    int ret;

    if (posix_aio_state)
        return posix_aio_state;

    s = qemu_malloc(sizeof(PosixAioState));

    sigfillset(&act.sa_mask);
    act.sa_flags = 0; /* do not restart syscalls to interrupt select() */
    act.sa_handler = aio_signal_handler;
    sigaction(SIGUSR2, &act, NULL);

    s->first_aio = NULL;
    if (pipe(fds) == -1) {
        fprintf(stderr, "failed to create pipe\n");
        return NULL;
    }

    s->rfd = fds[0];
    s->wfd = fds[1];

    fcntl(s->rfd, F_SETFL, O_NONBLOCK);
    fcntl(s->wfd, F_SETFL, O_NONBLOCK);

    qemu_aio_set_fd_handler(s->rfd, posix_aio_read, NULL, posix_aio_flush, s);

    ret = pthread_attr_init(&attr);
    if (ret)
        die2(ret, "pthread_attr_init");

    ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    if (ret)
        die2(ret, "pthread_attr_setdetachstate");

    TAILQ_INIT(&request_list);

    posix_aio_state = s;

    return posix_aio_state;
}