/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "block.h"
#include "sys-queue.h"
#include "qemu_socket.h"

typedef struct AioHandler AioHandler;

/* The list of registered AIO handlers */
static LIST_HEAD(, AioHandler) aio_handlers;

/* This is a simple lock used to protect the aio_handlers list.  Specifically,
 * it's used to ensure that no callbacks are removed while we're walking and
 * dispatching callbacks.
 */
static int walking_handlers;

struct AioHandler
{
    int fd;                       /* file descriptor being monitored */
    IOHandler *io_read;           /* invoked when fd becomes readable */
    IOHandler *io_write;          /* invoked when fd becomes writable */
    AioFlushHandler *io_flush;    /* returns non-zero while requests are pending */
    int deleted;                  /* removal deferred until the current walk finishes */
    void *opaque;                 /* passed back to the callbacks */
    LIST_ENTRY(AioHandler) node;
};

static AioHandler *find_aio_handler(int fd)
{
    AioHandler *node;

    LIST_FOREACH(node, &aio_handlers, node) {
        if (node->fd == fd)
            return node;
    }

    return NULL;
}

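/* Register or update the AIO handler for fd.
 *
 * Passing NULL for both io_read and io_write unregisters any existing
 * handler for fd; otherwise a handler is created (or updated in place)
 * with the given callbacks.  io_flush, when non-NULL, is polled by
 * qemu_aio_wait() and qemu_aio_flush() to learn whether requests are
 * still outstanding on this fd.  Returns 0 on success, -ENOMEM if the
 * handler could not be allocated.
 */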
int qemu_aio_set_fd_handler(int fd,
                            IOHandler *io_read,
                            IOHandler *io_write,
                            AioFlushHandler *io_flush,
                            void *opaque)
{
    AioHandler *node;

    node = find_aio_handler(fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (walking_handlers)
                node->deleted = 1;
            else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                LIST_REMOVE(node, node);
                qemu_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = qemu_mallocz(sizeof(AioHandler));
            if (node == NULL)
                return -ENOMEM;
            node->fd = fd;
            LIST_INSERT_HEAD(&aio_handlers, node, node);
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_flush = io_flush;
        node->opaque = opaque;
    }

    qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);

    return 0;
}

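/* A minimal usage sketch, not part of this file: how a caller that completes
 * its own AIO requests over an fd might plug into the API above.  MyAIOState,
 * my_aio_readable, my_aio_flush, my_aio_attach and my_aio_detach are
 * hypothetical names invented for illustration only.
 */
#if 0
typedef struct MyAIOState {
    int fd;              /* pipe or eventfd signalled on request completion */
    int nr_outstanding;  /* number of requests still in flight */
} MyAIOState;

static void my_aio_readable(void *opaque)
{
    MyAIOState *s = opaque;
    /* drain the completion notification and finish the request here */
    s->nr_outstanding--;
}

static int my_aio_flush(void *opaque)
{
    MyAIOState *s = opaque;
    /* non-zero keeps qemu_aio_wait()/qemu_aio_flush() waiting on our fd */
    return s->nr_outstanding != 0;
}

static void my_aio_attach(MyAIOState *s)
{
    qemu_aio_set_fd_handler(s->fd, my_aio_readable, NULL, my_aio_flush, s);
}

static void my_aio_detach(MyAIOState *s)
{
    /* NULL read and write handlers unregister the fd */
    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
}
#endif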
/* Wait for all outstanding AIO requests to complete.
 *
 * Keeps polling the registered io_flush callbacks and running
 * qemu_aio_wait() until no handler reports pending requests.
 */
void qemu_aio_flush(void)
{
    AioHandler *node;
    int ret;

    do {
        ret = 0;

        LIST_FOREACH(node, &aio_handlers, node) {
            /* io_flush may be NULL if the handler never has pending requests */
            if (node->io_flush)
                ret |= node->io_flush(node->opaque);
        }

        qemu_aio_wait();
    } while (ret > 0);
}

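/* Run one iteration of the AIO event loop.
 *
 * Pending bottom halves run first; if any did work, return immediately.
 * Otherwise select() on every registered fd whose io_flush callback reports
 * requests in flight (handlers registered without io_flush are always
 * included) and dispatch the read/write callbacks for the fds that became
 * ready.  Returns without blocking when no fd qualifies.
 */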
void qemu_aio_wait(void)
{
    int ret;

    if (qemu_bh_poll())
        return;

    do {
        AioHandler *node;
        fd_set rdfds, wrfds;
        int max_fd = -1;

        walking_handlers = 1;

        FD_ZERO(&rdfds);
        FD_ZERO(&wrfds);

        /* fill fd sets */
        LIST_FOREACH(node, &aio_handlers, node) {
            /* If there aren't pending AIO operations, don't invoke callbacks.
             * Otherwise, if there are no AIO requests, qemu_aio_wait() would
             * wait indefinitely.
             */
            if (node->io_flush && node->io_flush(node->opaque) == 0)
                continue;

            if (!node->deleted && node->io_read) {
                FD_SET(node->fd, &rdfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
            if (!node->deleted && node->io_write) {
                FD_SET(node->fd, &wrfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
        }

        walking_handlers = 0;

        /* No AIO operations?  Get us out of here */
        if (max_fd == -1)
            break;

        /* wait until next event */
        ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);
        if (ret == -1 && errno == EINTR)
            continue;

        /* if we have any readable fds, dispatch event */
        if (ret > 0) {
            walking_handlers = 1;

            /* we have to walk very carefully in case
             * qemu_aio_set_fd_handler is called while we're walking */
            node = LIST_FIRST(&aio_handlers);
            while (node) {
                AioHandler *tmp;

                if (!node->deleted &&
                    FD_ISSET(node->fd, &rdfds) &&
                    node->io_read) {
                    node->io_read(node->opaque);
                }
                if (!node->deleted &&
                    FD_ISSET(node->fd, &wrfds) &&
                    node->io_write) {
                    node->io_write(node->opaque);
                }

                tmp = node;
                node = LIST_NEXT(node, node);

                if (tmp->deleted) {
                    LIST_REMOVE(tmp, node);
                    qemu_free(tmp);
                }
            }

            walking_handlers = 0;
        }
    } while (ret == 0);
}