]>
Commit | Line | Data |
---|---|---|
00dccaf1 KW |
1 | /* |
2 | * QEMU coroutine implementation | |
3 | * | |
4 | * Copyright IBM, Corp. 2011 | |
5 | * | |
6 | * Authors: | |
7 | * Stefan Hajnoczi <[email protected]> | |
b96e9247 | 8 | * Kevin Wolf <[email protected]> |
00dccaf1 KW |
9 | * |
10 | * This work is licensed under the terms of the GNU LGPL, version 2 or later. | |
11 | * See the COPYING.LIB file in the top-level directory. | |
12 | * | |
13 | */ | |
14 | ||
15 | #ifndef QEMU_COROUTINE_H | |
16 | #define QEMU_COROUTINE_H | |
17 | ||
1de7afc9 PB |
18 | #include "qemu/queue.h" |
19 | #include "qemu/timer.h" | |
00dccaf1 KW |
20 | |
/**
 * Coroutines are a mechanism for stack switching and can be used for
 * cooperative userspace threading.  These functions provide a simple but
 * useful flavor of coroutines that is suitable for writing sequential code,
 * rather than callbacks, for operations that need to give up control while
 * waiting for events to complete.
 *
 * These functions are re-entrant and may be used outside the global mutex.
 */

/**
 * Mark a function that executes in coroutine context
 *
 * Functions that execute in coroutine context cannot be called directly from
 * normal functions.  In the future it would be nice to enable compiler or
 * static checker support for catching such errors.  This annotation might make
 * it possible and in the meantime it serves as documentation.
 *
 * For example:
 *
 *   static void coroutine_fn foo(void) {
 *       ....
 *   }
 */
#define coroutine_fn

/* Opaque handle to a coroutine; the definition lives in the implementation. */
typedef struct Coroutine Coroutine;

/**
 * Coroutine entry point
 *
 * When the coroutine is entered for the first time, opaque is passed in as an
 * argument.
 *
 * When this function returns, the coroutine is destroyed automatically and
 * execution continues in the caller who last entered the coroutine.
 */
typedef void coroutine_fn CoroutineEntry(void *opaque);
59 | ||
/**
 * Create a new coroutine
 *
 * Use qemu_coroutine_enter() to actually transfer control to the coroutine.
 * The opaque argument is passed as the argument to the entry point.
 */
Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque);

/**
 * Transfer control to a coroutine
 */
void qemu_coroutine_enter(Coroutine *coroutine);

/**
 * Transfer control to a coroutine if it's not active (i.e. part of the call
 * stack of the running coroutine).  Otherwise, do nothing.
 */
void qemu_coroutine_enter_if_inactive(Coroutine *co);

/**
 * Transfer control to a coroutine and associate it with ctx
 */
void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co);

/**
 * Transfer control back to a coroutine's caller
 *
 * This function does not return until the coroutine is re-entered using
 * qemu_coroutine_enter().
 */
void coroutine_fn qemu_coroutine_yield(void);
91 | ||
/**
 * Get the AioContext of the given coroutine
 */
AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co);

/**
 * Get the currently executing coroutine
 */
Coroutine *coroutine_fn qemu_coroutine_self(void);

/**
 * Return whether or not currently inside a coroutine
 *
 * This can be used to write functions that work both when in coroutine context
 * and when not in coroutine context.  Note that such functions cannot use the
 * coroutine_fn annotation since they work outside coroutine context.
 */
bool qemu_in_coroutine(void);

/**
 * Return true if the coroutine is currently entered
 *
 * A coroutine is "entered" if it has not yielded from the current
 * qemu_coroutine_enter() call used to run it.  This does not mean that the
 * coroutine is currently executing code since it may have transferred control
 * to another coroutine using qemu_coroutine_enter().
 *
 * When several coroutines enter each other there may be no way to know which
 * ones have already been entered.  In such situations this function can be
 * used to avoid recursively entering coroutines.
 */
bool qemu_coroutine_entered(Coroutine *co);
b96e9247 | 124 | |
b96e9247 KW |
/**
 * Provides a mutex that can be used to synchronise coroutines
 */
struct CoWaitRecord;
struct CoMutex {
    /* Count of pending lockers; 0 for a free mutex, 1 for an
     * uncontended mutex.
     */
    unsigned locked;

    /* Context that is holding the lock.  Useful to avoid spinning
     * when two coroutines on the same AioContext try to get the lock. :)
     */
    AioContext *ctx;

    /* A queue of waiters.  Elements are added atomically in front of
     * from_push.  to_pop is only populated, and popped from, by whoever
     * is in charge of the next wakeup.  This can be an unlocker or,
     * through the handoff protocol, a locker that is about to go to sleep.
     */
    QSLIST_HEAD(, CoWaitRecord) from_push, to_pop;

    /* State used by the wakeup handoff protocol mentioned above;
     * exact semantics live in the implementation.
     */
    unsigned handoff, sequence;

    /* Coroutine currently holding the mutex, if any. */
    Coroutine *holder;
};

/**
 * Initialises a CoMutex.  This must be called before any other operation is
 * used on the CoMutex.
 */
void qemu_co_mutex_init(CoMutex *mutex);

/**
 * Locks the mutex.  If the lock cannot be taken immediately, control is
 * transferred to the caller of the current coroutine.
 */
void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex);

/**
 * Unlocks the mutex and schedules the next coroutine that was waiting for this
 * lock to be run.
 */
void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex);
169 | ||
f8c6e1cb PB |
170 | |
/**
 * CoQueues are a mechanism to queue coroutines in order to continue executing
 * them later.  They are similar to condition variables, but they need help
 * from an external mutex in order to maintain thread-safety.
 */
typedef struct CoQueue {
    QSIMPLEQ_HEAD(, Coroutine) entries;
} CoQueue;

/**
 * Initialise a CoQueue.  This must be called before any other operation is
 * used on the CoQueue.
 */
void qemu_co_queue_init(CoQueue *queue);

/**
 * Adds the current coroutine to the CoQueue and transfers control to the
 * caller of the coroutine.  The mutex is unlocked during the wait and
 * locked again afterwards.
 */
#define qemu_co_queue_wait(queue, lock) \
    qemu_co_queue_wait_impl(queue, QEMU_MAKE_LOCKABLE(lock))
void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock);

/**
 * Removes the next coroutine from the CoQueue, and wake it up.
 * Returns true if a coroutine was removed, false if the queue is empty.
 */
bool coroutine_fn qemu_co_queue_next(CoQueue *queue);

/**
 * Empties the CoQueue; all coroutines are woken up.
 */
void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue);

/**
 * Removes the next coroutine from the CoQueue, and wake it up.  Unlike
 * qemu_co_queue_next, this function releases the lock during aio_co_wake
 * because it is meant to be used outside coroutine context; in that case, the
 * coroutine is entered immediately, before qemu_co_enter_next returns.
 *
 * If used in coroutine context, qemu_co_enter_next is equivalent to
 * qemu_co_queue_next.
 */
#define qemu_co_enter_next(queue, lock) \
    qemu_co_enter_next_impl(queue, QEMU_MAKE_LOCKABLE(lock))
bool qemu_co_enter_next_impl(CoQueue *queue, QemuLockable *lock);

/**
 * Checks if the CoQueue is empty.
 */
bool qemu_co_queue_empty(CoQueue *queue);
223 | ||
224 | ||
/**
 * Read/write lock for coroutines, built on top of CoMutex and CoQueue.
 */
typedef struct CoRwlock {
    /* Number of writers waiting for the lock. */
    int pending_writer;
    /* Number of readers currently holding the lock. */
    int reader;
    /* Protects the fields above and serialises lock state changes. */
    CoMutex mutex;
    /* Coroutines waiting for the lock to become available. */
    CoQueue queue;
} CoRwlock;

/**
 * Initialises a CoRwlock.  This must be called before any other operation
 * is used on the CoRwlock
 */
void qemu_co_rwlock_init(CoRwlock *lock);

/**
 * Read locks the CoRwlock.  If the lock cannot be taken immediately because
 * of a parallel writer, control is transferred to the caller of the current
 * coroutine.
 */
void qemu_co_rwlock_rdlock(CoRwlock *lock);

/**
 * Write Locks the CoRwlock from a reader.  This is a bit more efficient than
 * @qemu_co_rwlock_unlock followed by a separate @qemu_co_rwlock_wrlock.
 * However, if the lock cannot be upgraded immediately, control is transferred
 * to the caller of the current coroutine.  Also, @qemu_co_rwlock_upgrade
 * only overrides CoRwlock fairness if there are no concurrent readers, so
 * another writer might run while @qemu_co_rwlock_upgrade blocks.
 */
void qemu_co_rwlock_upgrade(CoRwlock *lock);

/**
 * Downgrades a write-side critical section to a reader.  Downgrading with
 * @qemu_co_rwlock_downgrade never blocks, unlike @qemu_co_rwlock_unlock
 * followed by @qemu_co_rwlock_rdlock.  This makes it more efficient, but
 * may also sometimes be necessary for correctness.
 */
void qemu_co_rwlock_downgrade(CoRwlock *lock);

/**
 * Write Locks the mutex.  If the lock cannot be taken immediately because
 * of a parallel reader, control is transferred to the caller of the current
 * coroutine.
 */
void qemu_co_rwlock_wrlock(CoRwlock *lock);

/**
 * Unlocks the read/write lock and schedules the next coroutine that was
 * waiting for this lock to be run.
 */
void qemu_co_rwlock_unlock(CoRwlock *lock);
275 | ||
3ab7bd19 MK |
276 | /** |
277 | * Yield the coroutine for a given duration | |
3ab7bd19 | 278 | */ |
78f1d3d6 | 279 | void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns); |
3ab7bd19 | 280 | |
9f05d0c3 MH |
281 | /** |
282 | * Yield until a file descriptor becomes readable | |
283 | * | |
284 | * Note that this function clobbers the handlers for the file descriptor. | |
285 | */ | |
286 | void coroutine_fn yield_until_fd_readable(int fd); | |
ac2662a9 | 287 | |
1a957cf9 PB |
288 | #include "qemu/lockable.h" |
289 | ||
00dccaf1 | 290 | #endif /* QEMU_COROUTINE_H */ |