/*
 * QEMU coroutine implementation
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi    <[email protected]>
 *  Kevin Wolf         <[email protected]>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */
14 | ||
#ifndef QEMU_COROUTINE_H
#define QEMU_COROUTINE_H

#include "qemu/queue.h"
#include "qemu/timer.h"

20 | |
/**
 * Coroutines are a mechanism for stack switching and can be used for
 * cooperative userspace threading.  These functions provide a simple but
 * useful flavor of coroutines that is suitable for writing sequential code,
 * rather than callbacks, for operations that need to give up control while
 * waiting for events to complete.
 *
 * These functions are re-entrant and may be used outside the global mutex.
 */

/**
 * Mark a function that executes in coroutine context
 *
 * Functions that execute in coroutine context cannot be called directly from
 * normal functions.  Ideally a compiler or static checker would one day be
 * able to catch such mistakes; this annotation might make that possible, and
 * in the meantime it serves as documentation.
 *
 * For example:
 *
 *   static void coroutine_fn foo(void) {
 *       ....
 *   }
 */
#define coroutine_fn

typedef struct Coroutine Coroutine;

/**
 * Coroutine entry point
 *
 * On the first entry into the coroutine, opaque is passed in as the
 * argument.
 *
 * When this function returns, the coroutine is destroyed automatically and
 * execution continues in the caller who last entered the coroutine.
 */
typedef void coroutine_fn CoroutineEntry(void *opaque);
59 | ||
60 | /** | |
61 | * Create a new coroutine | |
62 | * | |
63 | * Use qemu_coroutine_enter() to actually transfer control to the coroutine. | |
0b8b8753 | 64 | * The opaque argument is passed as the argument to the entry point. |
00dccaf1 | 65 | */ |
0b8b8753 | 66 | Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque); |
00dccaf1 KW |
67 | |
68 | /** | |
69 | * Transfer control to a coroutine | |
00dccaf1 | 70 | */ |
0b8b8753 | 71 | void qemu_coroutine_enter(Coroutine *coroutine); |
00dccaf1 | 72 | |
536fca7f KW |
73 | /** |
74 | * Transfer control to a coroutine if it's not active (i.e. part of the call | |
75 | * stack of the running coroutine). Otherwise, do nothing. | |
76 | */ | |
77 | void qemu_coroutine_enter_if_inactive(Coroutine *co); | |
78 | ||
00dccaf1 KW |
79 | /** |
80 | * Transfer control back to a coroutine's caller | |
81 | * | |
82 | * This function does not return until the coroutine is re-entered using | |
83 | * qemu_coroutine_enter(). | |
84 | */ | |
85 | void coroutine_fn qemu_coroutine_yield(void); | |
86 | ||
87 | /** | |
88 | * Get the currently executing coroutine | |
89 | */ | |
90 | Coroutine *coroutine_fn qemu_coroutine_self(void); | |
91 | ||
92 | /** | |
93 | * Return whether or not currently inside a coroutine | |
94 | * | |
95 | * This can be used to write functions that work both when in coroutine context | |
96 | * and when not in coroutine context. Note that such functions cannot use the | |
97 | * coroutine_fn annotation since they work outside coroutine context. | |
98 | */ | |
99 | bool qemu_in_coroutine(void); | |
100 | ||
f643e469 SH |
101 | /** |
102 | * Return true if the coroutine is currently entered | |
103 | * | |
104 | * A coroutine is "entered" if it has not yielded from the current | |
105 | * qemu_coroutine_enter() call used to run it. This does not mean that the | |
106 | * coroutine is currently executing code since it may have transferred control | |
107 | * to another coroutine using qemu_coroutine_enter(). | |
108 | * | |
109 | * When several coroutines enter each other there may be no way to know which | |
110 | * ones have already been entered. In such situations this function can be | |
111 | * used to avoid recursively entering coroutines. | |
112 | */ | |
113 | bool qemu_coroutine_entered(Coroutine *co); | |
b96e9247 | 114 | |
b96e9247 KW |
115 | /** |
116 | * Provides a mutex that can be used to synchronise coroutines | |
117 | */ | |
fed20a70 | 118 | struct CoWaitRecord; |
b96e9247 | 119 | typedef struct CoMutex { |
fed20a70 PB |
120 | /* Count of pending lockers; 0 for a free mutex, 1 for an |
121 | * uncontended mutex. | |
122 | */ | |
123 | unsigned locked; | |
124 | ||
480cff63 PB |
125 | /* Context that is holding the lock. Useful to avoid spinning |
126 | * when two coroutines on the same AioContext try to get the lock. :) | |
127 | */ | |
128 | AioContext *ctx; | |
129 | ||
fed20a70 PB |
130 | /* A queue of waiters. Elements are added atomically in front of |
131 | * from_push. to_pop is only populated, and popped from, by whoever | |
132 | * is in charge of the next wakeup. This can be an unlocker or, | |
133 | * through the handoff protocol, a locker that is about to go to sleep. | |
134 | */ | |
135 | QSLIST_HEAD(, CoWaitRecord) from_push, to_pop; | |
136 | ||
137 | unsigned handoff, sequence; | |
138 | ||
0e438cdc | 139 | Coroutine *holder; |
b96e9247 KW |
140 | } CoMutex; |
141 | ||
142 | /** | |
143 | * Initialises a CoMutex. This must be called before any other operation is used | |
144 | * on the CoMutex. | |
145 | */ | |
146 | void qemu_co_mutex_init(CoMutex *mutex); | |
147 | ||
148 | /** | |
149 | * Locks the mutex. If the lock cannot be taken immediately, control is | |
150 | * transferred to the caller of the current coroutine. | |
151 | */ | |
152 | void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex); | |
153 | ||
154 | /** | |
155 | * Unlocks the mutex and schedules the next coroutine that was waiting for this | |
156 | * lock to be run. | |
157 | */ | |
158 | void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex); | |
159 | ||
f8c6e1cb PB |
160 | |
161 | /** | |
162 | * CoQueues are a mechanism to queue coroutines in order to continue executing | |
163 | * them later. | |
164 | */ | |
165 | typedef struct CoQueue { | |
166 | QSIMPLEQ_HEAD(, Coroutine) entries; | |
167 | } CoQueue; | |
168 | ||
169 | /** | |
170 | * Initialise a CoQueue. This must be called before any other operation is used | |
171 | * on the CoQueue. | |
172 | */ | |
173 | void qemu_co_queue_init(CoQueue *queue); | |
174 | ||
175 | /** | |
176 | * Adds the current coroutine to the CoQueue and transfers control to the | |
177 | * caller of the coroutine. | |
178 | */ | |
179 | void coroutine_fn qemu_co_queue_wait(CoQueue *queue); | |
180 | ||
181 | /** | |
182 | * Restarts the next coroutine in the CoQueue and removes it from the queue. | |
183 | * | |
184 | * Returns true if a coroutine was restarted, false if the queue is empty. | |
185 | */ | |
186 | bool coroutine_fn qemu_co_queue_next(CoQueue *queue); | |
187 | ||
188 | /** | |
189 | * Restarts all coroutines in the CoQueue and leaves the queue empty. | |
190 | */ | |
191 | void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue); | |
192 | ||
193 | /** | |
194 | * Enter the next coroutine in the queue | |
195 | */ | |
196 | bool qemu_co_enter_next(CoQueue *queue); | |
197 | ||
198 | /** | |
199 | * Checks if the CoQueue is empty. | |
200 | */ | |
201 | bool qemu_co_queue_empty(CoQueue *queue); | |
202 | ||
203 | ||
12888904 AK |
204 | typedef struct CoRwlock { |
205 | bool writer; | |
206 | int reader; | |
207 | CoQueue queue; | |
208 | } CoRwlock; | |
209 | ||
210 | /** | |
211 | * Initialises a CoRwlock. This must be called before any other operation | |
212 | * is used on the CoRwlock | |
213 | */ | |
214 | void qemu_co_rwlock_init(CoRwlock *lock); | |
215 | ||
216 | /** | |
217 | * Read locks the CoRwlock. If the lock cannot be taken immediately because | |
218 | * of a parallel writer, control is transferred to the caller of the current | |
219 | * coroutine. | |
220 | */ | |
221 | void qemu_co_rwlock_rdlock(CoRwlock *lock); | |
222 | ||
223 | /** | |
224 | * Write Locks the mutex. If the lock cannot be taken immediately because | |
225 | * of a parallel reader, control is transferred to the caller of the current | |
226 | * coroutine. | |
227 | */ | |
228 | void qemu_co_rwlock_wrlock(CoRwlock *lock); | |
229 | ||
230 | /** | |
231 | * Unlocks the read/write lock and schedules the next coroutine that was | |
232 | * waiting for this lock to be run. | |
233 | */ | |
234 | void qemu_co_rwlock_unlock(CoRwlock *lock); | |
235 | ||
3ab7bd19 MK |
236 | /** |
237 | * Yield the coroutine for a given duration | |
238 | * | |
239 | * Behaves similarly to co_sleep_ns(), but the sleeping coroutine will be | |
87f68d31 | 240 | * resumed when using aio_poll(). |
3ab7bd19 MK |
241 | */ |
242 | void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type, | |
243 | int64_t ns); | |
244 | ||
9f05d0c3 MH |
245 | /** |
246 | * Yield until a file descriptor becomes readable | |
247 | * | |
248 | * Note that this function clobbers the handlers for the file descriptor. | |
249 | */ | |
250 | void coroutine_fn yield_until_fd_readable(int fd); | |

#endif /* QEMU_COROUTINE_H */