/*
 * coroutine queues and locks
 *
 * Copyright (c) 2011 Kevin Wolf <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * The lock-free mutex implementation is based on OSv
 * (core/lfmutex.cc, include/lockfree/mutex.hh).
 * Copyright (C) 2013 Cloudius Systems, Ltd.
 */

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "qemu/coroutine_int.h"
#include "qemu/processor.h"
#include "qemu/queue.h"
#include "block/aio.h"
#include "trace.h"

void qemu_co_queue_init(CoQueue *queue)
{
    QSIMPLEQ_INIT(&queue->entries);
}

void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock,
                                          CoQueueWaitFlags flags)
{
    Coroutine *self = qemu_coroutine_self();
    if (flags & CO_QUEUE_WAIT_FRONT) {
        QSIMPLEQ_INSERT_HEAD(&queue->entries, self, co_queue_next);
    } else {
        QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);
    }

    if (lock) {
        qemu_lockable_unlock(lock);
    }

    /* There is no race condition here.  Other threads will call
     * aio_co_schedule on our AioContext, which can reenter this
     * coroutine but only after this yield and after the main loop
     * has gone through the next iteration.
     */
    qemu_coroutine_yield();
    assert(qemu_in_coroutine());

    /* TODO: OSv implements wait morphing here, where the wakeup
     * primitive automatically places the woken coroutine on the
     * mutex's queue.  This avoids the thundering herd effect.
     * This could be implemented for CoMutexes, but not really for
     * other cases of QemuLockable.
     */
    if (lock) {
        qemu_lockable_lock(lock);
    }
}
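
/*
 * Illustrative sketch, not part of QEMU: qemu_co_queue_wait() (the macro
 * wrapper around the function above) is typically used like a condition
 * variable.  FlagState, co_flag_wait() and co_flag_set() are hypothetical
 * names invented for this example.
 */
#ifdef COQUEUE_CONDVAR_EXAMPLE
typedef struct FlagState {
    CoMutex lock;
    CoQueue waiters;
    bool ready;
} FlagState;

static void coroutine_fn co_flag_wait(FlagState *s)
{
    qemu_co_mutex_lock(&s->lock);
    while (!s->ready) {
        /* Queues this coroutine and drops s->lock across the yield.  */
        qemu_co_queue_wait(&s->waiters, &s->lock);
    }
    qemu_co_mutex_unlock(&s->lock);
}

static void coroutine_fn co_flag_set(FlagState *s)
{
    qemu_co_mutex_lock(&s->lock);
    s->ready = true;
    qemu_co_queue_restart_all(&s->waiters);
    qemu_co_mutex_unlock(&s->lock);
}
#endif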

bool qemu_co_enter_next_impl(CoQueue *queue, QemuLockable *lock)
{
    Coroutine *next;

    next = QSIMPLEQ_FIRST(&queue->entries);
    if (!next) {
        return false;
    }

    QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
    if (lock) {
        qemu_lockable_unlock(lock);
    }
    aio_co_wake(next);
    if (lock) {
        qemu_lockable_lock(lock);
    }
    return true;
}

bool coroutine_fn qemu_co_queue_next(CoQueue *queue)
{
    /* No unlock/lock needed in coroutine context.  */
    return qemu_co_enter_next_impl(queue, NULL);
}

void qemu_co_enter_all_impl(CoQueue *queue, QemuLockable *lock)
{
    while (qemu_co_enter_next_impl(queue, lock)) {
        /* just loop */
    }
}

void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue)
{
    /* No unlock/lock needed in coroutine context.  */
    qemu_co_enter_all_impl(queue, NULL);
}

bool qemu_co_queue_empty(CoQueue *queue)
{
    return QSIMPLEQ_FIRST(&queue->entries) == NULL;
}
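
/*
 * Illustrative sketch, not part of QEMU: qemu_co_enter_next() (the macro
 * wrapper around qemu_co_enter_next_impl above) can wake queued coroutines
 * one at a time from outside coroutine context, e.g. from a bottom half.
 * kick_lock, kick_queue and kick_one are hypothetical names for this
 * example.
 */
#ifdef COQUEUE_KICK_EXAMPLE
static QemuMutex kick_lock;
static CoQueue kick_queue;

static void kick_one(void)
{
    qemu_mutex_lock(&kick_lock);
    /* Drops and re-takes kick_lock around aio_co_wake().  */
    qemu_co_enter_next(&kick_queue, &kick_lock);
    qemu_mutex_unlock(&kick_lock);
}
#endif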

/* The wait records are handled with a multiple-producer, single-consumer
 * lock-free queue.  There cannot be two concurrent pop_waiter() calls
 * because pop_waiter() can only be called while mutex->handoff is zero.
 * This can happen in three cases:
 * - in qemu_co_mutex_unlock, before the hand-off protocol has started.
 *   In this case, qemu_co_mutex_lock will see mutex->handoff == 0 and
 *   not take part in the handoff.
 * - in qemu_co_mutex_lock, if it steals the hand-off responsibility from
 *   qemu_co_mutex_unlock.  In this case, qemu_co_mutex_unlock will fail
 *   the cmpxchg (it will see either 0 or the next sequence value) and
 *   exit.  The next hand-off cannot begin until qemu_co_mutex_lock has
 *   woken up someone.
 * - in qemu_co_mutex_unlock, if it takes the hand-off token itself.
 *   In this case another iteration starts with mutex->handoff == 0;
 *   a concurrent qemu_co_mutex_lock will fail the cmpxchg, and
 *   qemu_co_mutex_unlock will go back to case (1).
 *
 * The following functions manage this queue.
 */
typedef struct CoWaitRecord {
    Coroutine *co;
    QSLIST_ENTRY(CoWaitRecord) next;
} CoWaitRecord;

static void coroutine_fn push_waiter(CoMutex *mutex, CoWaitRecord *w)
{
    w->co = qemu_coroutine_self();
    QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next);
}

static void move_waiters(CoMutex *mutex)
{
    QSLIST_HEAD(, CoWaitRecord) reversed;
    QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push);
    while (!QSLIST_EMPTY(&reversed)) {
        CoWaitRecord *w = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, next);
        QSLIST_INSERT_HEAD(&mutex->to_pop, w, next);
    }
}

static CoWaitRecord *pop_waiter(CoMutex *mutex)
{
    CoWaitRecord *w;

    if (QSLIST_EMPTY(&mutex->to_pop)) {
        move_waiters(mutex);
        if (QSLIST_EMPTY(&mutex->to_pop)) {
            return NULL;
        }
    }
    w = QSLIST_FIRST(&mutex->to_pop);
    QSLIST_REMOVE_HEAD(&mutex->to_pop, next);
    return w;
}

static bool has_waiters(CoMutex *mutex)
{
    /* Waiters exist if either list is non-empty.  */
    return !QSLIST_EMPTY(&mutex->to_pop) || !QSLIST_EMPTY(&mutex->from_push);
}
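
/*
 * Illustrative sketch, not part of QEMU: the same multiple-producer,
 * single-consumer scheme written with C11 atomics, to show what
 * QSLIST_INSERT_HEAD_ATOMIC and QSLIST_MOVE_ATOMIC provide above.
 * Node, lf_push and lf_pop are hypothetical names for this example.
 */
#ifdef LOCKFREE_QUEUE_EXAMPLE
#include <stdatomic.h>
#include <stddef.h>

typedef struct Node {
    struct Node *next;
} Node;

static _Atomic(Node *) from_push_head;  /* producers push here */
static Node *to_pop_head;               /* consumer-private, FIFO order */

/* Any thread: lock-free push onto a LIFO list.  */
static void lf_push(Node *n)
{
    Node *old = atomic_load(&from_push_head);
    do {
        n->next = old;
    } while (!atomic_compare_exchange_weak(&from_push_head, &old, n));
}

/* Single consumer: steal the whole LIFO list at once, then reverse it
 * onto to_pop_head so that pops come out in push (FIFO) order.  */
static Node *lf_pop(void)
{
    if (!to_pop_head) {
        Node *n = atomic_exchange(&from_push_head, NULL);
        while (n) {
            Node *next = n->next;
            n->next = to_pop_head;
            to_pop_head = n;
            n = next;
        }
    }
    Node *n = to_pop_head;
    if (n) {
        to_pop_head = n->next;
    }
    return n;
}
#endif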

void qemu_co_mutex_init(CoMutex *mutex)
{
    memset(mutex, 0, sizeof(*mutex));
}

static void coroutine_fn qemu_co_mutex_wake(CoMutex *mutex, Coroutine *co)
{
    /* Read co before co->ctx; pairs with smp_wmb() in
     * qemu_coroutine_enter().
     */
    smp_read_barrier_depends();
    mutex->ctx = co->ctx;
    aio_co_wake(co);
}

static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
                                                     CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();
    CoWaitRecord w;
    unsigned old_handoff;

    trace_qemu_co_mutex_lock_entry(mutex, self);
    push_waiter(mutex, &w);

    /* This is the "Responsibility Hand-Off" protocol; a lock() picks from
     * a concurrent unlock() the responsibility of waking somebody up.
     */
    old_handoff = qatomic_mb_read(&mutex->handoff);
    if (old_handoff &&
        has_waiters(mutex) &&
        qatomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
        /* There can be no concurrent pops, because there can be only
         * one active handoff at a time.
         */
        CoWaitRecord *to_wake = pop_waiter(mutex);
        Coroutine *co = to_wake->co;
        if (co == self) {
            /* We got the lock ourselves!  */
            assert(to_wake == &w);
            mutex->ctx = ctx;
            return;
        }

        qemu_co_mutex_wake(mutex, co);
    }

    qemu_coroutine_yield();
    trace_qemu_co_mutex_lock_return(mutex, self);
}

void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
{
    AioContext *ctx = qemu_get_current_aio_context();
    Coroutine *self = qemu_coroutine_self();
    int waiters, i;

    /* Running a very small critical section on pthread_mutex_t and CoMutex
     * shows that pthread_mutex_t is much faster because it doesn't actually
     * go to sleep.  What happens is that the critical section is shorter
     * than the latency of entering the kernel and thus FUTEX_WAIT always
     * fails.  With CoMutex there is no such latency but you still want to
     * avoid wait and wakeup.  So introduce it artificially.
     */
    i = 0;
retry_fast_path:
    waiters = qatomic_cmpxchg(&mutex->locked, 0, 1);
    if (waiters != 0) {
        while (waiters == 1 && ++i < 1000) {
            if (qatomic_read(&mutex->ctx) == ctx) {
                break;
            }
            if (qatomic_read(&mutex->locked) == 0) {
                goto retry_fast_path;
            }
            cpu_relax();
        }
        waiters = qatomic_fetch_inc(&mutex->locked);
    }

    if (waiters == 0) {
        /* Uncontended.  */
        trace_qemu_co_mutex_lock_uncontended(mutex, self);
        mutex->ctx = ctx;
    } else {
        qemu_co_mutex_lock_slowpath(ctx, mutex);
    }
    mutex->holder = self;
    self->locks_held++;
}

void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();

    trace_qemu_co_mutex_unlock_entry(mutex, self);

    assert(mutex->locked);
    assert(mutex->holder == self);
    assert(qemu_in_coroutine());

    mutex->ctx = NULL;
    mutex->holder = NULL;
    self->locks_held--;
    if (qatomic_fetch_dec(&mutex->locked) == 1) {
        /* No waiting qemu_co_mutex_lock().  Pfew, that was easy!  */
        return;
    }

    for (;;) {
        CoWaitRecord *to_wake = pop_waiter(mutex);
        unsigned our_handoff;

        if (to_wake) {
            qemu_co_mutex_wake(mutex, to_wake->co);
            break;
        }

        /* Some concurrent lock() is in progress (we know this because
         * mutex->locked was >1) but it hasn't yet put itself on the wait
         * queue.  Pick a sequence number for the handoff protocol (not 0).
         */
        if (++mutex->sequence == 0) {
            mutex->sequence = 1;
        }

        our_handoff = mutex->sequence;
        qatomic_mb_set(&mutex->handoff, our_handoff);
        if (!has_waiters(mutex)) {
            /* The concurrent lock has not added itself yet, so it
             * will be able to pick our handoff.
             */
            break;
        }

        /* Try to do the handoff protocol ourselves; if somebody else has
         * already taken it, however, we're done and they're responsible.
         */
        if (qatomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
            break;
        }
    }

    trace_qemu_co_mutex_unlock_return(mutex, self);
}
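
/*
 * Worked example (illustrative, derived from the protocol description
 * above the CoWaitRecord definition): suppose unlock() races with a
 * lock() that has incremented mutex->locked but not yet pushed itself.
 *
 *   qemu_co_mutex_unlock()            qemu_co_mutex_lock_slowpath()
 *   ----------------------            -----------------------------
 *   qatomic_fetch_dec(&locked) == 2
 *   pop_waiter() -> NULL
 *   handoff = sequence (say 7)
 *                                     push_waiter()
 *                                     reads handoff == 7
 *                                     cmpxchg(&handoff, 7, 0) succeeds:
 *                                       pops itself and takes the lock
 *   has_waiters() -> true
 *   cmpxchg(&handoff, 7, 0) fails
 *   -> break; the waker duty was
 *      handed off to lock().
 */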

struct CoRwTicket {
    bool read;
    Coroutine *co;
    QSIMPLEQ_ENTRY(CoRwTicket) next;
};

void qemu_co_rwlock_init(CoRwlock *lock)
{
    qemu_co_mutex_init(&lock->mutex);
    lock->owners = 0;
    QSIMPLEQ_INIT(&lock->tickets);
}

/* Releases the internal CoMutex.  */
static void coroutine_fn qemu_co_rwlock_maybe_wake_one(CoRwlock *lock)
{
    CoRwTicket *tkt = QSIMPLEQ_FIRST(&lock->tickets);
    Coroutine *co = NULL;

    /*
     * Setting lock->owners here prevents rdlock and wrlock from
     * sneaking in between unlock and wake.
     */

    if (tkt) {
        if (tkt->read) {
            if (lock->owners >= 0) {
                lock->owners++;
                co = tkt->co;
            }
        } else {
            if (lock->owners == 0) {
                lock->owners = -1;
                co = tkt->co;
            }
        }
    }

    if (co) {
        QSIMPLEQ_REMOVE_HEAD(&lock->tickets, next);
        qemu_co_mutex_unlock(&lock->mutex);
        aio_co_wake(co);
    } else {
        qemu_co_mutex_unlock(&lock->mutex);
    }
}

void coroutine_fn qemu_co_rwlock_rdlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    /* For fairness, wait if a writer is in line.  */
    if (lock->owners == 0 || (lock->owners > 0 && QSIMPLEQ_EMPTY(&lock->tickets))) {
        lock->owners++;
        qemu_co_mutex_unlock(&lock->mutex);
    } else {
        CoRwTicket my_ticket = { true, self };

        QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
        qemu_co_mutex_unlock(&lock->mutex);
        qemu_coroutine_yield();
        assert(lock->owners >= 1);

        /* Possibly wake another reader, which will wake the next in line.  */
        qemu_co_mutex_lock(&lock->mutex);
        qemu_co_rwlock_maybe_wake_one(lock);
    }

    self->locks_held++;
}

void coroutine_fn qemu_co_rwlock_unlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    assert(qemu_in_coroutine());
    self->locks_held--;

    qemu_co_mutex_lock(&lock->mutex);
    if (lock->owners > 0) {
        lock->owners--;
    } else {
        assert(lock->owners == -1);
        lock->owners = 0;
    }

    qemu_co_rwlock_maybe_wake_one(lock);
}

void coroutine_fn qemu_co_rwlock_downgrade(CoRwlock *lock)
{
    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->owners == -1);
    lock->owners = 1;

    /* Possibly wake another reader, which will wake the next in line.  */
    qemu_co_rwlock_maybe_wake_one(lock);
}

void coroutine_fn qemu_co_rwlock_wrlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    if (lock->owners == 0) {
        lock->owners = -1;
        qemu_co_mutex_unlock(&lock->mutex);
    } else {
        CoRwTicket my_ticket = { false, qemu_coroutine_self() };

        QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
        qemu_co_mutex_unlock(&lock->mutex);
        qemu_coroutine_yield();
        assert(lock->owners == -1);
    }

    self->locks_held++;
}

void coroutine_fn qemu_co_rwlock_upgrade(CoRwlock *lock)
{
    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->owners > 0);
    /* For fairness, wait if a writer is in line.  */
    if (lock->owners == 1 && QSIMPLEQ_EMPTY(&lock->tickets)) {
        lock->owners = -1;
        qemu_co_mutex_unlock(&lock->mutex);
    } else {
        CoRwTicket my_ticket = { false, qemu_coroutine_self() };

        lock->owners--;
        QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
        qemu_co_rwlock_maybe_wake_one(lock);
        qemu_coroutine_yield();
        assert(lock->owners == -1);
    }
}
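
/*
 * Illustrative sketch, not part of QEMU: typical CoRwlock usage.  Readers
 * run concurrently; a writer gets exclusive access.  table_lock,
 * table_value, co_read_value and co_write_value are hypothetical names
 * for this example.
 */
#ifdef CORWLOCK_EXAMPLE
static CoRwlock table_lock;     /* initialized once with qemu_co_rwlock_init */
static int table_value;

static int coroutine_fn co_read_value(void)
{
    int v;

    qemu_co_rwlock_rdlock(&table_lock);
    v = table_value;
    qemu_co_rwlock_unlock(&table_lock);
    return v;
}

static void coroutine_fn co_write_value(int v)
{
    qemu_co_rwlock_wrlock(&table_lock);
    table_value = v;
    /* A writer could instead keep the lock shared via
     * qemu_co_rwlock_downgrade() and unlock later as a reader.  */
    qemu_co_rwlock_unlock(&table_lock);
}
#endif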