]>
Commit | Line | Data |
---|---|---|
e5d355d1 AL |
1 | /* |
2 | * Wrappers around mutex/cond/thread functions | |
3 | * | |
4 | * Copyright Red Hat, Inc. 2009 | |
5 | * | |
6 | * Author: | |
7 | * Marcelo Tosatti <[email protected]> | |
8 | * | |
9 | * This work is licensed under the terms of the GNU GPL, version 2 or later. | |
10 | * See the COPYING file in the top-level directory. | |
11 | * | |
12 | */ | |
aafd7584 | 13 | #include "qemu/osdep.h" |
c7c4d063 PB |
14 | #ifdef __linux__ |
15 | #include <sys/syscall.h> | |
16 | #include <linux/futex.h> | |
17 | #endif | |
1de7afc9 | 18 | #include "qemu/thread.h" |
c7c4d063 | 19 | #include "qemu/atomic.h" |
ef57137f | 20 | #include "qemu/notify.h" |
e5d355d1 | 21 | |
8f480de0 DDAG |
/* Whether qemu_thread_create() should attach debug names to new threads. */
static bool name_threads;

/*
 * Turn thread naming on or off.
 *
 * On hosts without thread-naming support this only emits a warning;
 * the flag is still recorded and the option is otherwise harmless.
 */
void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    if (enable) {
        /* Debugging aid only, so not a fatal condition. */
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}
35 | ||
e5d355d1 AL |
/*
 * Report a fatal error and terminate the process.
 *
 * @err: an errno-style error code, rendered via strerror()
 * @msg: context string, conventionally the calling function's __func__
 *
 * Never returns; every wrapper in this file funnels unexpected
 * pthread/semaphore failures here.
 */
static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}
41 | ||
42 | void qemu_mutex_init(QemuMutex *mutex) | |
43 | { | |
44 | int err; | |
45 | ||
24fa9049 | 46 | err = pthread_mutex_init(&mutex->lock, NULL); |
e5d355d1 AL |
47 | if (err) |
48 | error_exit(err, __func__); | |
49 | } | |
50 | ||
313b1d69 CC |
51 | void qemu_mutex_destroy(QemuMutex *mutex) |
52 | { | |
53 | int err; | |
54 | ||
55 | err = pthread_mutex_destroy(&mutex->lock); | |
56 | if (err) | |
57 | error_exit(err, __func__); | |
58 | } | |
59 | ||
e5d355d1 AL |
60 | void qemu_mutex_lock(QemuMutex *mutex) |
61 | { | |
62 | int err; | |
63 | ||
64 | err = pthread_mutex_lock(&mutex->lock); | |
65 | if (err) | |
66 | error_exit(err, __func__); | |
67 | } | |
68 | ||
/*
 * Try to acquire @mutex without blocking.
 *
 * Returns 0 on success, otherwise the pthread error code (EBUSY when the
 * mutex is already held).  Unlike the other wrappers in this file, failure
 * is reported to the caller rather than aborting.
 */
int qemu_mutex_trylock(QemuMutex *mutex)
{
    return pthread_mutex_trylock(&mutex->lock);
}
73 | ||
e5d355d1 AL |
74 | void qemu_mutex_unlock(QemuMutex *mutex) |
75 | { | |
76 | int err; | |
77 | ||
78 | err = pthread_mutex_unlock(&mutex->lock); | |
79 | if (err) | |
80 | error_exit(err, __func__); | |
81 | } | |
82 | ||
83 | void qemu_cond_init(QemuCond *cond) | |
84 | { | |
85 | int err; | |
86 | ||
87 | err = pthread_cond_init(&cond->cond, NULL); | |
88 | if (err) | |
89 | error_exit(err, __func__); | |
90 | } | |
91 | ||
313b1d69 CC |
92 | void qemu_cond_destroy(QemuCond *cond) |
93 | { | |
94 | int err; | |
95 | ||
96 | err = pthread_cond_destroy(&cond->cond); | |
97 | if (err) | |
98 | error_exit(err, __func__); | |
99 | } | |
100 | ||
e5d355d1 AL |
101 | void qemu_cond_signal(QemuCond *cond) |
102 | { | |
103 | int err; | |
104 | ||
105 | err = pthread_cond_signal(&cond->cond); | |
106 | if (err) | |
107 | error_exit(err, __func__); | |
108 | } | |
109 | ||
110 | void qemu_cond_broadcast(QemuCond *cond) | |
111 | { | |
112 | int err; | |
113 | ||
114 | err = pthread_cond_broadcast(&cond->cond); | |
115 | if (err) | |
116 | error_exit(err, __func__); | |
117 | } | |
118 | ||
119 | void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex) | |
120 | { | |
121 | int err; | |
122 | ||
123 | err = pthread_cond_wait(&cond->cond, &mutex->lock); | |
124 | if (err) | |
125 | error_exit(err, __func__); | |
126 | } | |
127 | ||
38b14db3 PB |
128 | void qemu_sem_init(QemuSemaphore *sem, int init) |
129 | { | |
130 | int rc; | |
131 | ||
927fa909 | 132 | #if defined(__APPLE__) || defined(__NetBSD__) |
c166cb72 PB |
133 | rc = pthread_mutex_init(&sem->lock, NULL); |
134 | if (rc != 0) { | |
135 | error_exit(rc, __func__); | |
136 | } | |
137 | rc = pthread_cond_init(&sem->cond, NULL); | |
138 | if (rc != 0) { | |
139 | error_exit(rc, __func__); | |
140 | } | |
141 | if (init < 0) { | |
142 | error_exit(EINVAL, __func__); | |
143 | } | |
144 | sem->count = init; | |
145 | #else | |
38b14db3 PB |
146 | rc = sem_init(&sem->sem, 0, init); |
147 | if (rc < 0) { | |
148 | error_exit(errno, __func__); | |
149 | } | |
c166cb72 | 150 | #endif |
38b14db3 PB |
151 | } |
152 | ||
153 | void qemu_sem_destroy(QemuSemaphore *sem) | |
154 | { | |
155 | int rc; | |
156 | ||
927fa909 | 157 | #if defined(__APPLE__) || defined(__NetBSD__) |
c166cb72 PB |
158 | rc = pthread_cond_destroy(&sem->cond); |
159 | if (rc < 0) { | |
160 | error_exit(rc, __func__); | |
161 | } | |
162 | rc = pthread_mutex_destroy(&sem->lock); | |
163 | if (rc < 0) { | |
164 | error_exit(rc, __func__); | |
165 | } | |
166 | #else | |
38b14db3 PB |
167 | rc = sem_destroy(&sem->sem); |
168 | if (rc < 0) { | |
169 | error_exit(errno, __func__); | |
170 | } | |
c166cb72 | 171 | #endif |
38b14db3 PB |
172 | } |
173 | ||
174 | void qemu_sem_post(QemuSemaphore *sem) | |
175 | { | |
176 | int rc; | |
177 | ||
927fa909 | 178 | #if defined(__APPLE__) || defined(__NetBSD__) |
c166cb72 | 179 | pthread_mutex_lock(&sem->lock); |
79761c66 | 180 | if (sem->count == UINT_MAX) { |
c166cb72 | 181 | rc = EINVAL; |
c166cb72 | 182 | } else { |
79761c66 IT |
183 | sem->count++; |
184 | rc = pthread_cond_signal(&sem->cond); | |
c166cb72 PB |
185 | } |
186 | pthread_mutex_unlock(&sem->lock); | |
187 | if (rc != 0) { | |
188 | error_exit(rc, __func__); | |
189 | } | |
190 | #else | |
38b14db3 PB |
191 | rc = sem_post(&sem->sem); |
192 | if (rc < 0) { | |
193 | error_exit(errno, __func__); | |
194 | } | |
c166cb72 PB |
195 | #endif |
196 | } | |
197 | ||
/*
 * Convert a relative timeout of @ms milliseconds into an absolute
 * wall-clock deadline in @ts, suitable for pthread_cond_timedwait()
 * and sem_timedwait().
 */
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval now;

    gettimeofday(&now, NULL);
    ts->tv_sec = now.tv_sec + ms / 1000;
    ts->tv_nsec = now.tv_usec * 1000 + (ms % 1000) * 1000000;
    /* Each addend is below 1e9 ns, so a single carry is enough. */
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_nsec -= 1000000000;
        ts->tv_sec++;
    }
}
209 | ||
/*
 * Decrement @sem, waiting at most @ms milliseconds.
 *
 * Returns 0 if the semaphore was acquired, -1 on timeout.  Any other
 * failure aborts.  A non-positive @ms degenerates to a non-blocking
 * try on the POSIX path.
 */
int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    /* Only consume a count if we did not time out. */
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait. */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        /* Retry on signal interruption; only a real timeout returns -1. */
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}
257 | ||
/*
 * Decrement @sem, blocking until the count is positive.
 * Aborts on any unexpected pthread/semaphore error.
 */
void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    /* Loop guards against spurious condvar wakeups. */
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    /* Retry if interrupted by a signal. */
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
281 | ||
c7c4d063 PB |
#ifdef __linux__
#define futex(...) syscall(__NR_futex, __VA_ARGS__)

/* Wake up to @n threads blocked in futex_wait() on @ev. */
static inline void futex_wake(QemuEvent *ev, int n)
{
    futex(ev, FUTEX_WAKE, n, NULL, NULL, 0);
}

/*
 * Block until ev->value changes from @val; returns immediately if the
 * value already differs (EWOULDBLOCK) and retries on signal delivery.
 */
static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    while (futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0)) {
        switch (errno) {
        case EWOULDBLOCK:
            return;
        case EINTR:
            break; /* get out of switch and retry */
        default:
            abort();
        }
    }
}
#else
/*
 * Portable emulation of the futex calls above using the mutex/condvar
 * pair embedded in QemuEvent (initialized in qemu_event_init()).
 */
static inline void futex_wake(QemuEvent *ev, int n)
{
    pthread_mutex_lock(&ev->lock);
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
    pthread_mutex_unlock(&ev->lock);
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    pthread_mutex_lock(&ev->lock);
    /* Like the futex syscall: only sleep if the value is still @val. */
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif
324 | ||
/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

/* Note: EV_SET is 0 and EV_FREE is 1, so the OR in qemu_event_reset()
 * maps set->free while leaving free/busy unchanged. */
#define EV_SET 0
#define EV_FREE 1
#define EV_BUSY -1

/*
 * Initialize @ev.  The event starts "set" if @init is true, otherwise
 * "free" (cleared, no waiters).
 */
void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    /* The portable futex emulation needs its mutex/condvar set up. */
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
}
352 | ||
/* Destroy @ev; no thread may be waiting on it. */
void qemu_event_destroy(QemuEvent *ev)
{
#ifndef __linux__
    /* Only the emulated (non-futex) implementation owns resources. */
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}
360 | ||
/*
 * Set @ev, releasing every thread blocked in qemu_event_wait().
 * See the transition diagram above: free->set or busy->set, the
 * latter followed by a wake of all waiters.
 */
void qemu_event_set(QemuEvent *ev)
{
    /* Fast path: already set, nothing to do. */
    if (atomic_mb_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            futex_wake(ev, INT_MAX);
        }
    }
}
370 | ||
/*
 * Clear @ev (set->free).  A concurrent waiter or setter wins: the OR
 * is a no-op unless the value is still EV_SET (0).
 */
void qemu_event_reset(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing. Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}
381 | ||
/*
 * Block until @ev is set.  Returns immediately if already set;
 * otherwise advertises a waiter (free->busy) and sleeps in
 * futex_wait() until qemu_event_set() wakes it.
 */
void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_mb_read(&ev->value);
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters. No need to retry, because there cannot be
             * a concurrent busy->free transition. After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        futex_wait(ev, EV_BUSY);
    }
}
402 | ||
ef57137f PB |
/* Thread-specific key whose destructor runs the per-thread exit notifiers. */
static pthread_key_t exit_key;

/*
 * A NotifierList is smuggled through the void* thread-specific value;
 * the build-time assert below guarantees the pun is size-safe.
 */
union NotifierThreadData {
    void *ptr;
    NotifierList list;
};
QEMU_BUILD_BUG_ON(sizeof(union NotifierThreadData) != sizeof(void *));

/* Register @notifier to be invoked when the calling thread exits. */
void qemu_thread_atexit_add(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_list_add(&ntd.list, notifier);
    /* Store back: notifier_list_add may have changed the list head. */
    pthread_setspecific(exit_key, ntd.ptr);
}

/* Unregister @notifier from the calling thread's exit notifier list. */
void qemu_thread_atexit_remove(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_remove(notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

/* pthread_key destructor: called with the stored list on thread exit. */
static void qemu_thread_atexit_run(void *arg)
{
    union NotifierThreadData ntd = { .ptr = arg };
    notifier_list_notify(&ntd.list, NULL);
}

/* Create the key once, before main() runs. */
static void __attribute__((constructor)) qemu_thread_atexit_init(void)
{
    pthread_key_create(&exit_key, qemu_thread_atexit_run);
}
437 | ||
438 | ||
5c312079 DDAG |
/* Attempt to set the threads name; note that this is for debug, so
 * we're not going to fail if we can't set it.
 */
static void qemu_thread_set_name(QemuThread *thread, const char *name)
{
#ifdef CONFIG_PTHREAD_SETNAME_NP
    /* Non-portable extension; silently a no-op where unavailable. */
    pthread_setname_np(thread->thread, name);
#endif
}
448 | ||
4900116e | 449 | void qemu_thread_create(QemuThread *thread, const char *name, |
e5d355d1 | 450 | void *(*start_routine)(void*), |
cf218714 | 451 | void *arg, int mode) |
e5d355d1 | 452 | { |
cf218714 | 453 | sigset_t set, oldset; |
e5d355d1 | 454 | int err; |
8763046b | 455 | pthread_attr_t attr; |
e5d355d1 | 456 | |
8763046b JK |
457 | err = pthread_attr_init(&attr); |
458 | if (err) { | |
459 | error_exit(err, __func__); | |
460 | } | |
461 | if (mode == QEMU_THREAD_DETACHED) { | |
462 | err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); | |
463 | if (err) { | |
464 | error_exit(err, __func__); | |
465 | } | |
466 | } | |
55541c8a | 467 | |
cf218714 | 468 | /* Leave signal handling to the iothread. */ |
55541c8a PB |
469 | sigfillset(&set); |
470 | pthread_sigmask(SIG_SETMASK, &set, &oldset); | |
8763046b | 471 | err = pthread_create(&thread->thread, &attr, start_routine, arg); |
e5d355d1 AL |
472 | if (err) |
473 | error_exit(err, __func__); | |
55541c8a | 474 | |
4900116e | 475 | if (name_threads) { |
5c312079 | 476 | qemu_thread_set_name(thread, name); |
4900116e | 477 | } |
4900116e | 478 | |
55541c8a | 479 | pthread_sigmask(SIG_SETMASK, &oldset, NULL); |
8763046b JK |
480 | |
481 | pthread_attr_destroy(&attr); | |
e5d355d1 AL |
482 | } |
483 | ||
b7680cb6 | 484 | void qemu_thread_get_self(QemuThread *thread) |
e5d355d1 AL |
485 | { |
486 | thread->thread = pthread_self(); | |
487 | } | |
488 | ||
2d797b65 | 489 | bool qemu_thread_is_self(QemuThread *thread) |
e5d355d1 | 490 | { |
b7680cb6 | 491 | return pthread_equal(pthread_self(), thread->thread); |
e5d355d1 AL |
492 | } |
493 | ||
313b1d69 CC |
/*
 * Terminate the calling thread with exit value @retval, which a
 * joiner can collect via qemu_thread_join().  Does not return.
 */
void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}
8763046b JK |
498 | |
499 | void *qemu_thread_join(QemuThread *thread) | |
500 | { | |
501 | int err; | |
502 | void *ret; | |
503 | ||
504 | err = pthread_join(thread->thread, &ret); | |
505 | if (err) { | |
506 | error_exit(err, __func__); | |
507 | } | |
508 | return ret; | |
509 | } |