/*
 * Wrappers around mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2009
 *
 * Author:
 *  Marcelo Tosatti <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/notify.h"
#include "trace.h"

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}

static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}

void qemu_mutex_init(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_init(&mutex->lock, NULL);
    if (err) {
        error_exit(err, __func__);
    }
    mutex->initialized = true;
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    int err;

    assert(mutex->initialized);
    mutex->initialized = false;
    err = pthread_mutex_destroy(&mutex->lock);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    trace_qemu_mutex_lock(mutex, file, line);

    err = pthread_mutex_lock(&mutex->lock);
    if (err) {
        error_exit(err, __func__);
    }

    trace_qemu_mutex_locked(mutex, file, line);
}

int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    err = pthread_mutex_trylock(&mutex->lock);
    if (err == 0) {
        trace_qemu_mutex_locked(mutex, file, line);
        return 0;
    }
    if (err != EBUSY) {
        error_exit(err, __func__);
    }
    return -EBUSY;
}

void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    err = pthread_mutex_unlock(&mutex->lock);
    if (err) {
        error_exit(err, __func__);
    }

    trace_qemu_mutex_unlock(mutex, file, line);
}

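/*
 * Usage sketch (not part of this file): callers normally go through the
 * qemu_mutex_lock()/qemu_mutex_unlock() macros from "qemu/thread.h", which
 * pass __FILE__ and __LINE__ down to the *_impl functions above so the
 * trace points can report the call site:
 *
 *     QemuMutex m;
 *
 *     qemu_mutex_init(&m);
 *     qemu_mutex_lock(&m);      expands to qemu_mutex_lock_impl(...)
 *     ... critical section ...
 *     qemu_mutex_unlock(&m);
 *     qemu_mutex_destroy(&m);
 */
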
void qemu_rec_mutex_init(QemuRecMutex *mutex)
{
    int err;
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    err = pthread_mutex_init(&mutex->lock, &attr);
    pthread_mutexattr_destroy(&attr);
    if (err) {
        error_exit(err, __func__);
    }
    mutex->initialized = true;
}

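/*
 * Note (assumption, see "qemu/thread.h"): a QemuRecMutex initialized here
 * may be taken repeatedly by the owning thread through the qemu_rec_mutex_*
 * wrappers; each recursive lock must be paired with its own unlock.
 */
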
void qemu_cond_init(QemuCond *cond)
{
    int err;

    err = pthread_cond_init(&cond->cond, NULL);
    if (err) {
        error_exit(err, __func__);
    }
    cond->initialized = true;
}

void qemu_cond_destroy(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    cond->initialized = false;
    err = pthread_cond_destroy(&cond->cond);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_cond_signal(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    err = pthread_cond_signal(&cond->cond);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_cond_broadcast(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    err = pthread_cond_broadcast(&cond->cond);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
                         const char *file, const int line)
{
    int err;

    assert(cond->initialized);
    trace_qemu_mutex_unlock(mutex, file, line);
    err = pthread_cond_wait(&cond->cond, &mutex->lock);
    trace_qemu_mutex_locked(mutex, file, line);
    if (err) {
        error_exit(err, __func__);
    }
}

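/*
 * Usage sketch (not part of this file): as with bare pthreads, a wait
 * must sit in a loop that re-checks its predicate, because
 * pthread_cond_wait() may wake up spuriously:
 *
 *     qemu_mutex_lock(&lock);
 *     while (!ready) {
 *         qemu_cond_wait(&cond, &lock);
 *     }
 *     qemu_mutex_unlock(&lock);
 */
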
/*
 * Hosts without a usable sem_timedwait() (no CONFIG_SEM_TIMEDWAIT, e.g.
 * Darwin) emulate the semaphore with a counter protected by a pthread
 * mutex/condvar pair; everywhere else the native POSIX semaphore is used.
 */
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#ifndef CONFIG_SEM_TIMEDWAIT
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
    sem->initialized = true;
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
    sem->initialized = false;
#ifndef CONFIG_SEM_TIMEDWAIT
    /* pthread_*_destroy() return an error number, not -1/errno */
    rc = pthread_cond_destroy(&sem->cond);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_mutex_destroy(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_destroy(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    /*
     * tv_usec * 1000 and (ms % 1000) * 1000000 are each below 1e9,
     * so tv_nsec stays under 2e9 and a single carry is enough.
     */
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = tv.tv_sec + ms / 1000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait. */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}

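/*
 * Example (sketch): poll for work with a 100 ms timeout.  The return
 * convention above is -1 on timeout (the count is left untouched) and
 * 0 once the semaphore has been decremented:
 *
 *     if (qemu_sem_timedwait(&sem, 100) < 0) {
 *         ... timed out ...
 *     } else {
 *         ... acquired ...
 *     }
 */
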
void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

#ifdef __linux__
/* On Linux, QemuEvent wait/wake map directly onto the futex syscall. */
#include "qemu/futex.h"
#else
/* Elsewhere, emulate futex wait/wake with a mutex and condition variable. */
static inline void qemu_futex_wake(QemuEvent *ev, int n)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
    pthread_mutex_unlock(&ev->lock);
}

static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif

/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by qemu_futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1

void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
    ev->initialized = true;
}

void qemu_event_destroy(QemuEvent *ev)
{
    assert(ev->initialized);
    ev->initialized = false;
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}

void qemu_event_set(QemuEvent *ev)
{
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    assert(ev->initialized);
    smp_mb();
    if (atomic_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            qemu_futex_wake(ev, INT_MAX);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing. Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters. No need to retry, because there cannot be
             * a concurrent busy->free transition. After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        qemu_futex_wait(ev, EV_BUSY);
    }
}

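/*
 * Usage sketch (not part of this file): QemuEvent acts as a manual-reset
 * event.  qemu_event_wait() returns once the event is set, and the event
 * stays set until someone calls qemu_event_reset():
 *
 *     QemuEvent done;
 *
 *     qemu_event_init(&done, false);
 *     qemu_event_set(&done);        called by the signalling thread
 *     qemu_event_wait(&done);       returns immediately once set
 */
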
/*
 * Per-thread exit notifiers are kept in a list stored in thread-local
 * storage; pthread_key_create()'s destructor runs the list when the
 * thread exits.
 */
static pthread_key_t exit_key;

union NotifierThreadData {
    void *ptr;
    NotifierList list;
};
QEMU_BUILD_BUG_ON(sizeof(union NotifierThreadData) != sizeof(void *));

void qemu_thread_atexit_add(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_list_add(&ntd.list, notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_remove(notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

static void qemu_thread_atexit_run(void *arg)
{
    union NotifierThreadData ntd = { .ptr = arg };
    notifier_list_notify(&ntd.list, NULL);
}

static void __attribute__((constructor)) qemu_thread_atexit_init(void)
{
    pthread_key_create(&exit_key, qemu_thread_atexit_run);
}

#ifdef CONFIG_PTHREAD_SETNAME_NP
typedef struct {
    void *(*start_routine)(void *);
    void *arg;
    char *name;
} QemuThreadArgs;

static void *qemu_thread_start(void *args)
{
    QemuThreadArgs *qemu_thread_args = args;
    void *(*start_routine)(void *) = qemu_thread_args->start_routine;
    void *arg = qemu_thread_args->arg;

    /* Attempt to set the thread's name; note that this is for debug, so
     * we're not going to fail if we can't set it.
     */
    pthread_setname_np(pthread_self(), qemu_thread_args->name);
    g_free(qemu_thread_args->name);
    g_free(qemu_thread_args);
    return start_routine(arg);
}
#endif

void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }

    if (mode == QEMU_THREAD_DETACHED) {
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    }

    /* Leave signal handling to the iothread. */
    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);

#ifdef CONFIG_PTHREAD_SETNAME_NP
    if (name_threads) {
        QemuThreadArgs *qemu_thread_args;
        qemu_thread_args = g_new0(QemuThreadArgs, 1);
        qemu_thread_args->name = g_strdup(name);
        qemu_thread_args->start_routine = start_routine;
        qemu_thread_args->arg = arg;

        err = pthread_create(&thread->thread, &attr,
                             qemu_thread_start, qemu_thread_args);
    } else
#endif
    {
        err = pthread_create(&thread->thread, &attr,
                             start_routine, arg);
    }

    if (err) {
        error_exit(err, __func__);
    }

    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}

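/*
 * Example (sketch): create a joinable worker and collect its result;
 * worker_fn and opaque are placeholders, QEMU_THREAD_JOINABLE and
 * QEMU_THREAD_DETACHED are the mode values declared in "qemu/thread.h":
 *
 *     QemuThread t;
 *     void *ret;
 *
 *     qemu_thread_create(&t, "worker", worker_fn, opaque,
 *                        QEMU_THREAD_JOINABLE);
 *     ret = qemu_thread_join(&t);
 */
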
void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return pthread_equal(pthread_self(), thread->thread);
}

void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}

void *qemu_thread_join(QemuThread *thread)
{
    int err;
    void *ret;

    err = pthread_join(thread->thread, &ret);
    if (err) {
        error_exit(err, __func__);
    }
    return ret;
}