#include "trace.h"
#include "qemu-common.h"
#include "qemu/thread.h"
+#include "qemu/atomic.h"
#include "block/coroutine.h"
#include "block/coroutine_int.h"
enum {
- /* Maximum free pool size prevents holding too many freed coroutines */
- POOL_MAX_SIZE = 64,
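+    /* Coroutines move between the per-thread pools and the shared release
+     * pool in batches of roughly this size: each thread keeps at most
+     * POOL_BATCH_SIZE coroutines, the release pool at most twice that.
+     */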
+ POOL_BATCH_SIZE = 64,
};
/** Free list to speed up creation */
-static QemuMutex pool_lock;
-static QSLIST_HEAD(, Coroutine) pool = QSLIST_HEAD_INITIALIZER(pool);
-static unsigned int pool_size;
+static QSLIST_HEAD(, Coroutine) release_pool = QSLIST_HEAD_INITIALIZER(release_pool);
+static unsigned int release_pool_size;
+static __thread QSLIST_HEAD(, Coroutine) alloc_pool = QSLIST_HEAD_INITIALIZER(alloc_pool);
+static __thread unsigned int alloc_pool_size;
+static __thread Notifier coroutine_pool_cleanup_notifier;
-Coroutine *qemu_coroutine_create(CoroutineEntry *entry)
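+/* Runs from the thread-exit notifier registered below in
+ * qemu_coroutine_create(); frees whatever is left in the exiting thread's
+ * private alloc pool.
+ */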
+static void coroutine_pool_cleanup(Notifier *n, void *value)
{
Coroutine *co;
+ Coroutine *tmp;
- qemu_mutex_lock(&pool_lock);
- co = QSLIST_FIRST(&pool);
- if (co) {
- QSLIST_REMOVE_HEAD(&pool, pool_next);
- pool_size--;
+ QSLIST_FOREACH_SAFE(co, &alloc_pool, pool_next, tmp) {
+ QSLIST_REMOVE_HEAD(&alloc_pool, pool_next);
+ qemu_coroutine_delete(co);
+ }
+}
+
+Coroutine *qemu_coroutine_create(CoroutineEntry *entry)
+{
+ Coroutine *co = NULL;
+
+ if (CONFIG_COROUTINE_POOL) {
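+        /* Fast path: grab a coroutine from this thread's private pool,
+         * with no locking or atomics.
+         */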
+ co = QSLIST_FIRST(&alloc_pool);
+ if (!co) {
+ if (release_pool_size > POOL_BATCH_SIZE) {
+ /* Slow path; a good place to register the destructor, too. */
+ if (!coroutine_pool_cleanup_notifier.notify) {
+ coroutine_pool_cleanup_notifier.notify = coroutine_pool_cleanup;
+ qemu_thread_atexit_add(&coroutine_pool_cleanup_notifier);
+ }
+
+ /* This is not exact; there could be a little skew between
+ * release_pool_size and the actual size of release_pool. But
+ * it is just a heuristic, it does not need to be perfect.
+ */
+ alloc_pool_size = atomic_xchg(&release_pool_size, 0);
+ QSLIST_MOVE_ATOMIC(&alloc_pool, &release_pool);
+ co = QSLIST_FIRST(&alloc_pool);
+ }
+ }
+ if (co) {
+ QSLIST_REMOVE_HEAD(&alloc_pool, pool_next);
+ alloc_pool_size--;
+ }
}
- qemu_mutex_unlock(&pool_lock);
    if (!co) {
        co = qemu_coroutine_new();
    }

    co->entry = entry;
    trace_qemu_coroutine_create(co);
    return co;
}

static void coroutine_delete(Coroutine *co)
{
- qemu_mutex_lock(&pool_lock);
- if (pool_size < POOL_MAX_SIZE) {
- QSLIST_INSERT_HEAD(&pool, co, pool_next);
- co->caller = NULL;
- pool_size++;
- qemu_mutex_unlock(&pool_lock);
- return;
+ co->caller = NULL;
+
+ if (CONFIG_COROUTINE_POOL) {
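+        /* Prefer handing the coroutine back to the shared release pool so
+         * any thread can reuse it; fall back to the local alloc pool, and
+         * only free it for real once both pools are full.
+         */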
+ if (release_pool_size < POOL_BATCH_SIZE * 2) {
+ QSLIST_INSERT_HEAD_ATOMIC(&release_pool, co, pool_next);
+ atomic_inc(&release_pool_size);
+ return;
+ }
+ if (alloc_pool_size < POOL_BATCH_SIZE) {
+ QSLIST_INSERT_HEAD(&alloc_pool, co, pool_next);
+ alloc_pool_size++;
+ return;
+ }
}
- qemu_mutex_unlock(&pool_lock);
qemu_coroutine_delete(co);
}
-static void __attribute__((constructor)) coroutine_pool_init(void)
+void qemu_coroutine_enter(Coroutine *co, void *opaque)
{
- qemu_mutex_init(&pool_lock);
-}
+ Coroutine *self = qemu_coroutine_self();
+ CoroutineAction ret;
-static void __attribute__((destructor)) coroutine_pool_cleanup(void)
-{
- Coroutine *co;
- Coroutine *tmp;
+ trace_qemu_coroutine_enter(self, co, opaque);
- QSLIST_FOREACH_SAFE(co, &pool, pool_next, tmp) {
- QSLIST_REMOVE_HEAD(&pool, pool_next);
- qemu_coroutine_delete(co);
+ if (co->caller) {
+ fprintf(stderr, "Co-routine re-entered recursively\n");
+ abort();
}
- qemu_mutex_destroy(&pool_lock);
-}
-
-static void coroutine_swap(Coroutine *from, Coroutine *to)
-{
- CoroutineAction ret;
-
- ret = qemu_coroutine_switch(from, to, COROUTINE_YIELD);
+ co->caller = self;
+ co->entry_arg = opaque;
+ ret = qemu_coroutine_switch(self, co, COROUTINE_ENTER);
- qemu_co_queue_run_restart(to);
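+    /* Re-enter any coroutines that 'co' queued for wakeup while it had
+     * control, before looking at how the switch ended.
+     */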
+ qemu_co_queue_run_restart(co);
switch (ret) {
case COROUTINE_YIELD:
return;
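+    /* The coroutine's entry function has returned; recycle the coroutine
+     * through coroutine_delete(), which may park it in one of the pools.
+     */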
case COROUTINE_TERMINATE:
- trace_qemu_coroutine_terminate(to);
- coroutine_delete(to);
+ trace_qemu_coroutine_terminate(co);
+ coroutine_delete(co);
return;
default:
abort();
}
}
-void qemu_coroutine_enter(Coroutine *co, void *opaque)
-{
- Coroutine *self = qemu_coroutine_self();
-
- trace_qemu_coroutine_enter(self, co, opaque);
-
- if (co->caller) {
- fprintf(stderr, "Co-routine re-entered recursively\n");
- abort();
- }
-
- co->caller = self;
- co->entry_arg = opaque;
- coroutine_swap(self, co);
-}
-
void coroutine_fn qemu_coroutine_yield(void)
{
    Coroutine *self = qemu_coroutine_self();
    Coroutine *to = self->caller;

    trace_qemu_coroutine_yield(self, to);

    if (!to) {
        fprintf(stderr, "Co-routine is yielding to no one\n");
        abort();
    }

    self->caller = NULL;
- coroutine_swap(self, to);
+ qemu_coroutine_switch(self, to, COROUTINE_YIELD);
}