* IBM's contributions to this file may be relicensed under LGPLv2 or later.
*/
+#include "qemu/osdep.h"
#include "qemu-common.h"
-#include <stdio.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <errno.h>
#include "qemu/rcu.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"
+#include "qemu/main-loop.h"
/*
* Global grace period counter. Bit 0 is always one in rcu_gp_ctr.
unsigned long rcu_gp_ctr = RCU_GP_LOCKED;
QemuEvent rcu_gp_event;
-static QemuMutex rcu_gp_lock;
+static QemuMutex rcu_registry_lock;
+static QemuMutex rcu_sync_lock;
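+
+/* rcu_sync_lock serializes writers in synchronize_rcu(); rcu_registry_lock
+ * protects the list of registered reader threads. When both are needed,
+ * rcu_sync_lock is acquired first (see synchronize_rcu below).
+ */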
/*
* Check whether a quiescent state was crossed between the beginning of
*/
__thread struct rcu_reader_data rcu_reader;
-/* Protected by rcu_gp_lock. */
+/* Protected by rcu_registry_lock. */
typedef QLIST_HEAD(, rcu_reader_data) ThreadList;
static ThreadList registry = QLIST_HEAD_INITIALIZER(registry);
break;
}
- /* Wait for one thread to report a quiescent state and
- * try again.
+ /* Wait for one thread to report a quiescent state and try again.
+ * Release rcu_registry_lock, so that rcu_(un)register_thread() does
+ * not have to wait too long.
+ *
+ * rcu_register_thread() may add nodes to &registry; it will not
+ * wake up synchronize_rcu, but that is okay because at least one other
+ * thread must exit its RCU read-side critical section before
+ * synchronize_rcu is done. The next iteration of the loop will
+ * move the new thread's rcu_reader from &registry to &qsreaders,
+ * because rcu_gp_ongoing() will return false.
+ *
+ * rcu_unregister_thread() may remove nodes from &qsreaders instead
+ * of &registry if it runs during qemu_event_wait. That's okay;
+ * the node then will not be added back to &registry by the QLIST_SWAP
+ * below. The invariant is that the node is on exactly one of the two
+ * lists when rcu_registry_lock is released.
*/
+ qemu_mutex_unlock(&rcu_registry_lock);
qemu_event_wait(&rcu_gp_event);
+ qemu_mutex_lock(&rcu_registry_lock);
}
/* put back the reader list in the registry */
void synchronize_rcu(void)
{
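+ /* Writers are serialized on rcu_sync_lock; rcu_registry_lock is only
+ * needed to walk the reader registry, and wait_for_readers() drops and
+ * retakes it so that thread registration is not blocked for an entire
+ * grace period.
+ */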
- qemu_mutex_lock(&rcu_gp_lock);
+ qemu_mutex_lock(&rcu_sync_lock);
+ qemu_mutex_lock(&rcu_registry_lock);
if (!QLIST_EMPTY(&registry)) {
/* In either case, the atomic_mb_set below blocks stores that free
wait_for_readers();
}
- qemu_mutex_unlock(&rcu_gp_lock);
+ qemu_mutex_unlock(&rcu_registry_lock);
+ qemu_mutex_unlock(&rcu_sync_lock);
}
{
struct rcu_head *node;
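+ /* Register the call_rcu thread as an RCU reader, presumably so that
+ * callbacks running here may themselves use RCU read-side primitives.
+ */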
+ rcu_register_thread();
+
for (;;) {
int tries = 0;
int n = atomic_read(&rcu_call_count);
* Fetch rcu_call_count now; we must only process elements that were
* added before synchronize_rcu() starts.
*/
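+ /* Batching heuristic: with fewer than RCU_CALL_MIN_SIZE callbacks
+ * pending, sleep in 10 ms steps for up to five extra tries so that a
+ * single grace period covers a larger batch; with none pending at all,
+ * block on rcu_call_ready_event as well.
+ */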
- while (n < RCU_CALL_MIN_SIZE && ++tries <= 5) {
- g_usleep(100000);
- qemu_event_reset(&rcu_call_ready_event);
- n = atomic_read(&rcu_call_count);
- if (n < RCU_CALL_MIN_SIZE) {
- qemu_event_wait(&rcu_call_ready_event);
+ while (n == 0 || (n < RCU_CALL_MIN_SIZE && ++tries <= 5)) {
+ g_usleep(10000);
+ if (n == 0) {
+ qemu_event_reset(&rcu_call_ready_event);
n = atomic_read(&rcu_call_count);
+ if (n == 0) {
+ qemu_event_wait(&rcu_call_ready_event);
+ }
}
+ n = atomic_read(&rcu_call_count);
}
atomic_sub(&rcu_call_count, n);
synchronize_rcu();
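+ /* Run the callbacks under the iothread (big QEMU) lock, presumably so
+ * that reclaimers need not worry about racing with BQL-protected code;
+ * the lock is dropped again while waiting for further callbacks.
+ */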
+ qemu_mutex_lock_iothread();
while (n > 0) {
node = try_dequeue();
while (!node) {
+ qemu_mutex_unlock_iothread();
qemu_event_reset(&rcu_call_ready_event);
node = try_dequeue();
if (!node) {
qemu_event_wait(&rcu_call_ready_event);
node = try_dequeue();
}
+ qemu_mutex_lock_iothread();
}
n--;
node->func(node);
}
+ qemu_mutex_unlock_iothread();
}
abort();
}
void rcu_register_thread(void)
{
assert(rcu_reader.ctr == 0);
- qemu_mutex_lock(&rcu_gp_lock);
+ qemu_mutex_lock(&rcu_registry_lock);
QLIST_INSERT_HEAD(&registry, &rcu_reader, node);
- qemu_mutex_unlock(&rcu_gp_lock);
+ qemu_mutex_unlock(&rcu_registry_lock);
}
void rcu_unregister_thread(void)
{
- qemu_mutex_lock(&rcu_gp_lock);
+ qemu_mutex_lock(&rcu_registry_lock);
QLIST_REMOVE(&rcu_reader, node);
- qemu_mutex_unlock(&rcu_gp_lock);
+ qemu_mutex_unlock(&rcu_registry_lock);
}
-static void __attribute__((__constructor__)) rcu_init(void)
+static void rcu_init_complete(void)
{
QemuThread thread;
- qemu_mutex_init(&rcu_gp_lock);
+ qemu_mutex_init(&rcu_registry_lock);
+ qemu_mutex_init(&rcu_sync_lock);
qemu_event_init(&rcu_gp_event, true);
qemu_event_init(&rcu_call_ready_event, false);
+
+ /* The caller is assumed to hold the iothread lock, so the call_rcu
+ * thread must have been quiescent even after forking; just recreate it.
+ */
qemu_thread_create(&thread, "call_rcu", call_rcu_thread,
NULL, QEMU_THREAD_DETACHED);
rcu_register_thread();
}
+
+#ifdef CONFIG_POSIX
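+/* Fork handlers registered via pthread_atfork() in rcu_init() below:
+ * the prepare handler takes both locks so that no RCU state is
+ * mid-update at the instant of the fork; the parent and child handlers
+ * release them afterwards.
+ */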
+static void rcu_init_lock(void)
+{
+ qemu_mutex_lock(&rcu_sync_lock);
+ qemu_mutex_lock(&rcu_registry_lock);
+}
+
+static void rcu_init_unlock(void)
+{
+ qemu_mutex_unlock(&rcu_registry_lock);
+ qemu_mutex_unlock(&rcu_sync_lock);
+}
+#endif
+
+void rcu_after_fork(void)
+{
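+ /* Only the forking thread survives in the child: discard the parent's
+ * stale reader registrations and rebuild the RCU state, including a
+ * fresh call_rcu thread, via rcu_init_complete().
+ */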
+ memset(&registry, 0, sizeof(registry));
+ rcu_init_complete();
+}
+
+static void __attribute__((__constructor__)) rcu_init(void)
+{
+#ifdef CONFIG_POSIX
+ pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_unlock);
+#endif
+ rcu_init_complete();
+}