* IBM's contributions to this file may be relicensed under LGPLv2 or later.
*/
+#include "qemu/osdep.h"
#include "qemu-common.h"
-#include <stdio.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <errno.h>
#include "qemu/rcu.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"
unsigned long rcu_gp_ctr = RCU_GP_LOCKED;
QemuEvent rcu_gp_event;
-static QemuMutex rcu_gp_lock;
+static QemuMutex rcu_registry_lock;
+static QemuMutex rcu_sync_lock;
/*
* Check whether a quiescent state was crossed between the beginning of
*/
__thread struct rcu_reader_data rcu_reader;
-/* Protected by rcu_gp_lock. */
+/* Protected by rcu_registry_lock. */
typedef QLIST_HEAD(, rcu_reader_data) ThreadList;
static ThreadList registry = QLIST_HEAD_INITIALIZER(registry);
/* Instead of using atomic_mb_set for index->waiting, and
* atomic_mb_read for index->ctr, memory barriers are placed
* manually since writes to different threads are independent.
- * atomic_mb_set has a smp_wmb before...
+ * qemu_event_reset has acquire semantics, so no memory barrier
+ * is needed here.
*/
- smp_wmb();
QLIST_FOREACH(index, &registry, node) {
atomic_set(&index->waiting, true);
}
- /* ... and a smp_mb after. */
+ /* Here, order the stores to index->waiting before the
+ * loads of index->ctr.
+ */
smp_mb();
QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
}
}
- /* atomic_mb_read has smp_rmb after. */
- smp_rmb();
-
if (QLIST_EMPTY(&registry)) {
break;
}
- /* Wait for one thread to report a quiescent state and
- * try again.
+ /* Wait for one thread to report a quiescent state and try again.
+ * Release rcu_registry_lock, so rcu_(un)register_thread() doesn't
+ * have to wait too long.
+ *
+ * rcu_register_thread() may add nodes to &registry; it will not
+ * wake up synchronize_rcu, but that is okay because at least another
+ * thread must exit its RCU read-side critical section before
+ * synchronize_rcu is done. The next iteration of the loop will
+ * move the new thread's rcu_reader from &registry to &qsreaders,
+ * because rcu_gp_ongoing() will return false.
+ *
+ * rcu_unregister_thread() may remove nodes from &qsreaders instead
+ * of &registry if it runs during qemu_event_wait. That's okay;
+ * the node then will not be added back to &registry by QLIST_SWAP
+ * below. The invariant is that the node is part of one list when
+ * rcu_registry_lock is released.
*/
+ qemu_mutex_unlock(&rcu_registry_lock);
qemu_event_wait(&rcu_gp_event);
+ qemu_mutex_lock(&rcu_registry_lock);
}
/* put back the reader list in the registry */
void synchronize_rcu(void)
{
- qemu_mutex_lock(&rcu_gp_lock);
+ qemu_mutex_lock(&rcu_sync_lock);
+ qemu_mutex_lock(&rcu_registry_lock);
if (!QLIST_EMPTY(&registry)) {
/* In either case, the atomic_mb_set below blocks stores that free
wait_for_readers();
}
- qemu_mutex_unlock(&rcu_gp_lock);
+ qemu_mutex_unlock(&rcu_registry_lock);
+ qemu_mutex_unlock(&rcu_sync_lock);
}
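+/* Writer-side usage, sketched here for context; Foo, global_foo and new_foo
+ * are illustrative names, not part of this file. Readers are assumed to
+ * bracket accesses with rcu_read_lock()/rcu_read_unlock() and to load the
+ * pointer with atomic_rcu_read():
+ *
+ * Foo *old = global_foo;
+ * atomic_rcu_set(&global_foo, new_foo); // publish the new version
+ * synchronize_rcu(); // wait out all current readers
+ * g_free(old); // now safe to reclaim
+ *
+ * With the lock split, concurrent writers serialize on rcu_sync_lock while
+ * rcu_registry_lock is dropped around qemu_event_wait(), so reader threads
+ * can still register and unregister during the grace period.
+ */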
{
struct rcu_head *node;
+ rcu_register_thread();
+
for (;;) {
int tries = 0;
int n = atomic_read(&rcu_call_count);
void rcu_register_thread(void)
{
assert(rcu_reader.ctr == 0);
- qemu_mutex_lock(&rcu_gp_lock);
+ qemu_mutex_lock(&rcu_registry_lock);
QLIST_INSERT_HEAD(®istry, &rcu_reader, node);
- qemu_mutex_unlock(&rcu_gp_lock);
+ qemu_mutex_unlock(&rcu_registry_lock);
}
void rcu_unregister_thread(void)
{
- qemu_mutex_lock(&rcu_gp_lock);
+ qemu_mutex_lock(&rcu_registry_lock);
QLIST_REMOVE(&rcu_reader, node);
- qemu_mutex_unlock(&rcu_gp_lock);
+ qemu_mutex_unlock(&rcu_registry_lock);
}
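+/* Usage sketch (worker_fn is illustrative, not part of this file): a thread
+ * must stay registered for as long as it enters RCU read-side critical
+ * sections, and, as the assert in rcu_register_thread() checks, must not be
+ * inside one when it registers:
+ *
+ * static void *worker_fn(void *opaque)
+ * {
+ * rcu_register_thread();
+ * rcu_read_lock();
+ * ... access RCU-protected data ...
+ * rcu_read_unlock();
+ * rcu_unregister_thread();
+ * return NULL;
+ * }
+ */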
static void rcu_init_complete(void)
{
QemuThread thread;
- qemu_mutex_init(&rcu_gp_lock);
+ qemu_mutex_init(&rcu_registry_lock);
+ qemu_mutex_init(&rcu_sync_lock);
qemu_event_init(&rcu_gp_event, true);
qemu_event_init(&rcu_call_ready_event, false);
rcu_register_thread();
}
+static int atfork_depth = 1;
+
+void rcu_enable_atfork(void)
+{
+ atfork_depth++;
+}
+
+void rcu_disable_atfork(void)
+{
+ atfork_depth--;
+}
+
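+/* The counter starts at 1, so the fork handlers below are active by
+ * default. rcu_disable_atfork() turns them off, for example around a fork
+ * that is immediately followed by exec and therefore never touches RCU in
+ * the child (the surrounding code here is hypothetical):
+ *
+ * rcu_disable_atfork();
+ * pid = fork();
+ * if (pid == 0) {
+ * execv(prog, args); // child execs, never uses RCU
+ * }
+ * rcu_enable_atfork();
+ *
+ * rcu_enable_atfork() re-enables the handlers once forks again need a
+ * consistent RCU state in the child.
+ */
+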
#ifdef CONFIG_POSIX
static void rcu_init_lock(void)
{
- qemu_mutex_lock(&rcu_gp_lock);
+ if (atfork_depth < 1) {
+ return;
+ }
+
+ qemu_mutex_lock(&rcu_sync_lock);
+ qemu_mutex_lock(&rcu_registry_lock);
}
static void rcu_init_unlock(void)
{
- qemu_mutex_unlock(&rcu_gp_lock);
+ if (atfork_depth < 1) {
+ return;
+ }
+
+ qemu_mutex_unlock(&rcu_registry_lock);
+ qemu_mutex_unlock(&rcu_sync_lock);
}
static void rcu_init_child(void)
{
- qemu_mutex_unlock(&rcu_gp_lock);
+ if (atfork_depth < 1) {
+ return;
+ }
+
memset(&registry, 0, sizeof(registry));
rcu_init_complete();
}
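+
+/* The three handlers above follow the pthread_atfork(prepare, parent, child)
+ * convention: take both locks before fork() so the child inherits them in a
+ * consistent state, release them in the parent afterwards, and in the child
+ * forget every pre-fork reader (only the forking thread survives) before
+ * re-running rcu_init_complete(). They would be registered along the lines
+ * of:
+ *
+ * pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_child);
+ *
+ * from an init/constructor function on POSIX hosts.
+ */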