* IBM's contributions to this file may be relicensed under LGPLv2 or later.
*/
+#include "qemu/osdep.h"
#include "qemu-common.h"
-#include <stdio.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <errno.h>
#include "qemu/rcu.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
+#if defined(CONFIG_MALLOC_TRIM)
+#include <malloc.h>
+#endif
/*
 * Global grace period counter.  Bit 0 is always one in rcu_gp_ctr.
 * Bits 1 and above are defined in synchronize_rcu.
 */
/* Instead of using atomic_mb_set for index->waiting, and
* atomic_mb_read for index->ctr, memory barriers are placed
* manually since writes to different threads are independent.
- * atomic_mb_set has a smp_wmb before...
+ * qemu_event_reset has acquire semantics, so no memory barrier
+ * is needed here.
*/
- smp_wmb();
QLIST_FOREACH(index, &registry, node) {
atomic_set(&index->waiting, true);
}
- /* ... and a smp_mb after. */
- smp_mb();
+ /* Here, order the stores to index->waiting before the loads of
+ * index->ctr. Pairs with smp_mb_placeholder() in rcu_read_unlock(),
+ * ensuring that the loads of index->ctr are sequentially consistent.
+ */
+ smp_mb_global();
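+/* Schematically (a sketch only; the reader side lives in rcu.h, not
+ * in this file):
+ *
+ *     writer (wait_for_readers)       reader (rcu_read_unlock)
+ *     -------------------------       ------------------------
+ *     store index->waiting            store p_rcu_reader->ctr
+ *     smp_mb_global()                 smp_mb_placeholder()
+ *     load index->ctr                 load p_rcu_reader->waiting
+ *
+ * A Dekker-style pairing: at least one side is guaranteed to observe
+ * the other side's store.
+ */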
QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
    if (!rcu_gp_ongoing(&index->ctr)) {
        QLIST_REMOVE(index, node);
        QLIST_INSERT_HEAD(&qsreaders, index, node);
        atomic_set(&index->waiting, false);
    }
}
- /* atomic_mb_read has smp_rmb after. */
- smp_rmb();
-
if (QLIST_EMPTY(&registry)) {
break;
}
void synchronize_rcu(void)
{
qemu_mutex_lock(&rcu_sync_lock);
- qemu_mutex_lock(&rcu_registry_lock);
+ /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
+ * Pairs with smp_mb_placeholder() in rcu_read_lock().
+ */
+ smp_mb_global();
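+/* The matching reader sequence in rcu_read_lock() is: store
+ * p_rcu_reader->ctr, smp_mb_placeholder(), then load RCU-protected
+ * pointers.  Sketched here for clarity; same Dekker-style pairing as
+ * in wait_for_readers() above.
+ */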
+
+ qemu_mutex_lock(&rcu_registry_lock);
if (!QLIST_EMPTY(&registry)) {
/* In either case, the atomic_mb_set below blocks stores that free
 * old RCU-protected pointers.
 */
qemu_event_reset(&rcu_call_ready_event);
n = atomic_read(&rcu_call_count);
if (n == 0) {
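+/* No callbacks pending: before blocking, return free heap memory to
+ * the kernel.  malloc_trim()'s argument is the amount of free space
+ * to leave untrimmed at the top of the heap (4 MiB here).
+ */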
+#if defined(CONFIG_MALLOC_TRIM)
+ malloc_trim(4 * 1024 * 1024);
+#endif
qemu_event_wait(&rcu_call_ready_event);
}
}
rcu_register_thread();
}
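+/* A counter rather than a bool, so that nested rcu_disable_atfork()/
+ * rcu_enable_atfork() calls balance out.  The pthread_atfork handlers
+ * below become no-ops while the count is below 1.
+ */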
+static int atfork_depth = 1;
+
+void rcu_enable_atfork(void)
+{
+ atfork_depth++;
+}
+
+void rcu_disable_atfork(void)
+{
+ atfork_depth--;
+}
+
#ifdef CONFIG_POSIX
static void rcu_init_lock(void)
{
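+    /* No-op when the atfork handlers have been disabled. */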
+ if (atfork_depth < 1) {
+ return;
+ }
+
qemu_mutex_lock(&rcu_sync_lock);
qemu_mutex_lock(&rcu_registry_lock);
}
static void rcu_init_unlock(void)
{
+ if (atfork_depth < 1) {
+ return;
+ }
+
qemu_mutex_unlock(&rcu_registry_lock);
qemu_mutex_unlock(&rcu_sync_lock);
}
static void rcu_init_child(void)
{
- qemu_mutex_init(&rcu_registry_lock);
-}
-#endif
+ if (atfork_depth < 1) {
+ return;
+ }
-void rcu_after_fork(void)
-{
    memset(&registry, 0, sizeof(registry));
rcu_init_complete();
}
+#endif
static void __attribute__((__constructor__)) rcu_init(void)
{
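+    /* Set up smp_mb_global()/smp_mb_placeholder() (e.g. the Linux
+     * membarrier system call, when configured) before any RCU reader
+     * can run.
+     */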
+ smp_mb_global_init();
#ifdef CONFIG_POSIX
pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_child);
#endif