atomic_set(&index->waiting, true);
}
- /* Here, order the stores to index->waiting before the
- * loads of index->ctr.
+ /* Here, order the stores to index->waiting before the loads of
+ * index->ctr. Pairs with smp_mb_placeholder() in rcu_read_unlock(),
+ * ensuring that the loads of index->ctr are sequentially consistent.
*/
- smp_mb();
+ smp_mb_global();
QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
if (!rcu_gp_ongoing(&index->ctr)) {
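
For context: the reader side stays cheap because smp_mb_placeholder() is
meant to compile down to a compiler-only barrier, while smp_mb_global()
does the heavy lifting for every thread at once.  A minimal sketch of
such a pair, assuming a sys_membarrier-style backend (the
CONFIG_MEMBARRIER switch and the fallback policy are illustrative, not
part of this hunk):

/* Illustrative sketch, not the patch itself.  barrier() and smp_mb()
 * are QEMU's usual compiler-only and full memory barriers.
 */
#ifdef CONFIG_MEMBARRIER                 /* assumed config switch */
void smp_mb_global_init(void);
void smp_mb_global(void);                /* heavy: barrier on all threads */
#define smp_mb_placeholder() barrier()   /* light: compiler barrier only  */
#else
/* No kernel support: fall back to real barriers on both sides,
 * which is the pre-patch behaviour.
 */
static inline void smp_mb_global_init(void) { }
#define smp_mb_global()      smp_mb()
#define smp_mb_placeholder() smp_mb()
#endif
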
void synchronize_rcu(void)
{
qemu_mutex_lock(&rcu_sync_lock);
- qemu_mutex_lock(&rcu_registry_lock);
+ /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
+ * Pairs with smp_mb_placeholder() in rcu_read_lock().
+ */
+ smp_mb_global();
+
+ qemu_mutex_lock(&rcu_registry_lock);
if (!QLIST_EMPTY(&registry)) {
/* In either case, the atomic_mb_set below blocks stores that free
* old RCU-protected pointers.
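
To see what the hoisted smp_mb_global() is ordering here, consider the
writer-side pattern synchronize_rcu() exists to support.  A hypothetical
example; Foo, global_ptr and update_foo() are illustrative names, not
taken from the patch:

#include "qemu/osdep.h"   /* pulls in glib and qemu/atomic.h */
#include "qemu/rcu.h"     /* synchronize_rcu() */

typedef struct Foo { int val; } Foo;

static Foo *global_ptr;   /* illustrative RCU-protected pointer */

static void update_foo(Foo *new_foo)
{
    Foo *old = atomic_rcu_read(&global_ptr);

    atomic_rcu_set(&global_ptr, new_foo); /* publish the new pointer */
    synchronize_rcu();   /* returns only after every reader that could
                          * have seen "old" has left its critical section */
    g_free(old);
}

The store that publishes new_foo must be visible before
synchronize_rcu() samples any reader's ctr; that is exactly the
ordering the smp_mb_global() added above provides.
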
static void __attribute__((__constructor__)) rcu_init(void)
{
+ smp_mb_global_init();
#ifdef CONFIG_POSIX
pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_child);
#endif
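
Because rcu_init() runs as a constructor, smp_mb_global_init() can
probe for kernel support once, up front.  A sketch of what that probe
could look like on Linux, assuming the membarrier(2) syscall (available
since Linux 4.3); the exact error policy is a guess, since this hunk
does not show it:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/membarrier.h>

static int membarrier(int cmd, int flags)
{
    return syscall(__NR_membarrier, cmd, flags);
}

void smp_mb_global_init(void)
{
    int cmds = membarrier(MEMBARRIER_CMD_QUERY, 0);

    if (cmds < 0 || !(cmds & MEMBARRIER_CMD_SHARED)) {
        /* Assumed policy: refuse to run without kernel support. */
        fprintf(stderr, "membarrier system call not usable\n");
        exit(1);
    }
}

void smp_mb_global(void)
{
    /* Interrupts every running thread in the process, forcing each
     * one through a full memory barrier in the kernel.
     */
    membarrier(MEMBARRIER_CMD_SHARED, 0);
}
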