#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
+#include "qemu/lockable.h"
#if defined(CONFIG_MALLOC_TRIM)
#include <malloc.h>
#endif
{
unsigned long v;
- v = atomic_read(ctr);
+ v = qatomic_read(ctr);
return v && (v != rcu_gp_ctr);
}
*/
qemu_event_reset(&rcu_gp_event);
- /* Instead of using atomic_mb_set for index->waiting, and
- * atomic_mb_read for index->ctr, memory barriers are placed
+ /* Instead of using qatomic_mb_set for index->waiting, and
+ * qatomic_mb_read for index->ctr, memory barriers are placed
* manually since writes to different threads are independent.
* qemu_event_reset has acquire semantics, so no memory barrier
* is needed here.
*/
QLIST_FOREACH(index, &registry, node) {
- atomic_set(&index->waiting, true);
+ qatomic_set(&index->waiting, true);
}
/* Here, order the stores to index->waiting before the loads of
/* No need for mb_set here, worst of all we
* get some extra futex wakeups.
*/
- atomic_set(&index->waiting, false);
+ qatomic_set(&index->waiting, false);
}
}
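/*
 * For illustration of the comment above in wait_for_readers(): because the
 * stores to the per-reader index->waiting flags are independent, a single
 * barrier after the whole loop is enough, instead of a full barrier per
 * element.  A minimal sketch of the two variants follows; the function name
 * is hypothetical, and the single smp_mb_global() is the barrier that pairs
 * with smp_mb_placeholder() in the readers.
 */
static void wait_for_readers_store_sketch(void)   /* hypothetical name */
{
    struct rcu_reader_data *index;

    /* Costlier variant: a full memory barrier for every registered reader. */
    QLIST_FOREACH(index, &registry, node) {
        qatomic_mb_set(&index->waiting, true);
    }

    /* What wait_for_readers() does instead: plain stores, then one barrier
     * that orders all of them before the later loads of index->ctr.  */
    QLIST_FOREACH(index, &registry, node) {
        qatomic_set(&index->waiting, true);
    }
    smp_mb_global();
}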
void synchronize_rcu(void)
{
- qemu_mutex_lock(&rcu_sync_lock);
+ QEMU_LOCK_GUARD(&rcu_sync_lock);
/* Write RCU-protected pointers before reading p_rcu_reader->ctr.
* Pairs with smp_mb_placeholder() in rcu_read_lock().
*/
smp_mb_global();
- qemu_mutex_lock(&rcu_registry_lock);
+ QEMU_LOCK_GUARD(&rcu_registry_lock);
if (!QLIST_EMPTY(&registry)) {
- /* In either case, the atomic_mb_set below blocks stores that free
+ /* In either case, the qatomic_mb_set below blocks stores that free
* old RCU-protected pointers.
*/
if (sizeof(rcu_gp_ctr) < 8) {
*
* Switch parity: 0 -> 1, 1 -> 0.
*/
- atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+ qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
wait_for_readers();
- atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+ qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
} else {
/* Increment current grace period. */
- atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+ qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
}
wait_for_readers();
}
-
- qemu_mutex_unlock(&rcu_registry_lock);
- qemu_mutex_unlock(&rcu_sync_lock);
}
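/*
 * For illustration: QEMU_LOCK_GUARD(), from the newly included
 * "qemu/lockable.h", acquires the mutex where it is declared and releases
 * it automatically when the enclosing scope is exited on any path, which is
 * why the explicit qemu_mutex_unlock() calls at the end of synchronize_rcu()
 * can be dropped.  A minimal sketch, assuming a hypothetical example_lock
 * that was initialized elsewhere with qemu_mutex_init():
 */
static QemuMutex example_lock;      /* hypothetical, for illustration only */
static int example_counter;

static void example_increment(void)
{
    QEMU_LOCK_GUARD(&example_lock); /* behaves like qemu_mutex_lock() here */
    example_counter++;              /* critical section */
}                                   /* unlocked automatically at scope exit,
                                     * including on any early return */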
struct rcu_head **old_tail;
node->next = NULL;
- old_tail = atomic_xchg(&tail, &node->next);
- atomic_mb_set(old_tail, node);
+ old_tail = qatomic_xchg(&tail, &node->next);
+ qatomic_mb_set(old_tail, node);
}
static struct rcu_head *try_dequeue(void)
* The tail, because it is the first step in the enqueuing.
* It is only the next pointers that might be inconsistent.
*/
- if (head == &dummy && atomic_mb_read(&tail) == &dummy.next) {
+ if (head == &dummy && qatomic_mb_read(&tail) == &dummy.next) {
abort();
}
* wrong and we need to wait until its enqueuer finishes the update.
*/
node = head;
- next = atomic_mb_read(&head->next);
+ next = qatomic_mb_read(&head->next);
if (!next) {
return NULL;
}
for (;;) {
int tries = 0;
- int n = atomic_read(&rcu_call_count);
+ int n = qatomic_read(&rcu_call_count);
/* Heuristically wait for a decent number of callbacks to pile up.
* Fetch rcu_call_count now, we only must process elements that were
g_usleep(10000);
if (n == 0) {
qemu_event_reset(&rcu_call_ready_event);
- n = atomic_read(&rcu_call_count);
+ n = qatomic_read(&rcu_call_count);
if (n == 0) {
#if defined(CONFIG_MALLOC_TRIM)
malloc_trim(4 * 1024 * 1024);
qemu_event_wait(&rcu_call_ready_event);
}
}
- n = atomic_read(&rcu_call_count);
+ n = qatomic_read(&rcu_call_count);
}
- atomic_sub(&rcu_call_count, n);
+ qatomic_sub(&rcu_call_count, n);
synchronize_rcu();
qemu_mutex_lock_iothread();
while (n > 0) {
{
node->func = func;
enqueue(node);
- atomic_inc(&rcu_call_count);
+ qatomic_inc(&rcu_call_count);
qemu_event_set(&rcu_call_ready_event);
}
+
+struct rcu_drain {
+ struct rcu_head rcu;
+ QemuEvent drain_complete_event;
+};
+
+static void drain_rcu_callback(struct rcu_head *node)
+{
+ struct rcu_drain *event = (struct rcu_drain *)node;
+ qemu_event_set(&event->drain_complete_event);
+}
+
+/*
+ * This function ensures that all pending RCU callbacks registered on
+ * the current thread have finished executing.
+ *
+ * It drops the big QEMU lock during the wait so that the RCU thread
+ * can process the callbacks.
+ */
+
+void drain_call_rcu(void)
+{
+ struct rcu_drain rcu_drain;
+ bool locked = qemu_mutex_iothread_locked();
+
+ memset(&rcu_drain, 0, sizeof(struct rcu_drain));
+ qemu_event_init(&rcu_drain.drain_complete_event, false);
+
+ if (locked) {
+ qemu_mutex_unlock_iothread();
+ }
+
+ /*
+ * RCU callbacks are invoked in the same order in which they are
+ * registered, so we can be sure that when 'drain_rcu_callback' is
+ * called, all RCU callbacks that were registered on this thread prior
+ * to calling this function have completed.
+ *
+ * Note that since we have only one global queue of RCU callbacks, we
+ * also end up waiting for most of the RCU callbacks that were
+ * registered on other threads, but this is a side effect that should
+ * not be relied upon.
+ */
+
+ call_rcu1(&rcu_drain.rcu, drain_rcu_callback);
+ qemu_event_wait(&rcu_drain.drain_complete_event);
+
+ if (locked) {
+ qemu_mutex_lock_iothread();
+ }
+}
+
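/*
 * For illustration, a minimal usage sketch for the new drain_call_rcu():
 * a thread that has queued reclamation work with call_rcu1() can use it to
 * make sure that work has already executed before proceeding.  struct
 * example_node and the example_* functions are made-up names used only for
 * this sketch.
 */
struct example_node {
    struct rcu_head rcu;
    char *data;
};

static void example_node_free(struct rcu_head *rcu)
{
    struct example_node *node = container_of(rcu, struct example_node, rcu);

    g_free(node->data);
    g_free(node);
}

static void example_node_delete(struct example_node *node)
{
    call_rcu1(&node->rcu, example_node_free);

    /* Callbacks run in registration order, so this returns only after
     * example_node_free() above has been invoked by the RCU thread.  */
    drain_call_rcu();
}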
void rcu_register_thread(void)
{
assert(rcu_reader.ctr == 0);