#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
void exit_tasks_rcu_start(void);
---- --void exit_tasks_rcu_stop(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
#define rcu_tasks_classic_qs(t, preempt) do { } while (0)
#define call_rcu_tasks call_rcu
#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
---- --static inline void exit_tasks_rcu_stop(void) { }
static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
"Illegal context switch in RCU-sched read-side critical section"); \
} while (0)
+ +++++// See RCU_LOCKDEP_WARN() for an explanation of the double call to
+ +++++// debug_lockdep_rcu_enabled().
+ +++++static inline bool lockdep_assert_rcu_helper(bool c)
+ +++++{
+ +++++ return debug_lockdep_rcu_enabled() &&
+ +++++ (c || !rcu_is_watching() || !rcu_lockdep_current_cpu_online()) &&
+ +++++ debug_lockdep_rcu_enabled();
+ +++++}
+ +++++
+ +++++/**
+ +++++ * lockdep_assert_in_rcu_read_lock - WARN if not protected by rcu_read_lock()
+ +++++ *
+ +++++ * Splats if lockdep is enabled and there is no rcu_read_lock() in effect.
+ +++++ */
+ +++++#define lockdep_assert_in_rcu_read_lock() \
+ +++++ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map)))
+ +++++
+ +++++/**
+ +++++ * lockdep_assert_in_rcu_read_lock_bh - WARN if not protected by rcu_read_lock_bh()
+ +++++ *
+ +++++ * Splats if lockdep is enabled and there is no rcu_read_lock_bh() in effect.
+ +++++ * Note that local_bh_disable() and friends do not suffice here; instead, an
+ +++++ * actual rcu_read_lock_bh() is required.
+ +++++ */
+ +++++#define lockdep_assert_in_rcu_read_lock_bh() \
+ +++++ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map)))
+ +++++
+ +++++/**
+ +++++ * lockdep_assert_in_rcu_read_lock_sched - WARN if not protected by rcu_read_lock_sched()
+ +++++ *
+ +++++ * Splats if lockdep is enabled and there is no rcu_read_lock_sched()
+ +++++ * in effect. Note that preempt_disable() and friends do not suffice here;
+ +++++ * instead, an actual rcu_read_lock_sched() is required.
+ +++++ */
+ +++++#define lockdep_assert_in_rcu_read_lock_sched() \
+ +++++ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map)))
+ +++++
+ +++++/**
+ +++++ * lockdep_assert_in_rcu_reader - WARN if not within some type of RCU reader
+ +++++ *
+ +++++ * Splats if lockdep is enabled and there is no RCU reader of any
+ +++++ * type in effect. Note that regions of code protected by things like
+ +++++ * preempt_disable(), local_bh_disable(), and local_irq_disable() all qualify
+ +++++ * as RCU readers.
+ +++++ *
+ +++++ * Note that this will never trigger in PREEMPT_NONE or PREEMPT_VOLUNTARY
+ +++++ * kernels that are not also built with PREEMPT_COUNT. But if you have
+ +++++ * lockdep enabled, you might as well also enable PREEMPT_COUNT.
+ +++++ */
+ +++++#define lockdep_assert_in_rcu_reader() \
+ +++++ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map) && \
+ +++++ !lock_is_held(&rcu_bh_lock_map) && \
+ +++++ !lock_is_held(&rcu_sched_lock_map) && \
+ +++++ preemptible()))
+ +++++
#else /* #ifdef CONFIG_PROVE_RCU */
#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
#define rcu_sleep_check() do { } while (0)
+ +++++#define lockdep_assert_in_rcu_read_lock() do { } while (0)
+ +++++#define lockdep_assert_in_rcu_read_lock_bh() do { } while (0)
+ +++++#define lockdep_assert_in_rcu_read_lock_sched() do { } while (0)
+ +++++#define lockdep_assert_in_rcu_reader() do { } while (0)
+ +++++
#endif /* #else #ifdef CONFIG_PROVE_RCU */
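
As an editorial illustration (struct my_data and my_read_value() are made-up names, not part of the patch), the new assertions let a function check that its caller holds the required flavor of RCU reader:

#include <linux/rcupdate.h>

struct my_data {
	int value;
};

/* Caller must hold rcu_read_lock(); lockdep splats if it does not. */
static int my_read_value(struct my_data __rcu **slot)
{
	struct my_data *p;

	lockdep_assert_in_rcu_read_lock();
	p = rcu_dereference(*slot);
	return p ? p->value : -1;
}
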
/*
pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
__func__, ssp, rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)),
rcu_seq_current(&sup->srcu_gp_seq), sup->srcu_gp_seq_needed);
------ return; /* Caller forgot to stop doing call_srcu()? */
++++++ return; // Caller forgot to stop doing call_srcu()?
++++++ // Or caller invoked start_poll_synchronize_srcu()
++++++ // and then cleanup_srcu_struct() before that grace
++++++ // period ended?
}
kfree(sup->node);
sup->node = NULL;
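
Editorial sketch (my_exit() is a hypothetical helper, not from the patch): a caller that used start_poll_synchronize_srcu() should let the corresponding grace period finish before calling cleanup_srcu_struct(), for example by polling the cookie to completion:

#include <linux/delay.h>
#include <linux/srcu.h>

static void my_exit(struct srcu_struct *ssp, unsigned long cookie)
{
	/* Wait out any grace period started via start_poll_synchronize_srcu(). */
	while (!poll_state_synchronize_srcu(ssp, cookie))
		msleep(1);
	cleanup_srcu_struct(ssp);
}
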
bool cbs;
bool last_lvl;
int cpu;
- ----- unsigned long flags;
unsigned long gpseq;
int idx;
unsigned long mask;
if (!(gpseq & counter_wrap_check))
for_each_possible_cpu(cpu) {
sdp = per_cpu_ptr(ssp->sda, cpu);
- ----- spin_lock_irqsave_rcu_node(sdp, flags);
+ +++++ spin_lock_irq_rcu_node(sdp);
if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
sdp->srcu_gp_seq_needed = gpseq;
if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
sdp->srcu_gp_seq_needed_exp = gpseq;
- ----- spin_unlock_irqrestore_rcu_node(sdp, flags);
+ +++++ spin_unlock_irq_rcu_node(sdp);
}
/* Callback initiation done, allow grace periods after next. */
*/
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
------ if (!rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, cookie))
++++++ if (cookie != SRCU_GET_STATE_COMPLETED &&
++++++ !rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, cookie))
return false;
// Ensure that the end of the SRCU grace period happens before
// any subsequent code that the caller might execute.
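
Editorial sketch building on the new check (my_srcu, my_cookie, and the helpers are made-up names; header choices are assumptions): a polling cookie can now be initialized to SRCU_GET_STATE_COMPLETED so that the very first poll reports completion even though no grace period was ever started:

#include <linux/srcu.h>

DEFINE_SRCU(my_srcu);
static unsigned long my_cookie = SRCU_GET_STATE_COMPLETED;

static void my_mark_update(void)
{
	my_cookie = start_poll_synchronize_srcu(&my_srcu);
}

static bool my_update_settled(void)
{
	/*
	 * True immediately for SRCU_GET_STATE_COMPLETED, otherwise true
	 * once the grace period recorded in my_cookie has ended.
	 */
	return poll_state_synchronize_srcu(&my_srcu, my_cookie);
}
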
.ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
.srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
rcu_sr_normal_gp_cleanup_work),
+ +++++ .srs_cleanups_pending = ATOMIC_INIT(0),
};
/* Dump rcu_node combining tree at boot to verify correct setup. */
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);
+ +++++static int nohz_full_patience_delay;
+ +++++module_param(nohz_full_patience_delay, int, 0444);
+ +++++static int nohz_full_patience_delay_jiffies;
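
The conversion from nohz_full_patience_delay to nohz_full_patience_delay_jiffies is not shown in this excerpt; assuming the module parameter is given in milliseconds, boot-time setup would presumably do something along these lines (hypothetical sketch, actual bounds checking and location not shown):

	nohz_full_patience_delay_jiffies = msecs_to_jiffies(nohz_full_patience_delay);
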
// Add delay to rcu_read_unlock() for strict grace periods.
static int rcu_unlock_delay;
ct_state_inc(RCU_DYNTICKS_IDX);
}
-- ----/*
-- ---- * Snapshot the ->dynticks counter with full ordering so as to allow
-- ---- * stable comparison of this counter with past and future snapshots.
-- ---- */
-- ----static int rcu_dynticks_snap(int cpu)
-- ----{
-- ---- smp_mb(); // Fundamental RCU ordering guarantee.
-- ---- return ct_dynticks_cpu_acquire(cpu);
-- ----}
-- ----
/*
* Return true if the snapshot returned from rcu_dynticks_snap()
* indicates that RCU is in an extended quiescent state.
*/
static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
{
-- ---- return snap != rcu_dynticks_snap(rdp->cpu);
++ ++++ /*
++ ++++ * The first failing snapshot is already ordered against the accesses
++ ++++ * performed by the remote CPU after it exits idle.
++ ++++ *
++ ++++ * The second snapshot therefore only needs to order against accesses
++ ++++ * performed by the remote CPU prior to entering idle and therefore can
++ ++++ * rely solely on acquire semantics.
++ ++++ */
++ ++++ return snap != ct_dynticks_cpu_acquire(rdp->cpu);
}
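
Editorial sketch of the ordering argument above, as a generic message-passing pattern rather than actual RCU code: the remote CPU's pre-idle accesses are ordered before its counter update by a full barrier on that CPU (a fully ordered atomic increment in the real code), so an acquire load of the counter suffices on the grace-period side:

/*
 * Remote CPU (entering idle):        Grace-period kthread (second snapshot):
 *   WRITE_ONCE(x, 1);                  snap2 = smp_load_acquire(&counter);
 *   smp_mb();                          if (snap2 != snap)
 *   counter++;                                 r1 = READ_ONCE(x);
 *
 * If the acquire load observes the post-idle counter value (snap2 != snap),
 * the subsequent READ_ONCE(x) is guaranteed to observe the pre-idle store,
 * so r1 == 1.
 */
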
/*
*/
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
-- ---- rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu);
++ ++++ /*
++ ++++ * Full ordering between remote CPU's post idle accesses and updater's
++ ++++ * accesses prior to current GP (and also the started GP sequence number)
++ ++++ * is enforced by rcu_seq_start() implicit barrier and even further by
++ ++++ * smp_mb__after_unlock_lock() barriers chained all the way throughout the
++ ++++ * rnp locking tree since rcu_gp_init() and up to the current leaf rnp
++ ++++ * locking.
++ ++++ *
++ ++++ * Ordering between remote CPU's pre idle accesses and post grace period
++ ++++ * updater's accesses is enforced by the below acquire semantic.
++ ++++ */
++ ++++ rdp->dynticks_snap = ct_dynticks_cpu_acquire(rdp->cpu);
if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
rcu_gpnum_ovf(rdp->mynode, rdp);
rcu_sr_put_wait_head(rcu);
}
+ +++++
+ +++++ /* Order list manipulations with atomic access. */
+ +++++ atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
}
/*
*/
static void rcu_sr_normal_gp_cleanup(void)
{
- ----- struct llist_node *wait_tail, *next, *rcu;
+ +++++ struct llist_node *wait_tail, *next = NULL, *rcu = NULL;
int done = 0;
wait_tail = rcu_state.srs_wait_tail;
break;
}
- ----- // concurrent sr_normal_gp_cleanup work might observe this update.
- ----- smp_store_release(&rcu_state.srs_done_tail, wait_tail);
+ +++++ /*
+ +++++ * Fast path: no more users to process except putting the second-last wait
+ +++++ * head if there are no in-flight workers. If there are in-flight workers,
+ +++++ * they will remove the last wait head.
+ +++++ *
+ +++++ * Note that the ACQUIRE orders atomic access with list manipulation.
+ +++++ */
+ +++++ if (wait_tail->next && wait_tail->next->next == NULL &&
+ +++++ rcu_sr_is_wait_head(wait_tail->next) &&
+ +++++ !atomic_read_acquire(&rcu_state.srs_cleanups_pending)) {
+ +++++ rcu_sr_put_wait_head(wait_tail->next);
+ +++++ wait_tail->next = NULL;
+ +++++ }
+ +++++
+ +++++ /* Concurrent sr_normal_gp_cleanup work might observe this update. */
ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
+ +++++ smp_store_release(&rcu_state.srs_done_tail, wait_tail);
/*
 * We schedule a work item in order to perform final processing
 * of outstanding users (if still left) and to release wait heads
 * added by the rcu_sr_normal_gp_init() call.
*/
- ----- queue_work(sync_wq, &rcu_state.srs_cleanup_work);
+ +++++ if (wait_tail->next) {
+ +++++ atomic_inc(&rcu_state.srs_cleanups_pending);
+ +++++ if (!queue_work(sync_wq, &rcu_state.srs_cleanup_work))
+ +++++ atomic_dec(&rcu_state.srs_cleanups_pending);
+ +++++ }
}
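
Editorial sketch of the in-flight-worker accounting added above, with made-up names (my_pending, my_work, my_kick, my_quiesced): the release on the worker-side decrement pairs with the acquire on the fast-path read, so a zero count also guarantees that the worker's list manipulations are visible:

#include <linux/atomic.h>
#include <linux/workqueue.h>

static atomic_t my_pending = ATOMIC_INIT(0);

static void my_work_fn(struct work_struct *work)
{
	/* ... detach and process list entries ... */
	atomic_dec_return_release(&my_pending);	/* Publish the list changes. */
}
static DECLARE_WORK(my_work, my_work_fn);

static void my_kick(struct workqueue_struct *wq)
{
	atomic_inc(&my_pending);
	if (!queue_work(wq, &my_work))	/* Already queued: undo the count. */
		atomic_dec(&my_pending);
}

static bool my_quiesced(void)
{
	/* Pairs with the release in my_work_fn(). */
	return !atomic_read_acquire(&my_pending);
}
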
/*
WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
/* Exclude CPU hotplug operations. */
rcu_for_each_leaf_node(rnp) {
- ----- local_irq_save(flags);
+ +++++ local_irq_disable();
arch_spin_lock(&rcu_state.ofl_lock);
raw_spin_lock_rcu_node(rnp);
if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
/* Nothing to do on this leaf rcu_node structure. */
raw_spin_unlock_rcu_node(rnp);
arch_spin_unlock(&rcu_state.ofl_lock);
- ----- local_irq_restore(flags);
+ +++++ local_irq_enable();
continue;
}
raw_spin_unlock_rcu_node(rnp);
arch_spin_unlock(&rcu_state.ofl_lock);
- ----- local_irq_restore(flags);
+ +++++ local_irq_enable();
}
rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
return 1;
/* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */
- ----- if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
+ +++++ gp_in_progress = rcu_gp_in_progress();
+ +++++ if ((user || rcu_is_cpu_rrupt_from_idle() ||
+ +++++ (gp_in_progress &&
+ +++++ time_before(jiffies, READ_ONCE(rcu_state.gp_start) +
+ +++++ nohz_full_patience_delay_jiffies))) &&
+ +++++ rcu_nohz_full_cpu())
return 0;
/* Is the RCU core waiting for a quiescent state from this CPU? */
- ----- gp_in_progress = rcu_gp_in_progress();
if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
return 1;
rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
INIT_WORK(&rdp->strict_work, strict_work_handler);
WARN_ON_ONCE(ct->dynticks_nesting != 1);
-- ---- WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu)));
++ ++++ WARN_ON_ONCE(rcu_dynticks_in_eqs(ct_dynticks_cpu(cpu)));
rdp->barrier_seq_snap = rcu_state.barrier_sequence;
rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
rdp->rcu_ofl_gp_state = RCU_GP_CLEANED;
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
bool needwake;
- ----- if (rcu_rdp_is_offloaded(rdp) ||
- ----- rcu_segcblist_empty(&rdp->cblist))
- ----- return; /* No callbacks to migrate. */
+ +++++ if (rcu_rdp_is_offloaded(rdp))
+ +++++ return;
raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
+ +++++ if (rcu_segcblist_empty(&rdp->cblist)) {
+ +++++ raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
+ +++++ return; /* No callbacks to migrate. */
+ +++++ }
+ +++++
WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
rcu_barrier_entrain(rdp);
my_rdp = this_cpu_ptr(&rcu_data);