docs.2022.04.20a: Documentation updates.
fixes.2022.04.20a: Miscellaneous fixes.
nocb.2022.04.11b: Callback-offloading updates.
rcu-tasks.2022.04.11b: RCU-tasks updates.
srcu.2022.05.03a: Put SRCU on a memory diet.
torture.2022.04.11b: Torture-test updates.
torture-tasks.2022.04.20a: Avoid torture testing changing RCU configuration.
torturescript.2022.04.20a: Torture-test scripting updates.
number avoids disturbing real-time workloads,
but lengthens grace periods.
+++ ++++ rcupdate.rcu_task_stall_info= [KNL]
+++ ++++ Set initial timeout in jiffies for RCU task stall
+++ ++++ informational messages, which give some indication
+++ ++++ of the problem for those not patient enough to
+++ ++++ wait for ten minutes. Informational messages are
+++ ++++ only printed prior to the stall-warning message
+++ ++++ for a given grace period. Disable with a value
+++ ++++ less than or equal to zero. Defaults to ten
+++ ++++ seconds. A change in value does not take effect
+++ ++++ until the beginning of the next grace period.
+++ ++++
+++ ++++ rcupdate.rcu_task_stall_info_mult= [KNL]
+++ ++++ Multiplier for time interval between successive
+++ ++++ RCU task stall informational messages for a given
+++ ++++ RCU tasks grace period. This value is clamped
+++ ++++ to one through ten, inclusive. It defaults to
+++ ++++ the value three, so that the first informational
+++ ++++ message is printed 10 seconds into the grace
+++ ++++ period, the second at 40 seconds, and the third
+++ ++++ at 160 seconds; the 600-second stall warning then
+++ ++++ preempts what would have been a fourth message at
+++ ++++ 640 seconds.
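
	(Worked example of the schedule produced by the two parameters
	above; a standalone user-space sketch of the documented
	arithmetic, not the kernel's implementation.)

		#include <stdio.h>

		int main(void)
		{
			int mult = 3;    /* rcupdate.rcu_task_stall_info_mult */
			int t = 10;      /* rcupdate.rcu_task_stall_info (seconds) */
			int stall = 600; /* rcupdate.rcu_task_stall_timeout (seconds) */

			while (t < stall) {
				printf("informational message at %d seconds\n", t);
				t += mult * t; /* next interval: multiplier times elapsed time */
			}
			printf("stall warning at %d seconds preempts the message at %d\n",
			       stall, t);
			return 0;
		}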
+++ ++++
rcupdate.rcu_task_stall_timeout= [KNL]
--- ---- Set timeout in jiffies for RCU task stall warning
--- ---- messages. Disable with a value less than or equal
--- ---- to zero.
+++ ++++ Set timeout in jiffies for RCU task stall
+++ ++++ warning messages. Disable with a value less
+++ ++++ than or equal to zero. Defaults to ten minutes.
+++ ++++ A change in value does not take effect until
+++ ++++ the beginning of the next grace period.
rcupdate.rcu_self_test= [KNL]
Run the RCU early boot self tests
smart2= [HW]
Format: <io1>[,<io2>[,...,<io8>]]
+ ++++++ smp.csd_lock_timeout= [KNL]
+ ++++++ Specify the period of time in milliseconds
+ ++++++ that smp_call_function() and friends will wait
+ ++++++ for a CPU to release the CSD lock. This is
+ ++++++ useful when diagnosing bugs involving CPUs
+ ++++++ disabling interrupts for extended periods
+ ++++++ of time. Defaults to 5,000 milliseconds, and
+ ++++++ setting a value of zero disables this feature.
+ ++++++ This feature may be more efficiently disabled
+ ++++++ using the csdlock_debug- kernel parameter.
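
	(Illustrative sketch only: the general shape of such a timed wait.
	csd_lock_busy() and csd_lock_timeout_ms are hypothetical names;
	the real CSD-lock wait logic lives in kernel/smp.c.)

		static int csd_lock_timeout_ms = 5000;	/* smp.csd_lock_timeout */

		static void csd_lock_wait_sketch(void)
		{
			unsigned long deadline = jiffies + msecs_to_jiffies(csd_lock_timeout_ms);

			while (csd_lock_busy()) {	/* hypothetical predicate */
				if (csd_lock_timeout_ms > 0 && time_after(jiffies, deadline)) {
					pr_warn("CSD lock still held after %d ms\n",
						csd_lock_timeout_ms);
					deadline = jiffies + msecs_to_jiffies(csd_lock_timeout_ms);
				}
				cpu_relax();
			}
		}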
+ ++++++
smsc-ircc2.nopnp [HW] Don't use PNP to discover SMC devices
smsc-ircc2.ircc_cfg= [HW] Device configuration I/O port
smsc-ircc2.ircc_sir= [HW] SIR base I/O port
off: Disable mitigation and remove
performance impact to RDRAND and RDSEED
++++ +++ srcutree.big_cpu_lim [KNL]
++++ +++ Specifies the number of CPUs constituting a
++++ +++ large system, such that srcu_struct structures
++++ +++ should immediately allocate an srcu_node array.
++++ +++ This kernel-boot parameter defaults to 128,
++++ +++ but takes effect only when the low-order four
++++ +++ bits of srcutree.convert_to_big equal 3
++++ +++ (decide at boot).
++++ +++
++++ +++ srcutree.convert_to_big [KNL]
++++ +++ Specifies under what conditions an SRCU tree
++++ +++ srcu_struct structure will be converted to big
++++ +++ form, that is, with an srcu_node tree:
++++ +++
++++ +++ 0: Never.
++++ +++ 1: At init_srcu_struct() time.
++++ +++ 2: When rcutorture decides to.
++++ +++ 3: Decide at boot time (default).
++++ +++ 0x1X: Above plus if high contention.
++++ +++
++++ +++ Either way, the srcu_node tree will be sized based
++++ +++ on the actual runtime number of CPUs (nr_cpu_ids)
++++ +++ instead of the compile-time CONFIG_NR_CPUS.
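
	(Sketch of the boot-time decision described above, using a
	hypothetical helper name; the kernel's actual checks live in
	kernel/rcu/srcutree.c, and the exact threshold comparison is
	illustrative.)

		static bool convert_at_init(int convert_to_big, int nr_cpu_ids, int big_cpu_lim)
		{
			switch (convert_to_big & 0x0f) {	/* low-order four bits */
			case 1:		/* at init_srcu_struct() time */
				return true;
			case 3:		/* decide at boot (default) */
				return nr_cpu_ids > big_cpu_lim;
			default:	/* never, or left to rcutorture */
				return false;
			}
		}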
++++ +++
srcutree.counter_wrap_check [KNL]
Specifies how frequently to check for
grace-period sequence counter wrap for the
expediting. Set to zero to disable automatic
expediting.
++++ +++ srcutree.small_contention_lim [KNL]
++++ +++ Specifies the number of update-side contention
++++ +++ events per jiffy that will be tolerated before
++++ +++ initiating a conversion of an srcu_struct
++++ +++ structure to big form. Note that the value of
++++ +++ srcutree.convert_to_big must have the 0x10 bit
++++ +++ set for contention-based conversions to occur.
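
	(Again a hypothetical sketch of the documented trigger, not the
	kernel/rcu/srcutree.c code.)

		static bool convert_on_contention(int convert_to_big,
						  unsigned long events_this_jiffy,
						  unsigned long small_contention_lim)
		{
			return (convert_to_big & 0x10) &&	/* 0x1X form required */
			       events_this_jiffy > small_contention_lim;
		}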
++++ +++
ssbd= [ARM64,HW]
Speculative Store Bypass Disable control
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
+++++ ++static inline void rcu_gp_slow_register(atomic_t *rgssp) { }
+++++ ++static inline void rcu_gp_slow_unregister(atomic_t *rgssp) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
+++++ ++void rcu_gp_slow_register(atomic_t *rgssp);
+++++ ++void rcu_gp_slow_unregister(atomic_t *rgssp);
#endif /* #else #ifdef CONFIG_TINY_RCU */
#ifdef CONFIG_RCU_NOCB_CPU
-- -----bool rcu_is_nocb_cpu(int cpu);
void rcu_bind_current_to_nocb(void);
#else
-- -----static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
static inline void rcu_bind_current_to_nocb(void) { }
#endif
.name = "busted_srcud"
};
++++++ +/*
++++++ + * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
++++++ + * This implementation does not necessarily work well with CPU hotplug.
++++++ + */
++++++ +
++++++ +static void synchronize_rcu_trivial(void)
++++++ +{
++++++ + int cpu;
++++++ +
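++++++ +	// Migrating to each online CPU in turn forces a context
++++++ +	// switch on every CPU, which in this CONFIG_PREEMPT=n-only
++++++ +	// variant guarantees that all pre-existing readers are done.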
++++++ + for_each_online_cpu(cpu) {
++++++ + rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
++++++ + WARN_ON_ONCE(raw_smp_processor_id() != cpu);
++++++ + }
++++++ +}
++++++ +
++++++ +static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
++++++ +{
++++++ + preempt_disable();
++++++ + return 0;
++++++ +}
++++++ +
++++++ +static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
++++++ +{
++++++ + preempt_enable();
++++++ +}
++++++ +
++++++ +static struct rcu_torture_ops trivial_ops = {
++++++ + .ttype = RCU_TRIVIAL_FLAVOR,
++++++ + .init = rcu_sync_torture_init,
++++++ + .readlock = rcu_torture_read_lock_trivial,
++++++ + .read_delay = rcu_read_delay, /* just reuse rcu's version. */
++++++ + .readunlock = rcu_torture_read_unlock_trivial,
++++++ + .readlock_held = torture_readlock_not_held,
++++++ + .get_gp_seq = rcu_no_completed,
++++++ + .sync = synchronize_rcu_trivial,
++++++ + .exp_sync = synchronize_rcu_trivial,
++++++ + .fqs = NULL,
++++++ + .stats = NULL,
++++++ + .irq_capable = 1,
++++++ + .name = "trivial"
++++++ +};
++++++ +
++++++ +#ifdef CONFIG_TASKS_RCU
++++++ +
/*
* Definitions for RCU-tasks torture testing.
*/
.name = "tasks"
};
------ -/*
------ - * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
------ - * This implementation does not necessarily work well with CPU hotplug.
------ - */
++++++ +#define TASKS_OPS &tasks_ops,
------ -static void synchronize_rcu_trivial(void)
------ -{
------ - int cpu;
++++++ +#else // #ifdef CONFIG_TASKS_RCU
------ - for_each_online_cpu(cpu) {
------ - rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
------ - WARN_ON_ONCE(raw_smp_processor_id() != cpu);
------ - }
------ -}
++++++ +#define TASKS_OPS
------ -static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
------ -{
------ - preempt_disable();
------ - return 0;
------ -}
++++++ +#endif // #else #ifdef CONFIG_TASKS_RCU
------ -static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
------ -{
------ - preempt_enable();
------ -}
------ -static struct rcu_torture_ops trivial_ops = {
------ - .ttype = RCU_TRIVIAL_FLAVOR,
------ - .init = rcu_sync_torture_init,
------ - .readlock = rcu_torture_read_lock_trivial,
------ - .read_delay = rcu_read_delay, /* just reuse rcu's version. */
------ - .readunlock = rcu_torture_read_unlock_trivial,
------ - .readlock_held = torture_readlock_not_held,
------ - .get_gp_seq = rcu_no_completed,
------ - .sync = synchronize_rcu_trivial,
------ - .exp_sync = synchronize_rcu_trivial,
------ - .fqs = NULL,
------ - .stats = NULL,
------ - .irq_capable = 1,
------ - .name = "trivial"
------ -};
++++++ +#ifdef CONFIG_TASKS_RUDE_RCU
/*
* Definitions for rude RCU-tasks torture testing.
.name = "tasks-rude"
};
++++++ +#define TASKS_RUDE_OPS &tasks_rude_ops,
++++++ +
++++++ +#else // #ifdef CONFIG_TASKS_RUDE_RCU
++++++ +
++++++ +#define TASKS_RUDE_OPS
++++++ +
++++++ +#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU
++++++ +
++++++ +
++++++ +#ifdef CONFIG_TASKS_TRACE_RCU
++++++ +
/*
* Definitions for tracing RCU-tasks torture testing.
*/
.name = "tasks-tracing"
};
++++++ +#define TASKS_TRACING_OPS &tasks_tracing_ops,
++++++ +
++++++ +#else // #ifdef CONFIG_TASKS_TRACE_RCU
++++++ +
++++++ +#define TASKS_TRACING_OPS
++++++ +
++++++ +#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU
++++++ +
++++++ +
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
if (!cur_ops->gp_diff)
" GP expediting controlled from boot/sysfs for %s.\n",
torture_type, cur_ops->name);
if (WARN_ONCE(nsynctypes == 0,
----- -- "rcu_torture_writer: No update-side primitives.\n")) {
+++++ ++ "%s: No update-side primitives.\n", __func__)) {
/*
* No update-side primitives, so don't try updating.
* The resulting test won't be testing much, hence the
*/
rcu_torture_writer_state = RTWS_STOPPING;
torture_kthread_stopping("rcu_torture_writer");
+++++ ++ return 0;
}
do {
VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
set_user_nice(current, MAX_NICE);
+++++ ++ if (WARN_ONCE(nsynctypes == 0,
+++++ ++ "%s: No update-side primitives.\n", __func__)) {
+++++ ++ /*
+++++ ++ * No update-side primitives, so don't try updating.
+++++ ++ * The resulting test won't be testing much, hence the
+++++ ++ * above WARN_ONCE().
+++++ ++ */
+++++ ++ torture_kthread_stopping("rcu_torture_fakewriter");
+++++ ++ return 0;
+++++ ++ }
+++++ ++
do {
torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
if (cur_ops->cb_barrier != NULL &&
pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
cur_ops->cb_barrier();
}
+++++ ++ rcu_gp_slow_unregister(NULL);
return;
}
if (!cur_ops) {
torture_cleanup_end();
+++++ ++ rcu_gp_slow_unregister(NULL);
return;
}
else
rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
torture_cleanup_end();
+++++ ++ rcu_gp_slow_unregister(&rcu_fwd_cb_nodelay);
}
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
int flags = 0;
unsigned long gp_seq = 0;
static struct rcu_torture_ops *torture_ops[] = {
------ - &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
------ - &busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
------ - &tasks_tracing_ops, &trivial_ops,
++++++ + &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
++++++ + TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
++++++ + &trivial_ops,
};
if (!torture_init_begin(torture_type, verbose))
if (object_debug)
rcu_test_debug_objects();
torture_init_end();
+++++ ++ rcu_gp_slow_register(&rcu_fwd_cb_nodelay);
return 0;
unwind:
rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
+ ++++++ if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap))
+ ++++++ WRITE_ONCE(rdp->last_sched_clock, jiffies);
WRITE_ONCE(rdp->gpwrap, false);
rcu_gpnum_ovf(rnp, rdp);
return ret;
rcu_gp_kthread_wake();
}
+++++ ++static atomic_t *rcu_gp_slow_suppress;
+++++ ++
+++++ ++/* Register a counter to suppress debugging grace-period delays. */
+++++ ++void rcu_gp_slow_register(atomic_t *rgssp)
+++++ ++{
+++++ ++ WARN_ON_ONCE(rcu_gp_slow_suppress);
+++++ ++
+++++ ++ WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
+++++ ++}
+++++ ++EXPORT_SYMBOL_GPL(rcu_gp_slow_register);
+++++ ++
+++++ ++/* Unregister a counter, with NULL meaning that any counter will do. */
+++++ ++void rcu_gp_slow_unregister(atomic_t *rgssp)
+++++ ++{
+++++ ++ WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress);
+++++ ++
+++++ ++ WRITE_ONCE(rcu_gp_slow_suppress, NULL);
+++++ ++}
+++++ ++EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);
+++++ ++
+++++ ++static bool rcu_gp_slow_is_suppressed(void)
+++++ ++{
+++++ ++ atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);
+++++ ++
+++++ ++ return rgssp && atomic_read(rgssp);
+++++ ++}
+++++ ++
static void rcu_gp_slow(int delay)
{
----- -- if (delay > 0 &&
----- -- !(rcu_seq_ctr(rcu_state.gp_seq) %
----- -- (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
+++++ ++ if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
+++++ ++ !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
schedule_timeout_idle(delay);
}
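
	(Usage sketch, modeled on rcutorture's registration of
	rcu_fwd_cb_nodelay elsewhere in this series; my_nodelay is a
	hypothetical counter.)

		static atomic_t my_nodelay;	/* hypothetical */

		static void flood_without_gp_slowdown(void)
		{
			rcu_gp_slow_register(&my_nodelay);	/* typically once, at init */
			atomic_inc(&my_nodelay);	/* nonzero => rcu_gp_slow() skips its delay */
			/* ... post the callback flood ... */
			atomic_dec(&my_nodelay);
			rcu_gp_slow_unregister(&my_nodelay);	/* typically once, at cleanup */
		}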
/* Advance CBs to reduce false positives below. */
offloaded = rcu_rdp_is_offloaded(rdp);
if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
+ ++++++
+ ++++++ // We get here if a grace period was needed (“needgp”)
+ ++++++ // and the above call to rcu_accelerate_cbs() did not set
+ ++++++ // the RCU_GP_FLAG_INIT bit in ->gp_flags (which records
+ ++++++ // the need for another grace period). The purpose
+ ++++++ // of the “offloaded” check is to avoid invoking
+ ++++++ // rcu_accelerate_cbs() on an offloaded CPU because we do not
+ ++++++ // hold the ->nocb_lock needed to safely access an offloaded
+ ++++++ // ->cblist. We do not want to acquire that lock because
+ ++++++ // it can be heavily contended during callback floods.
+ ++++++
WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
- ------ trace_rcu_grace_period(rcu_state.name,
- ------ rcu_state.gp_seq,
- ------ TPS("newreq"));
+ ++++++ trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
} else {
- ------ WRITE_ONCE(rcu_state.gp_flags,
- ------ rcu_state.gp_flags & RCU_GP_FLAG_INIT);
+ ++++++
+ ++++++ // We get here either if there is no need for an
+ ++++++ // additional grace period or if rcu_accelerate_cbs() has
+ ++++++ // already set the RCU_GP_FLAG_INIT bit in ->gp_flags.
+ ++++++ // So all we need to do is to clear all of the other
+ ++++++ // ->gp_flags bits.
+ ++++++
+ ++++++ WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
}
raw_spin_unlock_irq_rcu_node(rnp);
*/
void rcu_sched_clock_irq(int user)
{
+ ++++++ unsigned long j;
+ ++++++
+ ++++++ if (IS_ENABLED(CONFIG_PROVE_RCU)) {
+ ++++++ j = jiffies;
+ ++++++ WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock)));
+ ++++++ __this_cpu_write(rcu_data.last_sched_clock, j);
+ ++++++ }
trace_rcu_utilization(TPS("Start scheduler-tick"));
lockdep_assert_irqs_disabled();
raw_cpu_inc(rcu_data.ticks_this_gp);
rcu_flavor_sched_clock_irq(user);
if (rcu_pending(user))
invoke_rcu_core();
+++ ++++ if (user)
+++ ++++ rcu_tasks_classic_qs(current, false);
lockdep_assert_irqs_disabled();
trace_rcu_utilization(TPS("End scheduler-tick"));
{
int ret;
- ------ if (IS_ENABLED(CONFIG_PREEMPTION))
+ ++++++ // Invoking preempt_model_*() too early gets a splat.
+ ++++++ if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE ||
+ ++++++ preempt_model_full() || preempt_model_rt())
return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
might_sleep(); /* Check for RCU read-side critical section. */
preempt_disable();
rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
+ ++++++ rdp->last_sched_clock = jiffies;
rdp->cpu = cpu;
rcu_boot_init_nocb_percpu_data(rdp);
}
struct rcu_node *rnp;
struct sched_param sp;
struct task_struct *t;
++ +++++ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
rcu_scheduler_fully_active = 1;
t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
wake_up_process(t);
-- ----- rcu_spawn_nocb_kthreads();
-- ----- rcu_spawn_boost_kthreads();
++ +++++ /* This is a pre-SMP initcall; we expect a single CPU. */
++ +++++ WARN_ON(num_online_cpus() > 1);
++ +++++ /*
++ +++++ * These kthreads could not be created in rcu_init() -> rcutree_prepare_cpu()
++ +++++ * because rcu_scheduler_fully_active was not yet set.
++ +++++ */
++ +++++ rcu_spawn_cpu_nocb_kthread(smp_processor_id());
++ +++++ rcu_spawn_one_boost_kthread(rdp->mynode);
rcu_spawn_core_kthreads();
return 0;
}
void __init rcu_init(void)
{
-- ----- int cpu;
++ +++++ int cpu = smp_processor_id();
rcu_early_boot_tests();
* or the scheduler are operational.
*/
pm_notifier(rcu_pm_notify, 0);
-- ----- for_each_online_cpu(cpu) {
-- ----- rcutree_prepare_cpu(cpu);
-- ----- rcu_cpu_starting(cpu);
-- ----- rcutree_online_cpu(cpu);
-- ----- }
++ +++++ WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
++ +++++ rcutree_prepare_cpu(cpu);
++ +++++ rcu_cpu_starting(cpu);
++ +++++ rcutree_online_cpu(cpu);
/* Create workqueue for Tree SRCU and for expedited GPs. */
rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
unsigned long rcu_onl_gp_seq; /* ->gp_seq at last online. */
short rcu_onl_gp_flags; /* ->gp_flags at last online. */
unsigned long last_fqs_resched; /* Time of last rcu_resched(). */
+ ++++++ unsigned long last_sched_clock; /* Jiffies of last rcu_sched_clock_irq(). */
int cpu;
};
arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
/* Synchronize offline with */
/* GP pre-initialization. */
++ +++++ int nocb_is_setup; /* nocb is set up at boot */
};
/* Values for rcu_state structure's gp_flags field. */
static bool rcu_is_callbacks_kthread(void);
static void rcu_cpu_kthread_setup(unsigned int cpu);
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
-- -----static void __init rcu_spawn_boost_kthreads(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
static void rcu_preempt_deferred_qs(struct task_struct *t);
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_cpu_nocb_kthread(int cpu);
-- -----static void __init rcu_spawn_nocb_kthreads(void);
static void show_rcu_nocb_state(struct rcu_data *rdp);
static void rcu_nocb_lock(struct rcu_data *rdp);
static void rcu_nocb_unlock(struct rcu_data *rdp);
t->rcu_read_unlock_special.s = 0;
if (special.b.need_qs) {
if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
+ ++++++ rdp->cpu_no_qs.b.norm = false;
rcu_report_qs_rdp(rdp);
udelay(rcu_unlock_delay);
} else {
expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) {
// Get scheduler to re-evaluate and call hooks.
// If !IRQ_WORK, FQS scan will eventually IPI.
- ------ init_irq_work(&rdp->defer_qs_iw, rcu_preempt_deferred_qs_handler);
+ ++++++ if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
+ ++++++ IS_ENABLED(CONFIG_PREEMPT_RT))
+ ++++++ rdp->defer_qs_iw = IRQ_WORK_INIT_HARD(
+ ++++++ rcu_preempt_deferred_qs_handler);
+ ++++++ else
+ ++++++ init_irq_work(&rdp->defer_qs_iw,
+ ++++++ rcu_preempt_deferred_qs_handler);
rdp->defer_qs_iw_pending = true;
irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
}
__releases(rnp->lock)
{
raw_lockdep_assert_held_rcu_node(rnp);
- ------ if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
+ ++++++ if (!rnp->boost_kthread_task ||
+ ++++++ (!rcu_preempt_blocked_readers_cgp(rnp) && !rnp->exp_tasks)) {
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
return;
}
free_cpumask_var(cm);
}
-- -----/*
-- ----- * Spawn boost kthreads -- called as soon as the scheduler is running.
-- ----- */
-- -----static void __init rcu_spawn_boost_kthreads(void)
-- -----{
-- ----- struct rcu_node *rnp;
-- -----
-- ----- rcu_for_each_leaf_node(rnp)
-- ----- if (rcu_rnp_online_cpus(rnp))
-- ----- rcu_spawn_one_boost_kthread(rnp);
-- -----}
-- -----
#else /* #ifdef CONFIG_RCU_BOOST */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
}
-- -----static void __init rcu_spawn_boost_kthreads(void)
-- -----{
-- -----}
-- -----
#endif /* #else #ifdef CONFIG_RCU_BOOST */
/*
do_kasan=yes
do_kcsan=no
do_clocksourcewd=yes
+++++ ++do_rt=yes
# doyesno - Helper function for yes/no arguments
function doyesno () {
echo " --do-rcuscale / --do-no-rcuscale"
echo " --do-rcutorture / --do-no-rcutorture"
echo " --do-refscale / --do-no-refscale"
+++++ ++ echo " --do-rt / --do-no-rt"
echo " --do-scftorture / --do-no-scftorture"
echo " --duration [ <minutes> | <hours>h | <days>d ]"
echo " --kcsan-kmake-arg kernel-make-arguments"
do_scftorture=yes
do_rcuscale=yes
do_refscale=yes
+++++ ++ do_rt=yes
do_kvfree=yes
do_kasan=yes
do_kcsan=yes
do_scftorture=no
do_rcuscale=no
do_refscale=no
+++++ ++ do_rt=no
do_kvfree=no
do_kasan=no
do_kcsan=no
--do-refscale|--do-no-refscale)
do_refscale=`doyesno "$1" --do-refscale`
;;
+++++ ++ --do-rt|--do-no-rt)
+++++ ++ do_rt=`doyesno "$1" --do-rt`
+++++ ++ ;;
--do-scftorture|--do-no-scftorture)
do_scftorture=`doyesno "$1" --do-scftorture`
;;
echo " --- make clean" > "$amcdir/Make.out" 2>&1
make -j$MAKE_ALLOTED_CPUS clean >> "$amcdir/Make.out" 2>&1
echo " --- make allmodconfig" >> "$amcdir/Make.out" 2>&1
+++++++ cp .config $amcdir
make -j$MAKE_ALLOTED_CPUS allmodconfig >> "$amcdir/Make.out" 2>&1
echo " --- make " >> "$amcdir/Make.out" 2>&1
make -j$MAKE_ALLOTED_CPUS >> "$amcdir/Make.out" 2>&1
if test "$do_scftorture" = "yes"
then
------- torture_bootargs="scftorture.nthreads=$HALF_ALLOTED_CPUS torture.disable_onoff_at_boot"
------- torture_set "scftorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture scf --allcpus --duration "$duration_scftorture" --configs "$configs_scftorture" --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 1G --trust-make
+++++++ torture_bootargs="scftorture.nthreads=$HALF_ALLOTED_CPUS torture.disable_onoff_at_boot csdlock_debug=1"
+++++++ torture_set "scftorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture scf --allcpus --duration "$duration_scftorture" --configs "$configs_scftorture" --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 2G --trust-make
+++++ + fi
+++++ +
+++++ ++if test "$do_rt" = "yes"
+++++ ++then
+++++ ++ # With all post-boot grace periods forced to normal.
+++++ ++ torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 rcupdate.rcu_normal=1"
+++++ ++ torture_set "rcurttorture" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "TREE03" --trust-make
+++++ ++
+++++ ++ # With all post-boot grace periods forced to expedited.
+++++ ++ torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 rcupdate.rcu_expedited=1"
+++++ ++ torture_set "rcurttorture-exp" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "TREE03" --trust-make
+fi
+
if test "$do_refscale" = yes
then
primlist="`grep '\.name[ ]*=' kernel/rcu/refscale.c | sed -e 's/^[^"]*"//' -e 's/".*$//'`"
for prim in $primlist
do
torture_bootargs="refscale.scale_type="$prim" refscale.nreaders=$HALF_ALLOTED_CPUS refscale.loops=10000 refscale.holdoff=20 torture.disable_onoff_at_boot"
------- torture_set "refscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture refscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --bootargs "verbose_batched=$VERBOSE_BATCH_CPUS torture.verbose_sleep_frequency=8 torture.verbose_sleep_duration=$VERBOSE_BATCH_CPUS" --trust-make
+++++++ torture_set "refscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture refscale --allcpus --duration 5 --kconfig "CONFIG_TASKS_TRACE_RCU=y CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --bootargs "verbose_batched=$VERBOSE_BATCH_CPUS torture.verbose_sleep_frequency=8 torture.verbose_sleep_duration=$VERBOSE_BATCH_CPUS" --trust-make
done
if test "$do_rcuscale" = yes
for prim in $primlist
do
torture_bootargs="rcuscale.scale_type="$prim" rcuscale.nwriters=$HALF_ALLOTED_CPUS rcuscale.holdoff=20 torture.disable_onoff_at_boot"
------- torture_set "rcuscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --trust-make
+++++++ torture_set "rcuscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 5 --kconfig "CONFIG_TASKS_TRACE_RCU=y CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --trust-make
done
if test "$do_kvfree" = "yes"
then
torture_bootargs="rcuscale.kfree_rcu_test=1 rcuscale.kfree_nthreads=16 rcuscale.holdoff=20 rcuscale.kfree_loops=10000 torture.disable_onoff_at_boot"
------- torture_set "rcuscale-kvfree" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 10 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 1G --trust-make
+++++++ torture_set "rcuscale-kvfree" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 10 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 2G --trust-make
fi
if test "$do_clocksourcewd" = "yes"
CONFIG_PREEMPT_NONE=y
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=n
+++++++ CONFIG_PREEMPT_DYNAMIC=n
CONFIG_DEBUG_LOCK_ALLOC=n
CONFIG_PROVE_LOCKING=n
#CHECK#CONFIG_PROVE_RCU=n
++++++ +CONFIG_FORCE_TASKS_TRACE_RCU=y
++++++ +#CHECK#CONFIG_TASKS_TRACE_RCU=y
CONFIG_TASKS_TRACE_RCU_READ_MB=y
CONFIG_RCU_EXPERT=y