to extract confidential information from the kernel
are also disabled.
+++++ locktorture.acq_writer_lim= [KNL]
+++++ Set the time limit in jiffies for a lock
+++++ acquisition. Acquisitions exceeding this limit
+++++ will result in a splat once they do complete.
+++++
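As a rough illustration of what acq_writer_lim checks for (a sketch only, not locktorture's actual implementation; do_the_write_acquisition() and the acq_writer_lim variable are stand-in names here), the idea is to record jiffies before the acquisition and splat after it completes if the limit was exceeded:

/* Sketch: splat if a write acquisition exceeds acq_writer_lim jiffies. */
static void timed_write_acquire(void)
{
	unsigned long j = jiffies;

	do_the_write_acquisition();	/* hypothetical stand-in for the lock op */
	if (acq_writer_lim && time_after(jiffies, j + acq_writer_lim))
		WARN_ONCE(1, "write acquisition took %lu jiffies\n",
			  jiffies - j);
}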
+++++ locktorture.bind_readers= [KNL]
+++++ Specify the list of CPUs to which the readers are
+++++ to be bound.
+++++
+++++ locktorture.bind_writers= [KNL]
+++++ Specify the list of CPUs to which the writers are
+++++ to be bound.
+++++
+++++ locktorture.call_rcu_chains= [KNL]
+++++ Specify the number of self-propagating call_rcu()
+++++ chains to set up. These are used to ensure that
+++++ there is a high probability of an RCU grace period
+++++ in progress at any given time. Defaults to 0,
+++++ which disables these call_rcu() chains.
+++++
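To make the call_rcu_chains description concrete, here is a minimal sketch of one self-propagating chain (illustrative only, not locktorture's actual code; chain_stop and the function names are made up): each callback simply re-posts itself, so once the chain is started there is almost always a callback waiting for a grace period.

#include <linux/rcupdate.h>

static struct rcu_head chain_rh;
static bool chain_stop;		/* hypothetical shutdown flag */

static void chain_cb(struct rcu_head *rhp)
{
	/* Re-post this callback so an RCU grace period stays in flight. */
	if (!READ_ONCE(chain_stop))
		call_rcu(rhp, chain_cb);
}

static void start_call_rcu_chain(void)
{
	call_rcu(&chain_rh, chain_cb);
}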
+++++ locktorture.long_hold= [KNL]
+++++ Specify the duration in milliseconds for the
+++++ occasional long-duration lock hold time. Defaults
+++++ to 100 milliseconds. Select 0 to disable.
+++++
+++++ locktorture.nested_locks= [KNL]
+++++ Specify the maximum lock nesting depth that
+++++ locktorture is to exercise, up to a limit of 8
+++++ (MAX_NESTED_LOCKS). Specify zero to disable.
+++++ Note that this parameter is ineffective on types
+++++ of locks that do not support nested acquisition.
+++++
locktorture.nreaders_stress= [KNL]
Set the number of locking read-acquisition kthreads.
Defaults to being automatically set based on the
number of online CPUs.

locktorture.onoff_interval= [KNL]
Set time (s) between CPU-hotplug operations, or
zero to disable CPU-hotplug testing.
+++++ locktorture.rt_boost= [KNL]
+++++ Do periodic testing of real-time lock priority
+++++ boosting. Select 0 to disable, 1 to boost
+++++ only rt_mutex, and 2 to boost unconditionally.
+++++ Defaults to 2, which might seem to be an
+++++ odd choice, but which should be harmless for
+++++ non-real-time spinlocks, due to their disabling
+++++ of preemption. Note that non-realtime mutexes
+++++ disable boosting.
+++++
+++++ locktorture.rt_boost_factor= [KNL]
+++++ Number that determines how often and for how
+++++ long priority boosting is exercised. This is
+++++ scaled down by the number of writers, so that the
+++++ number of boosts per unit time remains roughly
+++++ constant as the number of writers increases.
+++++ On the other hand, the duration of each boost
+++++ increases with the number of writers.
+++++
locktorture.shuffle_interval= [KNL]
Set task-shuffle interval (jiffies). Shuffling
tasks allows some CPUs to go into dyntick-idle
mode during the locktorture test.

locktorture.torture_type= [KNL]
Specify the locking implementation to test.
+++++ locktorture.verbose= [KNL]
+++++ Enable additional printk() statements.
+++++
locktorture.writer_fifo= [KNL]
Run the write-side locktorture kthreads at
sched_set_fifo() real-time priority.
----- locktorture.verbose= [KNL]
----- Enable additional printk() statements.
-----
logibm.irq= [HW,MOUSE] Logitech Bus Mouse Driver
Format: <irq>
Set maximum number of finished RCU callbacks to
process in one batch.
+ ++++ rcutree.do_rcu_barrier= [KNL]
+ ++++ Request a call to rcu_barrier(). This is
+ ++++ throttled so that userspace tests can safely
+ ++++ hammer on the sysfs variable if they so choose.
+ ++++ If triggered before the RCU grace-period machinery
+ ++++ is fully active, this will error out with EAGAIN.
+ ++++
rcutree.dump_tree= [KNL]
Dump the structure of the rcu_node combining tree
out at early boot. This is used for diagnostic
purposes.

refscale.holdoff= [KNL]
Set test-start holdoff period. The purpose of this
parameter is to delay the start of the
test until boot completes in order to avoid
interference.
+++ ++ refscale.lookup_instances= [KNL]
+++ ++ Number of data elements to use for the forms of
+++ ++ SLAB_TYPESAFE_BY_RCU testing. A negative number
+++ ++ is negated and multiplied by nr_cpu_ids, while
+++ ++ zero specifies nr_cpu_ids.
+++ ++
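The sign conventions for refscale.lookup_instances boil down to the following arithmetic (a sketch with a made-up helper name, not the module's actual code):

/* Illustrative only: map the module parameter to an element count. */
static int effective_lookup_instances(int lookup_instances)
{
	if (lookup_instances < 0)
		return -lookup_instances * nr_cpu_ids;	/* negate and scale */
	if (!lookup_instances)
		return nr_cpu_ids;			/* zero means nr_cpu_ids */
	return lookup_instances;
}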
refscale.loops= [KNL]
Set the number of loops over the synchronization
primitive under test. Increasing this number
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
+++++ #include <linux/rcu_notifier.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
int cpu;
for_each_online_cpu(cpu) {
----- rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
+++++ torture_sched_setaffinity(current->pid, cpumask_of(cpu));
WARN_ON_ONCE(raw_smp_processor_id() != cpu);
}
}
mutex_unlock(&boost_mutex);
break;
}
----- schedule_timeout_uninterruptible(1);
+++++ schedule_timeout_uninterruptible(HZ / 20);
}
/* Go do the stutter. */
/* Clean up and exit. */
while (!kthread_should_stop()) {
torture_shutdown_absorb("rcu_torture_boost");
----- schedule_timeout_uninterruptible(1);
+++++ schedule_timeout_uninterruptible(HZ / 20);
}
torture_kthread_stopping("rcu_torture_boost");
return 0;
fqs_resume_time = jiffies + fqs_stutter * HZ;
while (time_before(jiffies, fqs_resume_time) &&
!kthread_should_stop()) {
----- schedule_timeout_interruptible(1);
+++++ schedule_timeout_interruptible(HZ / 20);
}
fqs_burst_remaining = fqs_duration;
while (fqs_burst_remaining > 0 &&
VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
while (!rcu_inkernel_boot_has_ended())
schedule_timeout_interruptible(HZ / 10);
----- for_each_online_cpu(cpu)
+++++ for_each_possible_cpu(cpu)
maxcpu = cpu;
WARN_ON(maxcpu < 0);
if (toggle_interval > ULONG_MAX)
return 0;
}
+++++ static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr)
+++++ {
+++++ pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr);
+++++ return NOTIFY_OK;
+++++ }
+++++
+++++ static struct notifier_block rcu_torture_stall_block = {
+++++ .notifier_call = rcu_torture_stall_nf,
+++++ };
+++++
/*
* CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
* induces a CPU stall for the time specified by stall_cpu.
static int rcu_torture_stall(void *args)
{
int idx;
+++++ int ret;
unsigned long stop_at;
VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
+++++ ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
+++++ if (ret)
+++++ pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n",
+++++ __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "");
if (stall_cpu_holdoff > 0) {
VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
cur_ops->readunlock(idx);
}
pr_alert("%s end.\n", __func__);
+++++ if (!ret) {
+++++ ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block);
+++++ if (ret)
+++++ pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret);
+++++ }
torture_shutdown_absorb("rcu_torture_stall");
while (!kthread_should_stop())
schedule_timeout_interruptible(10 * HZ);
WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
} else {
while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop())
----- schedule_timeout_interruptible(1);
+++++ schedule_timeout_interruptible(HZ / 20);
oldseq = READ_ONCE(rcu_fwd_seq);
}
pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
set_user_nice(current, MAX_NICE);
// Minimize time between reading and exiting.
while (!kthread_should_stop())
----- schedule_timeout_uninterruptible(1);
+++++ schedule_timeout_uninterruptible(HZ / 20);
(void)rcu_torture_one_read(trsp, -1);
return 0;
}
smp_mb(); // Store before wakeup.
wake_up(&read_exit_wq);
while (!torture_must_stop())
----- schedule_timeout_uninterruptible(1);
+++++ schedule_timeout_uninterruptible(HZ / 20);
torture_kthread_stopping("rcu_torture_read_exit");
return 0;
}
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
+ ++++#include <linux/kmemleak.h>
#include <linux/moduleparam.h>
#include <linux/panic.h>
#include <linux/panic_notifier.h>
/* Unregister a counter, with NULL for not caring which. */
void rcu_gp_slow_unregister(atomic_t *rgssp)
{
- ---- WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress);
+ ++++ WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress && rcu_gp_slow_suppress != NULL);
WRITE_ONCE(rcu_gp_slow_suppress, NULL);
}
*/
static void rcu_gp_fqs(bool first_time)
{
+++++ int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
struct rcu_node *rnp = rcu_get_root();
WRITE_ONCE(rcu_state.gp_activity, jiffies);
WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
+++++
+++++ WARN_ON_ONCE(nr_fqs > 3);
+++++ /* Only countdown nr_fqs for stall purposes if jiffies moves. */
+++++ if (nr_fqs) {
+++++ if (nr_fqs == 1) {
+++++ WRITE_ONCE(rcu_state.jiffies_stall,
+++++ jiffies + rcu_jiffies_till_stall_check());
+++++ }
+++++ WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
+++++ }
+++++
if (first_time) {
/* Collect dyntick-idle snapshots. */
force_qs_rnp(dyntick_save_progress_counter);
trace_rcu_invoke_callback(rcu_state.name, rhp);
f = rhp->func;
+ ++++ debug_rcu_head_callback(rhp);
WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
f(rhp);
*/
void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
{
- ---- return __call_rcu_common(head, func, false);
+ ++++ __call_rcu_common(head, func, false);
}
EXPORT_SYMBOL_GPL(call_rcu_hurry);
#endif
*/
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
- ---- return __call_rcu_common(head, func, IS_ENABLED(CONFIG_RCU_LAZY));
+ ++++ __call_rcu_common(head, func, IS_ENABLED(CONFIG_RCU_LAZY));
}
EXPORT_SYMBOL_GPL(call_rcu);
success = true;
}
+ ++++ /*
+ ++++ * The kvfree_rcu() caller considers the pointer freed at this point
+ ++++ * and likely removes any references to it. Since the actual slab
+ ++++ * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
+ ++++ * this object (no scanning or false positives reporting).
+ ++++ */
+ ++++ kmemleak_ignore(ptr);
+ ++++
// Set timer to drain after KFREE_DRAIN_JIFFIES.
if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
schedule_delayed_monitor_work(krcp);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
+ ++++static unsigned long rcu_barrier_last_throttle;
+ ++++
+ ++++/**
+ ++++ * rcu_barrier_throttled - Do rcu_barrier(), but limit to one per second
+ ++++ *
+ ++++ * This can be thought of as guard rails around rcu_barrier() that
+ ++++ * permits unrestricted userspace use, at least assuming the hardware's
+ ++++ * try_cmpxchg() is robust. There will be at most one call per second to
+ ++++ * rcu_barrier() system-wide from use of this function, which means that
+ ++++ * callers might needlessly wait a second or three.
+ ++++ *
+ ++++ * This is intended for use by test suites to avoid OOM by flushing RCU
+ ++++ * callbacks from the previous test before starting the next. See the
+ ++++ * rcutree.do_rcu_barrier module parameter for more information.
+ ++++ *
+ ++++ * Why not simply make rcu_barrier() more scalable? That might be
+ ++++ * the eventual endpoint, but let's keep it simple for the time being.
+ ++++ * Note that the module parameter infrastructure serializes calls to a
+ ++++ * given .set() function, but should concurrent .set() invocation ever be
+ ++++ * possible, we are ready!
+ ++++ */
+ ++++static void rcu_barrier_throttled(void)
+ ++++{
+ ++++ unsigned long j = jiffies;
+ ++++ unsigned long old = READ_ONCE(rcu_barrier_last_throttle);
+ ++++ unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
+ ++++
+ ++++ while (time_in_range(j, old, old + HZ / 16) ||
+ ++++ !try_cmpxchg(&rcu_barrier_last_throttle, &old, j)) {
+ ++++ schedule_timeout_idle(HZ / 16);
+ ++++ if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
+ ++++ smp_mb(); /* caller's subsequent code after above check. */
+ ++++ return;
+ ++++ }
+ ++++ j = jiffies;
+ ++++ old = READ_ONCE(rcu_barrier_last_throttle);
+ ++++ }
+ ++++ rcu_barrier();
+ ++++}
+ ++++
+ ++++/*
+ ++++ * Invoke rcu_barrier_throttled() when a rcutree.do_rcu_barrier
+ ++++ * request arrives. We insist on a true value to allow for possible
+ ++++ * future expansion.
+ ++++ */
+ ++++static int param_set_do_rcu_barrier(const char *val, const struct kernel_param *kp)
+ ++++{
+ ++++ bool b;
+ ++++ int ret;
+ ++++
+ ++++ if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING)
+ ++++ return -EAGAIN;
+ ++++ ret = kstrtobool(val, &b);
+ ++++ if (!ret && b) {
+ ++++ atomic_inc((atomic_t *)kp->arg);
+ ++++ rcu_barrier_throttled();
+ ++++ atomic_dec((atomic_t *)kp->arg);
+ ++++ }
+ ++++ return ret;
+ ++++}
+ ++++
+ ++++/*
+ ++++ * Output the number of outstanding rcutree.do_rcu_barrier requests.
+ ++++ */
+ ++++static int param_get_do_rcu_barrier(char *buffer, const struct kernel_param *kp)
+ ++++{
+ ++++ return sprintf(buffer, "%d\n", atomic_read((atomic_t *)kp->arg));
+ ++++}
+ ++++
+ ++++static const struct kernel_param_ops do_rcu_barrier_ops = {
+ ++++ .set = param_set_do_rcu_barrier,
+ ++++ .get = param_get_do_rcu_barrier,
+ ++++};
+ ++++static atomic_t do_rcu_barrier;
+ ++++module_param_cb(do_rcu_barrier, &do_rcu_barrier_ops, &do_rcu_barrier, 0644);
+ ++++
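A userspace test could request a throttled rcu_barrier() through the resulting sysfs file along these lines (a sketch only; it assumes the parameter appears under /sys/module/rcutree/parameters/ and does minimal error handling):

/* Userspace sketch: poke rcutree.do_rcu_barrier to flush RCU callbacks. */
#include <fcntl.h>
#include <unistd.h>

int request_rcu_barrier(void)
{
	int fd = open("/sys/module/rcutree/parameters/do_rcu_barrier", O_WRONLY);
	int ret = -1;

	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) == 1)	/* may block briefly due to throttling */
		ret = 0;
	close(fd);
	return ret;
}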
/*
* Compute the mask of online CPUs for the specified rcu_node structure.
* This will not be stable unless the rcu_node structure's ->lock is
rdp = this_cpu_ptr(&rcu_data);
/*
* Strictly, we care here about the case where the current CPU is
- ---- * in rcu_cpu_starting() and thus has an excuse for rdp->grpmask
+ ++++ * in rcutree_report_cpu_starting() and thus has an excuse for rdp->grpmask
* not being up to date. So arch_spin_is_locked() might have a
* false positive if it's held by some *other* CPU, but that's
* OK because that just means a false *negative* on the warning.
return !!rcu_state.n_online_cpus;
}
- ----/*
- ---- * Near the end of the offline process. Trace the fact that this CPU
- ---- * is going offline.
- ---- */
- ----int rcutree_dying_cpu(unsigned int cpu)
- ----{
- ---- bool blkd;
- ---- struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
- ---- struct rcu_node *rnp = rdp->mynode;
- ----
- ---- if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
- ---- return 0;
- ----
- ---- blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
- ---- trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
- ---- blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
- ---- return 0;
- ----}
- ----
/*
* All CPUs for the specified rcu_node structure have gone offline,
* and all tasks that were preempted within an RCU read-side critical
}
}
- ----/*
- ---- * The CPU has been completely removed, and some other CPU is reporting
- ---- * this fact from process context. Do the remainder of the cleanup.
- ---- * There can only be one CPU hotplug operation at a time, so no need for
- ---- * explicit locking.
- ---- */
- ----int rcutree_dead_cpu(unsigned int cpu)
- ----{
- ---- if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
- ---- return 0;
- ----
- ---- WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
- ---- // Stop-machine done, so allow nohz_full to disable tick.
- ---- tick_dep_clear(TICK_DEP_BIT_RCU);
- ---- return 0;
- ----}
- ----
/*
* Propagate ->qsinitmask bits up the rcu_node tree to account for the
* first CPU in a given leaf rcu_node structure coming online. The caller
return 0;
}
- ----/*
- ---- * Near the beginning of the process. The CPU is still very much alive
- ---- * with pretty much all services enabled.
- ---- */
- ----int rcutree_offline_cpu(unsigned int cpu)
- ----{
- ---- unsigned long flags;
- ---- struct rcu_data *rdp;
- ---- struct rcu_node *rnp;
- ----
- ---- rdp = per_cpu_ptr(&rcu_data, cpu);
- ---- rnp = rdp->mynode;
- ---- raw_spin_lock_irqsave_rcu_node(rnp, flags);
- ---- rnp->ffmask &= ~rdp->grpmask;
- ---- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- ----
- ---- rcutree_affinity_setting(cpu, cpu);
- ----
- ---- // nohz_full CPUs need the tick for stop-machine to work quickly
- ---- tick_dep_set(TICK_DEP_BIT_RCU);
- ---- return 0;
- ----}
- ----
/*
* Mark the specified CPU as being online so that subsequent grace periods
* (both expedited and normal) will wait on it. Note that this means that
* from the incoming CPU rather than from the cpuhp_step mechanism.
* This is because this function must be invoked at a precise location.
* This incoming CPU must not have enabled interrupts yet.
+ ++++ *
+ ++++ * This mirrors the effects of rcutree_report_cpu_dead().
*/
- ----void rcu_cpu_starting(unsigned int cpu)
+ ++++void rcutree_report_cpu_starting(unsigned int cpu)
{
unsigned long mask;
struct rcu_data *rdp;
* Note that this function is special in that it is invoked directly
* from the outgoing CPU rather than from the cpuhp_step mechanism.
* This is because this function must be invoked at a precise location.
+ ++++ *
+ ++++ * This mirrors the effect of rcutree_report_cpu_starting().
*/
- ----void rcu_report_dead(unsigned int cpu)
+ ++++void rcutree_report_cpu_dead(void)
{
- ---- unsigned long flags, seq_flags;
+ ++++ unsigned long flags;
unsigned long mask;
- ---- struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+ ++++ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
+ ++++ /*
+ ++++ * IRQS must be disabled from now on and until the CPU dies, or an interrupt
+ ++++ * may introduce a new READ-side while it is actually off the QS masks.
+ ++++ */
+ ++++ lockdep_assert_irqs_disabled();
// Do any dangling deferred wakeups.
do_nocb_deferred_wakeup(rdp);
/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
mask = rdp->grpmask;
- ---- local_irq_save(seq_flags);
arch_spin_lock(&rcu_state.ofl_lock);
raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
arch_spin_unlock(&rcu_state.ofl_lock);
- ---- local_irq_restore(seq_flags);
- ----
rdp->cpu_started = false;
}
cpu, rcu_segcblist_n_cbs(&rdp->cblist),
rcu_segcblist_first_cb(&rdp->cblist));
}
- ----#endif
+ ++++
+ ++++/*
+ ++++ * The CPU has been completely removed, and some other CPU is reporting
+ ++++ * this fact from process context. Do the remainder of the cleanup.
+ ++++ * There can only be one CPU hotplug operation at a time, so no need for
+ ++++ * explicit locking.
+ ++++ */
+ ++++int rcutree_dead_cpu(unsigned int cpu)
+ ++++{
+ ++++ WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
+ ++++ // Stop-machine done, so allow nohz_full to disable tick.
+ ++++ tick_dep_clear(TICK_DEP_BIT_RCU);
+ ++++ return 0;
+ ++++}
+ ++++
+ ++++/*
+ ++++ * Near the end of the offline process. Trace the fact that this CPU
+ ++++ * is going offline.
+ ++++ */
+ ++++int rcutree_dying_cpu(unsigned int cpu)
+ ++++{
+ ++++ bool blkd;
+ ++++ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+ ++++ struct rcu_node *rnp = rdp->mynode;
+ ++++
+ ++++ blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
+ ++++ trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
+ ++++ blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
+ ++++ return 0;
+ ++++}
+ ++++
+ ++++/*
+ ++++ * Near the beginning of the process. The CPU is still very much alive
+ ++++ * with pretty much all services enabled.
+ ++++ */
+ ++++int rcutree_offline_cpu(unsigned int cpu)
+ ++++{
+ ++++ unsigned long flags;
+ ++++ struct rcu_data *rdp;
+ ++++ struct rcu_node *rnp;
+ ++++
+ ++++ rdp = per_cpu_ptr(&rcu_data, cpu);
+ ++++ rnp = rdp->mynode;
+ ++++ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ ++++ rnp->ffmask &= ~rdp->grpmask;
+ ++++ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ ++++
+ ++++ rcutree_affinity_setting(cpu, cpu);
+ ++++
+ ++++ // nohz_full CPUs need the tick for stop-machine to work quickly
+ ++++ tick_dep_set(TICK_DEP_BIT_RCU);
+ ++++ return 0;
+ ++++}
+ ++++#endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
* On non-huge systems, use expedited RCU grace periods to make suspend
pm_notifier(rcu_pm_notify, 0);
WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
rcutree_prepare_cpu(cpu);
- ---- rcu_cpu_starting(cpu);
+ ++++ rcutree_report_cpu_starting(cpu);
rcutree_online_cpu(cpu);
/* Create workqueue for Tree SRCU and for expedited GPs. */