/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>

/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
#define DYNTICK_IRQ_NONIDLE	((LONG_MAX / 2) + 1)

/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

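/*
 * Example (illustrative only): with RCU_SEQ_CTR_SHIFT == 2, the two
 * low-order bits of a sequence number record the phase of the current
 * grace period, and the upper bits count grace periods.  Decomposing
 * the hypothetical value 0x9 (binary 1001):
 *
 *	0x9 >> RCU_SEQ_CTR_SHIFT == 2	(two full grace periods completed)
 *	0x9 & RCU_SEQ_STATE_MASK == 1	(an update is currently in progress)
 */
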
/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time.  This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}

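/*
 * Worked example (illustrative only): with RCU_SEQ_STATE_MASK == 3, the
 * computation above is (*sp + 7) & ~3.  If *sp == 8 (counter 2, idle),
 * the snapshot is 12 (counter 3): one more full grace period suffices.
 * If *sp == 9 (counter 2, update in progress), the snapshot is 16
 * (counter 4): the in-progress grace period may have started before the
 * caller's accesses, so one full additional grace period is needed.
 */
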
/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}

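/*
 * Usage sketch (illustrative only; "myseq" is a hypothetical sequence
 * counter that some update side advances via rcu_seq_start() and
 * rcu_seq_end()):
 *
 *	unsigned long s;
 *
 *	s = rcu_seq_snap(&myseq);
 *	while (!rcu_seq_done(&myseq, s))
 *		schedule_timeout_uninterruptible(1);
 *	(a full update has now elapsed since the snapshot, so callbacks
 *	 registered before the snapshot may safely be invoked)
 */
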
/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
	unsigned long rnd_diff;

	if (old == new)
		return 0;
	/*
	 * Compute the number of grace periods (still shifted up), plus
	 * one if either of new and old is not an exact grace period.
	 */
	rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
		   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
		   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
	if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
		return 1; /* Definitely no grace period has elapsed. */
	return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}

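/*
 * Worked example (illustrative only): for new == 16 (counter 4, idle) and
 * old == 9 (counter 2, mid-update), rnd_diff == 16 - 12 + 1 == 5.  This
 * exceeds RCU_SEQ_STATE_MASK, so the function returns
 * ((5 - 3 - 1) >> 2) + 2 == 2: roughly two full grace periods elapsed.
 */
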
/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API.  These are in rcupdate.h because they are used by all
 * RCU implementations.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else	/* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

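/*
 * Usage sketch (illustrative only), mirroring what call_rcu() and the
 * callback-invocation path do:
 *
 *	debug_rcu_head_queue(head);	(just before enqueuing the callback)
 *	...
 *	debug_rcu_head_unqueue(head);	(just before invoking the callback)
 *
 * With CONFIG_DEBUG_OBJECTS_RCU_HEAD=y, passing the same rcu_head to
 * call_rcu() twice without an intervening invocation triggers a
 * debug-objects warning; otherwise both functions are no-ops.
 */
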
void kfree(const void *);

/*
 * Reclaim the specified callback, either by invoking it (non-lazy case)
 * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
 */
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kfree_rcu_offset(offset)) {
		trace_rcu_invoke_kfree_callback(rn, head, offset);
		kfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	} else {
		trace_rcu_invoke_callback(rn, head);
		f = head->func;
		WRITE_ONCE(head->func, (rcu_callback_t)0L);
		f(head);
		rcu_lock_release(&rcu_callback_map);
		return false;
	}
}

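/*
 * Example (illustrative only): kfree_rcu() stores the offset of the
 * rcu_head within its enclosing structure in place of a callback function
 * pointer, which is what __is_kfree_rcu_offset() detects above.  Given a
 * hypothetical structure:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	kfree_rcu(p, rh);	(records offsetof(struct foo, rh) in rh.func)
 *
 * "(void *)head - offset" then recovers the original pointer p for kfree().
 */
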
#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
int rcu_jiffies_till_stall_check(void);

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #endif #ifdef CONFIG_RCU_STALL_COMMON */
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)  tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)

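/*
 * Usage sketch (illustrative only): a stall-warning or error path might do
 *
 *	rcu_ftrace_dump(DUMP_ALL);
 *
 * Because ___rfd_beenhere is static to each macro expansion, each callsite
 * dumps the ftrace buffer at most once per boot, and the stall-suppress
 * wrappers above keep the (possibly slow) dump itself from triggering yet
 * another RCU CPU stall warning.
 */
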
void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(SRCU) || !defined(TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	for (i = 0; i < RCU_NUM_LVLS; i++)
		levelspread[i] = INT_MIN;
	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}

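/*
 * Worked example (illustrative only): with nr_cpu_ids == 96 and a
 * hypothetical two-level tree with levelcnt == {1, 6} (one root, six
 * leaves), the balancing branch yields levelspread[1] == 16 (each leaf
 * covers ceil(96/6) CPUs) and levelspread[0] == 6 (the root covers all
 * six leaves).
 */
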
/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define srcu_for_each_node_breadth_first(sp, rnp) \
	for ((rnp) = &(sp)->node[0]; \
	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
	srcu_for_each_node_breadth_first(&rcu_state, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure.  It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
	for ((rnp) = rcu_first_leaf_node(); \
	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

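/*
 * Usage sketch (illustrative only; do_something_with() is hypothetical):
 *
 *	struct rcu_node *rnp;
 *
 *	rcu_for_each_node_breadth_first(rnp)
 *		do_something_with(rnp);	(root first, then lower levels)
 *	rcu_for_each_leaf_node(rnp)
 *		do_something_with(rnp);	(leaves only)
 */
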
/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for ((cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for ((cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))

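/*
 * Usage sketch (illustrative only): iterate over the CPUs of a leaf whose
 * bits are set in a caller-provided mask, for example a snapshot of
 * rnp->qsmask:
 *
 *	unsigned long mask = READ_ONCE(rnp->qsmask);
 *	int cpu;
 *
 *	for_each_leaf_node_cpu_mask(rnp, cpu, mask)
 *		do_something_with(cpu);	(hypothetical per-CPU handler)
 */
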
/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly, transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, use these
 * wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
 */
#define raw_spin_lock_rcu_node(p)					\
do {									\
	raw_spin_lock(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irq_rcu_node(p)					\
do {									\
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irq_rcu_node(p)					\
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irqsave_rcu_node(p, flags)			\
do {									\
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags)			\
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

#define raw_spin_trylock_rcu_node(p)					\
({									\
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock));	\
									\
	if (___locked)							\
		smp_mb__after_unlock_lock();				\
	___locked;							\
})

#define raw_lockdep_assert_held_rcu_node(p)				\
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))

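/*
 * Usage sketch (illustrative only): a typical rcu_node update from process
 * context with interrupts enabled:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	(update rnp fields, fully ordered against critical sections
 *	 previously run on other levels of the tree)
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */
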
#endif /* #if defined(SRCU) || !defined(TINY_RCU) */

#ifdef CONFIG_SRCU
void srcu_init(void);
#else /* #ifdef CONFIG_SRCU */
static inline void srcu_init(void) { }
#endif /* #else #ifdef CONFIG_SRCU */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_TASKS_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = 0;
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
#else /* #ifdef CONFIG_TINY_RCU */
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
void rcu_bind_current_to_nocb(void);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
static inline void rcu_bind_current_to_nocb(void) { }
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

#endif /* __LINUX_RCU_H */