/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <[email protected]>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>

/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
#define DYNTICK_IRQ_NONIDLE	((LONG_MAX / 2) + 1)


/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}
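
/*
 * Worked example (purely illustrative, not part of the RCU API): with
 * RCU_SEQ_CTR_SHIFT == 2, the low two bits of a sequence number hold the
 * state and the remaining bits hold the grace-period counter.  For
 * s == 0x1d (binary 11101):
 *
 *	rcu_seq_ctr(0x1d)   == 0x7	(counter portion)
 *	rcu_seq_state(0x1d) == 0x1	(state portion; nonzero means an update is in progress)
 */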

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}
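
/*
 * Worked example (purely illustrative): starting from an idle value such as
 * *sp == 0x8 (counter 2, state 0):
 *
 *	rcu_seq_start(sp);	// *sp == 0x9: counter 2, state 1
 *	...			// update-side operation runs
 *	rcu_seq_end(sp);	// *sp == (0x9 | 0x3) + 1 == 0xc: counter 3, state 0
 *
 * That is, rcu_seq_end() rounds the value up to the next one whose state
 * bits are all zero.
 */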

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time.  This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}
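
/*
 * Worked example (purely illustrative): with RCU_SEQ_STATE_MASK == 0x3 the
 * snapshot is (*sp + 0x7) & ~0x3:
 *
 *	*sp == 0x8 (counter 2, idle)       ->  s == 0xc   (end of the counter-3 GP)
 *	*sp == 0x9 (counter 2, GP running) ->  s == 0x10  (end of the counter-4 GP)
 *
 * A grace period already in flight cannot be counted on, so in that case the
 * snapshot demands completion of the following grace period as well.
 */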

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}
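
/*
 * Illustrative usage sketch; "my_seq" and the polling loop are hypothetical
 * and not part of this header:
 *
 *	unsigned long s;
 *
 *	s = rcu_seq_snap(&my_seq);	// earliest value implying a full GP
 *	...				// make the update visible to readers
 *	while (!rcu_seq_done(&my_seq, s))
 *		schedule_timeout_uninterruptible(1);
 *	...				// a full grace period has now elapsed
 */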

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
	unsigned long rnd_diff;

	if (old == new)
		return 0;
	/*
	 * Compute the number of grace periods (still shifted up), plus
	 * one if either of new and old is not an exact grace period.
	 */
	rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
		   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
		   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
	if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
		return 1; /* Definitely no grace period has elapsed. */
	return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}
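
/*
 * Worked example (purely illustrative), with RCU_SEQ_STATE_MASK == 0x3:
 *
 *	rcu_seq_diff(0x9, 0x8): rnd_diff == 1, so return 1 (no full GP yet).
 *	rcu_seq_diff(0xc, 0x8): rnd_diff == 4, so return ((4 - 3 - 1) >> 2) + 2 == 2.
 *
 * The result is deliberately rough; callers need only an approximate count.
 */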

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API. These are in rcupdate.h because they are used by all
 * RCU implementations.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern const struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else	/* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
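
/*
 * Illustrative pairing sketch (hypothetical and simplified; the real
 * call_rcu() and callback-invocation paths differ in detail):
 *
 *	// when queuing a callback:
 *	debug_rcu_head_queue(head);	// nonzero return hints at a double call_rcu()
 *	head->func = func;
 *	... enqueue head on this CPU's callback list ...
 *
 *	// when invoking it after a grace period:
 *	debug_rcu_head_unqueue(head);
 *	head->func(head);
 */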

extern int rcu_cpu_stall_suppress_at_boot;

static inline bool rcu_stall_is_suppressed_at_boot(void)
{
	return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
}

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
int rcu_jiffies_till_stall_check(void);

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot() || rcu_cpu_stall_suppress;
}

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot();
}
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)	tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)
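
/*
 * Illustrative usage (the triggering condition is hypothetical): because each
 * expansion has its own static ___rfd_beenhere flag, a given callsite dumps
 * at most once per boot:
 *
 *	if (WARN_ON_ONCE(something_went_wrong))
 *		rcu_ftrace_dump(DUMP_ALL);	// later hits here are no-ops
 */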

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	for (i = 0; i < RCU_NUM_LVLS; i++)
		levelspread[i] = INT_MIN;
	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}
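
/*
 * Worked example (purely illustrative): for a hypothetical two-level tree
 * with nr_cpu_ids == 96 and levelcnt == { 1, 6 } (one root, six leaves),
 * the balanced (!rcu_fanout_exact) branch computes:
 *
 *	levelspread[1] = DIV_ROUND_UP(96, 6) == 16	// CPUs per leaf node
 *	levelspread[0] = DIV_ROUND_UP(6, 1)  == 6	// leaves under the root
 *
 * With rcu_fanout_exact set, the leaf level instead gets rcu_fanout_leaf and
 * every higher level gets RCU_FANOUT.
 */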

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define srcu_for_each_node_breadth_first(sp, rnp) \
	for ((rnp) = &(sp)->node[0]; \
	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
	srcu_for_each_node_breadth_first(&rcu_state, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure.  It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
	for ((rnp) = rcu_first_leaf_node(); \
	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))

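/*
 * Illustrative usage sketch (the loop body is hypothetical):
 *
 *	struct rcu_node *rnp;
 *	int cpu;
 *
 *	rcu_for_each_leaf_node(rnp)
 *		for_each_leaf_node_possible_cpu(rnp, cpu)
 *			pr_info("CPU %d is covered by leaf %p\n", cpu, rnp);
 *
 * for_each_leaf_node_cpu_mask() walks the same range but visits only those
 * CPUs whose bits are set in the supplied mask (for example, a local copy
 * of rnp->qsmask).
 */
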
/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than calling raw_spin_{lock,unlock}* directly on ->lock.
 */
#define raw_spin_lock_rcu_node(p) \
do { \
	raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irq_rcu_node(p) \
do { \
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p) \
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irqsave_rcu_node(p, flags) \
do { \
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

#define raw_spin_trylock_rcu_node(p) \
({ \
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
	\
	if (___locked) \
		smp_mb__after_unlock_lock(); \
	___locked; \
})

#define raw_lockdep_assert_held_rcu_node(p) \
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))

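/*
 * Illustrative usage sketch (hypothetical traversal): each node's lock is
 * taken through the wrappers so that ordering is preserved across levels:
 *
 *	struct rcu_node *rnp;
 *	unsigned long flags;
 *
 *	rcu_for_each_node_breadth_first(rnp) {
 *		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *		raw_lockdep_assert_held_rcu_node(rnp);
 *		...			// inspect or update *rnp
 *		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 *	}
 */
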
#endif /* #if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU) */

#ifdef CONFIG_SRCU
void srcu_init(void);
#else /* #ifdef CONFIG_SRCU */
static inline void srcu_init(void) { }
#endif /* #else #ifdef CONFIG_SRCU */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
void show_rcu_tasks_gp_kthreads(void);
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_TASKS_FLAVOR,
	RCU_TASKS_RUDE_FLAVOR,
	RCU_TASKS_TRACING_FLAVOR,
	RCU_TRIVIAL_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
void rcu_gp_set_torture_wait(int duration);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = 0;
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
static inline void rcu_gp_set_torture_wait(int duration) { }
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) { return false; }
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
void rcu_bind_current_to_nocb(void);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU)
void show_rcu_tasks_classic_gp_kthread(void);
#else
static inline void show_rcu_tasks_classic_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU)
void show_rcu_tasks_rude_gp_kthread(void);
#else
static inline void show_rcu_tasks_rude_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
void show_rcu_tasks_trace_gp_kthread(void);
#else
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif

#endif /* __LINUX_RCU_H */