kernel/rcu/rcu.h
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <[email protected]>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>
#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else /* #ifdef CONFIG_RCU_TRACE */
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/* Offset to allow for unmatched rcu_irq_{enter,exit}(). */
#define DYNTICK_IRQ_NONIDLE     ((LONG_MAX / 2) + 1)

/*
 * Grace-period counter management.  The bottom RCU_SEQ_CTR_SHIFT bits
 * of a sequence number carry the grace-period state (nonzero while a
 * grace period is in progress), and the remaining high-order bits count
 * completed grace periods.
 */

#define RCU_SEQ_CTR_SHIFT       2
#define RCU_SEQ_STATE_MASK      ((1 << RCU_SEQ_CTR_SHIFT) - 1)

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
        return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
        return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
        WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
        WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
        WRITE_ONCE(*sp, *sp + 1);
        smp_mb(); /* Ensure update-side operation after counter increment. */
        WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
        smp_mb(); /* Ensure update-side operation before counter increment. */
        WARN_ON_ONCE(!rcu_seq_state(*sp));
        WRITE_ONCE(*sp, (*sp | RCU_SEQ_STATE_MASK) + 1);
}

/* Take a snapshot of the update side's sequence number. */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
        unsigned long s;

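        /*
         * Adding 2 * RCU_SEQ_STATE_MASK + 1 and clearing the state bits
         * yields the value that *sp will have at the end of the first
         * full grace period beginning after this snapshot: one grace
         * period from now if none is in progress, otherwise the grace
         * period following the current one.
         */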
        s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
        smp_mb(); /* Above access must not bleed into critical section. */
        return s;
}

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
        return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
        return ULONG_CMP_GE(READ_ONCE(*sp), s);
}
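
/*
 * Illustrative polling pattern built on the above (my_gp_seq is a
 * hypothetical sequence counter; the SRCU grace-period machinery uses
 * effectively this pattern):
 *
 *	unsigned long s;
 *
 *	s = rcu_seq_snap(&my_gp_seq);
 *	... wait, retry, or defer work ...
 *	if (rcu_seq_done(&my_gp_seq, s))
 *		... a full update-side operation has elapsed ...
 */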

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part of
 * the RCU API.  They live in this common header because they are used by
 * all RCU flavors.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY   0
# define STATE_RCU_HEAD_QUEUED  1

extern struct debug_obj_descr rcuhead_debug_descr;

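/*
 * Returns nonzero if debug_object_activate() reports a problem, for
 * example the same rcu_head being passed to call_rcu() a second time
 * before its callback has been invoked.
 */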
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
        int r1;

        r1 = debug_object_activate(head, &rcuhead_debug_descr);
        debug_object_active_state(head, &rcuhead_debug_descr,
                                  STATE_RCU_HEAD_READY,
                                  STATE_RCU_HEAD_QUEUED);
        return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
        debug_object_active_state(head, &rcuhead_debug_descr,
                                  STATE_RCU_HEAD_QUEUED,
                                  STATE_RCU_HEAD_READY);
        debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else   /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
        return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif  /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

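/* Bare prototype, which avoids pulling in all of <linux/slab.h>. */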
void kfree(const void *);

/*
 * Reclaim the specified callback, either by invoking it (non-lazy case)
 * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
 */
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
        unsigned long offset = (unsigned long)head->func;

        rcu_lock_acquire(&rcu_callback_map);
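        /*
         * kfree_rcu() encodes the offset of the rcu_head within its
         * enclosing structure as a small fake function pointer, which
         * __is_kfree_rcu_offset() distinguishes from real callback
         * addresses, so "head - offset" recovers the enclosing object.
         */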
        if (__is_kfree_rcu_offset(offset)) {
                RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);)
                kfree((void *)head - offset);
                rcu_lock_release(&rcu_callback_map);
                return true;
        } else {
                RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
                head->func(head);
                rcu_lock_release(&rcu_callback_map);
                return false;
        }
}

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_suppress;
int rcu_jiffies_till_stall_check(void);

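/*
 * The value 3 serves as an otherwise-unused sentinel, so that the
 * unsuppress operation below backs out only a suppression imposed by
 * rcu_ftrace_dump_stall_suppress() itself, never one requested by the
 * user via the rcu_cpu_stall_suppress module parameter.
 */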
#define rcu_ftrace_dump_stall_suppress() \
do { \
        if (!rcu_cpu_stall_suppress) \
                rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
        if (rcu_cpu_stall_suppress == 3) \
                rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)  tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.  The
 * unlocked atomic_read() is checked before the atomic_xchg() so that the
 * common already-dumped case avoids needless cache-line bouncing.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
        static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
        \
        if (!atomic_read(&___rfd_beenhere) && \
            !atomic_xchg(&___rfd_beenhere, 1)) { \
                tracing_off(); \
                rcu_ftrace_dump_stall_suppress(); \
                ftrace_dump(oops_dump_mode); \
                rcu_ftrace_dump_stall_unsuppress(); \
        } \
} while (0)

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(SRCU) || !defined(TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
extern bool rcu_fanout_exact;
extern int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
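 *
 * For example (illustrative numbers only): with nr_cpu_ids == 96 and a
 * leaf fanout of 16, levelcnt would be {1, 6}, so the balanced pass sets
 * levelspread[1] = DIV_ROUND_UP(96, 6) = 16 and levelspread[0] = 6,
 * giving the root six children covering sixteen CPUs each.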
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
        int i;

        if (rcu_fanout_exact) {
                levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
                for (i = rcu_num_lvls - 2; i >= 0; i--)
                        levelspread[i] = RCU_FANOUT;
        } else {
                int ccur;
                int cprv;

                cprv = nr_cpu_ids;
                for (i = rcu_num_lvls - 1; i >= 0; i--) {
                        ccur = levelcnt[i];
                        levelspread[i] = (cprv + ccur - 1) / ccur;
                        cprv = ccur;
                }
        }
}

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
        for ((rnp) = &(rsp)->node[0]; \
             (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
        for ((rnp) = &(rsp)->node[0]; \
             (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
        for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
             (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
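 * Note that cpumask_next() returns the first possible CPU strictly after
 * its first argument, so starting the scan at rnp->grplo - 1 includes
 * rnp->grplo itself whenever it is a possible CPU.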
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
        for ((cpu) = cpumask_next(rnp->grplo - 1, cpu_possible_mask); \
             cpu <= rnp->grphi; \
             cpu = cpumask_next((cpu), cpu_possible_mask))

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree-traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and, most importantly, transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock-acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, use these wrappers
 * rather than directly calling raw_spin_{lock,unlock}* on ->lock.
 */
#define raw_spin_lock_rcu_node(p)                                       \
do {                                                                    \
        raw_spin_lock(&ACCESS_PRIVATE(p, lock));                        \
        smp_mb__after_unlock_lock();                                    \
} while (0)

#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irq_rcu_node(p)                                   \
do {                                                                    \
        raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock));                    \
        smp_mb__after_unlock_lock();                                    \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p)                                 \
        raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irqsave_rcu_node(p, flags)                        \
do {                                                                    \
        raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);         \
        smp_mb__after_unlock_lock();                                    \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags)                   \
        raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

#define raw_spin_trylock_rcu_node(p)                                    \
({                                                                      \
        bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock));    \
                                                                        \
        if (___locked)                                                  \
                smp_mb__after_unlock_lock();                            \
        ___locked;                                                      \
})

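/*
 * Illustrative use of the wrappers (rnp here is a hypothetical
 * struct rcu_node pointer; the barrier-augmented acquire orders this
 * critical section after the previous holder's, even across tree levels):
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	... update rnp state ...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */
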
#endif /* #if defined(SRCU) || !defined(TINY_RCU) */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE  0
#define RCU_SCHEDULER_INIT      1
#define RCU_SCHEDULER_RUNNING   2

#ifdef CONFIG_TINY_RCU
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

enum rcutorture_type {
        RCU_FLAVOR,
        RCU_BH_FLAVOR,
        RCU_SCHED_FLAVOR,
        RCU_TASKS_FLAVOR,
        SRCU_FLAVOR,
        INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
                            unsigned long *gpnum, unsigned long *completed);
void rcutorture_record_test_transition(void);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
                               struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old,
                               unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
                                          int *flags,
                                          unsigned long *gpnum,
                                          unsigned long *completed)
{
        *flags = 0;
        *gpnum = 0;
        *completed = 0;
}
static inline void rcutorture_record_test_transition(void) { }
static inline void rcutorture_record_progress(unsigned long vernum) { }
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
                               struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old,
                               unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
                                           struct srcu_struct *sp, int *flags,
                                           unsigned long *gpnum,
                                           unsigned long *completed)
{
        if (test_type != SRCU_FLAVOR)
                return;
        *flags = 0;
        *completed = sp->srcu_idx;
        *gpnum = *completed;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
                             struct srcu_struct *sp, int *flags,
                             unsigned long *gpnum, unsigned long *completed);

#endif

#ifdef CONFIG_TINY_RCU
static inline unsigned long rcu_batches_started(void) { return 0; }
static inline unsigned long rcu_batches_started_bh(void) { return 0; }
static inline unsigned long rcu_batches_started_sched(void) { return 0; }
static inline unsigned long rcu_batches_completed(void) { return 0; }
static inline unsigned long rcu_batches_completed_bh(void) { return 0; }
static inline unsigned long rcu_batches_completed_sched(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline void rcu_bh_force_quiescent_state(void) { }
static inline void rcu_sched_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
#else /* #ifdef CONFIG_TINY_RCU */
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
unsigned long rcu_batches_started(void);
unsigned long rcu_batches_started_bh(void);
unsigned long rcu_batches_started_sched(void);
unsigned long rcu_batches_completed(void);
unsigned long rcu_batches_completed_bh(void);
unsigned long rcu_batches_completed_sched(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long rcu_exp_batches_completed_sched(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
void rcu_force_quiescent_state(void);
void rcu_bh_force_quiescent_state(void);
void rcu_sched_force_quiescent_state(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif

#endif /* __LINUX_RCU_H */