/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <[email protected]>
 *          Manfred Spraul <[email protected]>
 *          Paul E. McKenney <[email protected]> Hierarchical version
 *
 * Based on the original work by Paul McKenney <[email protected]>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/nmi.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>

#include "rcutree.h"

/* Data structures. */

#define RCU_STATE_INITIALIZER(name) { \
        .level = { &name.node[0] }, \
        .levelcnt = { \
                NUM_RCU_LVL_0,  /* root of hierarchy. */ \
                NUM_RCU_LVL_1, \
                NUM_RCU_LVL_2, \
                NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
        }, \
        .signaled = RCU_SIGNAL_INIT, \
        .gpnum = -300, \
        .completed = -300, \
        .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
        .orphan_cbs_list = NULL, \
        .orphan_cbs_tail = &name.orphan_cbs_list, \
        .orphan_qlen = 0, \
        .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \
        .n_force_qs = 0, \
        .n_force_qs_ngp = 0, \
}

struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);

struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);

/*
 * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
        return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
}

/*
 * Note a quiescent state.  Because we do not need to know
 * how many quiescent states passed, just if there was at least
 * one since the start of the grace period, this just sets a flag.
 */
void rcu_sched_qs(int cpu)
{
        struct rcu_data *rdp;

        rdp = &per_cpu(rcu_sched_data, cpu);
        rdp->passed_quiesc_completed = rdp->completed;
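        /*
         * Order the snapshot above before the flag below.  Both fields
         * are read only by RCU core code running on this same CPU, so
         * a compiler barrier suffices.
         */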
        barrier();
        rdp->passed_quiesc = 1;
        rcu_preempt_note_context_switch(cpu);
}

void rcu_bh_qs(int cpu)
{
        struct rcu_data *rdp;

        rdp = &per_cpu(rcu_bh_data, cpu);
        rdp->passed_quiesc_completed = rdp->completed;
        barrier();
        rdp->passed_quiesc = 1;
}

#ifdef CONFIG_NO_HZ
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
        .dynticks_nesting = 1,
        .dynticks = 1,
};
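/*
 * ->dynticks starts at 1 and is incremented on each transition into
 * or out of nohz mode, so an even value means the CPU is in
 * dyntick-idle mode and an odd value means it is not; ->dynticks_nmi
 * follows the same parity convention for NMI handlers.  The
 * WARN_ON_ONCE() checks below rely on this convention.
 */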
#endif /* #ifdef CONFIG_NO_HZ */

static int blimit = 10;		/* Maximum callbacks per softirq. */
static int qhimark = 10000;	/* If this many pending, ignore blimit. */
static int qlowmark = 100;	/* Once only this many pending, use blimit. */

module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);

static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
static int rcu_pending(int cpu);

/*
 * Return the number of RCU-sched batches processed thus far for debug & stats.
 */
long rcu_batches_completed_sched(void)
{
        return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);

/*
 * Return the number of RCU BH batches processed thus far for debug & stats.
 */
long rcu_batches_completed_bh(void)
{
        return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/*
 * Does the CPU have callbacks ready to be invoked?
 */
static int
cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
{
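        /*
         * ->nxttail[RCU_DONE_TAIL] points at ->nxtlist itself when the
         * segment of callbacks that have passed a grace period is empty.
         */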
        return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
}

/*
 * Does the current CPU require a not-yet-scheduled grace period?
 */
static int
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
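        /*
         * Yes if there are callbacks beyond the done segment and no
         * grace period is in progress to eventually cover them.
         */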
        return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
}

/*
 * Return the root node of the specified rcu_state structure.
 */
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
{
        return &rsp->node[0];
}

#ifdef CONFIG_SMP

/*
 * If the specified CPU is offline, tell the caller that it is in
 * a quiescent state.  Otherwise, whack it with a reschedule IPI.
 * Grace periods can end up waiting on an offline CPU when that
 * CPU is in the process of coming online -- it will be added to the
 * rcu_node bitmasks before it actually makes it online.  The same thing
 * can happen while a CPU is in the process of going offline.  Because this
 * race is quite rare, we check for it after detecting that the grace
 * period has been delayed rather than checking each and every CPU
 * each and every time we start a new grace period.
 */
static int rcu_implicit_offline_qs(struct rcu_data *rdp)
{
        /*
         * If the CPU is offline, it is in a quiescent state.  We can
         * trust its state not to change because interrupts are disabled.
         */
        if (cpu_is_offline(rdp->cpu)) {
                rdp->offline_fqs++;
                return 1;
        }

        /* If preemptable RCU, no point in sending reschedule IPI. */
        if (rdp->preemptable)
                return 0;

        /* The CPU is online, so send it a reschedule IPI. */
        if (rdp->cpu != smp_processor_id())
                smp_send_reschedule(rdp->cpu);
        else
                set_need_resched();
        rdp->resched_ipi++;
        return 0;
}

#endif /* #ifdef CONFIG_SMP */

#ifdef CONFIG_NO_HZ

/**
 * rcu_enter_nohz - inform RCU that current CPU is entering nohz
 *
 * Enter nohz mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in nohz mode, a possibility
 * handled by rcu_irq_enter() and rcu_irq_exit()).
 */
void rcu_enter_nohz(void)
{
        unsigned long flags;
        struct rcu_dynticks *rdtp;

        smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
        local_irq_save(flags);
        rdtp = &__get_cpu_var(rcu_dynticks);
        rdtp->dynticks++;
        rdtp->dynticks_nesting--;
        WARN_ON_ONCE(rdtp->dynticks & 0x1);
        local_irq_restore(flags);
}

/**
 * rcu_exit_nohz - inform RCU that current CPU is leaving nohz
 *
 * Exit nohz mode, in other words, -enter- the mode in which RCU
 * read-side critical sections normally occur.
 */
void rcu_exit_nohz(void)
{
        unsigned long flags;
        struct rcu_dynticks *rdtp;

        local_irq_save(flags);
        rdtp = &__get_cpu_var(rcu_dynticks);
        rdtp->dynticks++;
        rdtp->dynticks_nesting++;
        WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
        local_irq_restore(flags);
        smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle with dynamic ticks active, and there is no
 * irq handler running, this updates rdtp->dynticks_nmi to let the
 * RCU grace-period handling know that the CPU is active.
 */
void rcu_nmi_enter(void)
{
        struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

        if (rdtp->dynticks & 0x1)
                return;
        rdtp->dynticks_nmi++;
        WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
        smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If the CPU was idle with dynamic ticks active, and there is no
 * irq handler running, this updates rdtp->dynticks_nmi to let the
 * RCU grace-period handling know that the CPU is no longer active.
 */
void rcu_nmi_exit(void)
{
        struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

        if (rdtp->dynticks & 0x1)
                return;
        smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
        rdtp->dynticks_nmi++;
        WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
}

/**
 * rcu_irq_enter - inform RCU of entry to hard irq context
 *
 * If the CPU was idle with dynamic ticks active, this updates the
 * rdtp->dynticks to let the RCU handling know that the CPU is active.
 */
void rcu_irq_enter(void)
{
        struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

        if (rdtp->dynticks_nesting++)
                return;
        rdtp->dynticks++;
        WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
        smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}

/**
 * rcu_irq_exit - inform RCU of exit from hard irq context
 *
 * If the CPU was idle with dynamic ticks active, update rdtp->dynticks
 * to let the RCU handling know that the CPU is going back to idle
 * with no ticks.
 */
void rcu_irq_exit(void)
{
        struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

        if (--rdtp->dynticks_nesting)
                return;
        smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
        rdtp->dynticks++;
        WARN_ON_ONCE(rdtp->dynticks & 0x1);

        /* If the interrupt queued a callback, get out of dyntick mode. */
        if (__get_cpu_var(rcu_sched_data).nxtlist ||
            __get_cpu_var(rcu_bh_data).nxtlist)
                set_need_resched();
}

/*
 * Record the specified "completed" value, which is later used to validate
 * dynticks counter manipulations.  Specify "rsp->completed - 1" to
 * unconditionally invalidate any future dynticks manipulations (which is
 * useful at the beginning of a grace period).
 */
static void dyntick_record_completed(struct rcu_state *rsp, long comp)
{
        rsp->dynticks_completed = comp;
}

#ifdef CONFIG_SMP

/*
 * Recall the previously recorded value of the completion for dynticks.
 */
static long dyntick_recall_completed(struct rcu_state *rsp)
{
        return rsp->dynticks_completed;
}

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
        int ret;
        int snap;
        int snap_nmi;

        snap = rdp->dynticks->dynticks;
        snap_nmi = rdp->dynticks->dynticks_nmi;
        smp_mb(); /* Order sampling of snap with end of grace period. */
        rdp->dynticks_snap = snap;
        rdp->dynticks_nmi_snap = snap_nmi;
        ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
        if (ret)
                rdp->dynticks_fqs++;
        return ret;
}

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
        long curr;
        long curr_nmi;
        long snap;
        long snap_nmi;

        curr = rdp->dynticks->dynticks;
        snap = rdp->dynticks_snap;
        curr_nmi = rdp->dynticks->dynticks_nmi;
        snap_nmi = rdp->dynticks_nmi_snap;
        smp_mb(); /* force ordering with cpu entering/leaving dynticks. */

        /*
         * If the CPU passed through or entered a dynticks idle phase with
         * no active irq/NMI handlers, then we can safely pretend that the CPU
         * already acknowledged the request to pass through a quiescent
         * state.  Either way, that CPU cannot possibly be in an RCU
         * read-side critical section that started before the beginning
         * of the current RCU grace period.
         */
        if ((curr != snap || (curr & 0x1) == 0) &&
            (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
                rdp->dynticks_fqs++;
                return 1;
        }

        /* Go check for the CPU being offline. */
        return rcu_implicit_offline_qs(rdp);
}

#endif /* #ifdef CONFIG_SMP */

#else /* #ifdef CONFIG_NO_HZ */

static void dyntick_record_completed(struct rcu_state *rsp, long comp)
{
}

#ifdef CONFIG_SMP

/*
 * If there are no dynticks, then the only way that a CPU can passively
 * be in a quiescent state is to be offline.  Unlike dynticks idle, which
 * is a point in time during the prior (already finished) grace period,
 * an offline CPU is always in a quiescent state, and thus can be
 * unconditionally applied.  So just return the current value of completed.
 */
static long dyntick_recall_completed(struct rcu_state *rsp)
{
        return rsp->completed;
}

static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
        return 0;
}

static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
        return rcu_implicit_offline_qs(rdp);
}

#endif /* #ifdef CONFIG_SMP */

#endif /* #else #ifdef CONFIG_NO_HZ */

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

static void record_gp_stall_check_time(struct rcu_state *rsp)
{
        rsp->gp_start = jiffies;
        rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
}

static void print_other_cpu_stall(struct rcu_state *rsp)
{
        int cpu;
        long delta;
        unsigned long flags;
        struct rcu_node *rnp = rcu_get_root(rsp);

        /* Only let one CPU complain about others per time interval. */

        spin_lock_irqsave(&rnp->lock, flags);
        delta = jiffies - rsp->jiffies_stall;
        if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
                spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }
        rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;

        /*
         * Now rat on any tasks that got kicked up to the root rcu_node
         * due to CPU offlining.
         */
        rcu_print_task_stall(rnp);
        spin_unlock_irqrestore(&rnp->lock, flags);

        /* OK, time to rat on our buddy... */

        printk(KERN_ERR "INFO: RCU detected CPU stalls:");
        rcu_for_each_leaf_node(rsp, rnp) {
                rcu_print_task_stall(rnp);
                if (rnp->qsmask == 0)
                        continue;
                for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
                        if (rnp->qsmask & (1UL << cpu))
                                printk(" %d", rnp->grplo + cpu);
        }
        printk(" (detected by %d, t=%ld jiffies)\n",
               smp_processor_id(), (long)(jiffies - rsp->gp_start));
        trigger_all_cpu_backtrace();

        force_quiescent_state(rsp, 0);  /* Kick them all. */
}

static void print_cpu_stall(struct rcu_state *rsp)
{
        unsigned long flags;
        struct rcu_node *rnp = rcu_get_root(rsp);

        printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
               smp_processor_id(), jiffies - rsp->gp_start);
        trigger_all_cpu_backtrace();

        spin_lock_irqsave(&rnp->lock, flags);
        if ((long)(jiffies - rsp->jiffies_stall) >= 0)
                rsp->jiffies_stall =
                        jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
        spin_unlock_irqrestore(&rnp->lock, flags);

        set_need_resched();  /* kick ourselves to get things going. */
}

static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
        long delta;
        struct rcu_node *rnp;

        delta = jiffies - rsp->jiffies_stall;
        rnp = rdp->mynode;
        if ((rnp->qsmask & rdp->grpmask) && delta >= 0) {

                /* We haven't checked in, so go dump stack. */
                print_cpu_stall(rsp);

        } else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {

                /* They had two time units to dump stack, so complain. */
                print_other_cpu_stall(rsp);
        }
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

static void record_gp_stall_check_time(struct rcu_state *rsp)
{
}

static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Update CPU-local rcu_data state to record the newly noticed grace period.
 * This is used both when we started the grace period and when we notice
 * that someone else started the grace period.
 */
static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
{
        rdp->qs_pending = 1;
        rdp->passed_quiesc = 0;
        rdp->gpnum = rsp->gpnum;
}

/*
 * Did someone else start a new RCU grace period since we last
 * checked?  Update local state appropriately if so.  Must be called
 * on the CPU corresponding to rdp.
 */
static int
check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
{
        unsigned long flags;
        int ret = 0;

        local_irq_save(flags);
        if (rdp->gpnum != rsp->gpnum) {
                note_new_gpnum(rsp, rdp);
                ret = 1;
        }
        local_irq_restore(flags);
        return ret;
}

/*
 * Start a new RCU grace period if warranted, re-initializing the hierarchy
 * in preparation for detecting the next grace period.  The caller must hold
 * the root node's ->lock, which is released before return.  Hard irqs must
 * be disabled.
 */
static void
rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
        __releases(rcu_get_root(rsp)->lock)
{
        struct rcu_data *rdp = rsp->rda[smp_processor_id()];
        struct rcu_node *rnp = rcu_get_root(rsp);

        if (!cpu_needs_another_gp(rsp, rdp)) {
                spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }

        /* Advance to a new grace period and initialize state. */
        rsp->gpnum++;
        WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
        rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
        rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
        record_gp_stall_check_time(rsp);
        dyntick_record_completed(rsp, rsp->completed - 1);
        note_new_gpnum(rsp, rdp);

        /*
         * Because this CPU just now started the new grace period, we know
         * that all of its callbacks will be covered by this upcoming grace
         * period, even the ones that were registered arbitrarily recently.
         * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
         *
         * Other CPUs cannot be sure exactly when the grace period started.
         * Therefore, their recently registered callbacks must pass through
         * an additional RCU_NEXT_READY stage, so that they will be handled
         * by the next RCU grace period.
         */
        rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
        rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

        /* Special-case the common single-level case. */
        if (NUM_RCU_NODES == 1) {
                rcu_preempt_check_blocked_tasks(rnp);
                rnp->qsmask = rnp->qsmaskinit;
                rnp->gpnum = rsp->gpnum;
                rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
                spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }

        spin_unlock(&rnp->lock);  /* leave irqs disabled. */

        /* Exclude any concurrent CPU-hotplug operations. */
        spin_lock(&rsp->onofflock);  /* irqs already disabled. */

        /*
         * Set the quiescent-state-needed bits in all the rcu_node
         * structures for all currently online CPUs in breadth-first
         * order, starting from the root rcu_node structure.  This
         * operation relies on the layout of the hierarchy within the
         * rsp->node[] array.  Note that other CPUs will access only
         * the leaves of the hierarchy, which still indicate that no
         * grace period is in progress, at least until the corresponding
         * leaf node has been initialized.  In addition, we have excluded
         * CPU-hotplug operations.
         *
         * Note that the grace period cannot complete until we finish
         * the initialization process, as there will be at least one
         * qsmask bit set in the root node until that time, namely the
         * one corresponding to this CPU, due to the fact that we have
         * irqs disabled.
         */
        rcu_for_each_node_breadth_first(rsp, rnp) {
                spin_lock(&rnp->lock);  /* irqs already disabled. */
                rcu_preempt_check_blocked_tasks(rnp);
                rnp->qsmask = rnp->qsmaskinit;
                rnp->gpnum = rsp->gpnum;
                spin_unlock(&rnp->lock);  /* irqs already disabled. */
        }

        rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
        spin_unlock_irqrestore(&rsp->onofflock, flags);
}

/*
 * Advance this CPU's callbacks, but only if the current grace period
 * has ended.  This may be called only from the CPU to whom the rdp
 * belongs.
 */
static void
rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
{
        long completed_snap;
        unsigned long flags;

        local_irq_save(flags);
        completed_snap = ACCESS_ONCE(rsp->completed);  /* outside of lock. */

        /* Did another grace period end? */
        if (rdp->completed != completed_snap) {

                /* Advance callbacks.  No harm if list empty. */
                rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
                rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
                rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

                /* Remember that we saw this grace-period completion. */
                rdp->completed = completed_snap;
        }
        local_irq_restore(flags);
}

/*
 * Clean up after the prior grace period and let rcu_start_gp() start up
 * the next grace period if one is needed.  Note that the caller must
 * hold rnp->lock, as required by rcu_start_gp(), which will release it.
 */
static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
        __releases(rcu_get_root(rsp)->lock)
{
        WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
        rsp->completed = rsp->gpnum;
        rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
        rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
}

/*
 * Similar to cpu_quiet(), for which it is a helper function.  Allows
 * a group of CPUs to be quieted at one go, though all the CPUs in the
 * group must be represented by the same leaf rcu_node structure.
 * That structure's lock must be held upon entry, and it is released
 * before return.
 */
static void
cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
              unsigned long flags)
        __releases(rnp->lock)
{
        struct rcu_node *rnp_c;

        /* Walk up the rcu_node hierarchy. */
        for (;;) {
                if (!(rnp->qsmask & mask)) {

                        /* Our bit has already been cleared, so done. */
                        spin_unlock_irqrestore(&rnp->lock, flags);
                        return;
                }
                rnp->qsmask &= ~mask;
                if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {

                        /* Other bits still set at this level, so done. */
                        spin_unlock_irqrestore(&rnp->lock, flags);
                        return;
                }
                mask = rnp->grpmask;
                if (rnp->parent == NULL) {

                        /* No more levels.  Exit loop holding root lock. */

                        break;
                }
                spin_unlock_irqrestore(&rnp->lock, flags);
                rnp_c = rnp;
                rnp = rnp->parent;
                spin_lock_irqsave(&rnp->lock, flags);
                WARN_ON_ONCE(rnp_c->qsmask);
        }

        /*
         * Get here if we are the last CPU to pass through a quiescent
         * state for this grace period.  Invoke cpu_quiet_msk_finish()
         * to clean up and start the next grace period if one is needed.
         */
        cpu_quiet_msk_finish(rsp, flags);  /* releases rnp->lock. */
}

/*
 * Record a quiescent state for the specified CPU, which must be the
 * current CPU.  The lastcomp argument is used to make sure we are
 * still in the grace period of interest.  We don't want to end the current
 * grace period based on quiescent states detected in an earlier grace
 * period!
 */
static void
cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
{
        unsigned long flags;
        unsigned long mask;
        struct rcu_node *rnp;

        rnp = rdp->mynode;
        spin_lock_irqsave(&rnp->lock, flags);
        if (lastcomp != ACCESS_ONCE(rsp->completed)) {

                /*
                 * Someone beat us to it for this grace period, so leave.
                 * The race with GP start is resolved by the fact that we
                 * hold the leaf rcu_node lock, so that the per-CPU bits
                 * cannot yet be initialized -- so we would simply find our
                 * CPU's bit already cleared in cpu_quiet_msk() if this race
                 * occurred.
                 */
                rdp->passed_quiesc = 0;  /* try again later! */
                spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }
        mask = rdp->grpmask;
        if ((rnp->qsmask & mask) == 0) {
                spin_unlock_irqrestore(&rnp->lock, flags);
        } else {
                rdp->qs_pending = 0;

                /*
                 * This GP can't end until cpu checks in, so all of our
                 * callbacks can be processed during the next GP.
                 */
                rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

                cpu_quiet_msk(mask, rsp, rnp, flags);  /* releases rnp->lock */
        }
}

/*
 * Check to see if there is a new grace period of which this CPU
 * is not yet aware, and if so, set up local rcu_data state for it.
 * Otherwise, see if this CPU has just passed through its first
 * quiescent state for this grace period, and record that fact if so.
 */
static void
rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
{
        /* If there is now a new grace period, record and return. */
        if (check_for_new_grace_period(rsp, rdp))
                return;

        /*
         * Does this CPU still need to do its part for current grace period?
         * If no, return and let the other CPUs do their part as well.
         */
        if (!rdp->qs_pending)
                return;

        /*
         * Was there a quiescent state since the beginning of the grace
         * period?  If no, then exit and wait for the next call.
         */
        if (!rdp->passed_quiesc)
                return;

        /* Tell RCU we are done (but cpu_quiet() will be the judge of that). */
        cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Move a dying CPU's RCU callbacks to the ->orphan_cbs_list for the
 * specified flavor of RCU.  The callbacks will be adopted by the next
 * _rcu_barrier() invocation or by the CPU_DEAD notifier, whichever
 * comes first.  Because this is invoked from the CPU_DYING notifier,
 * irqs are already disabled.
 */
static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
{
        int i;
        struct rcu_data *rdp = rsp->rda[smp_processor_id()];

        if (rdp->nxtlist == NULL)
                return;  /* irqs disabled, so comparison is stable. */
        spin_lock(&rsp->onofflock);  /* irqs already disabled. */
        *rsp->orphan_cbs_tail = rdp->nxtlist;
        rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL];
        rdp->nxtlist = NULL;
        for (i = 0; i < RCU_NEXT_SIZE; i++)
                rdp->nxttail[i] = &rdp->nxtlist;
        rsp->orphan_qlen += rdp->qlen;
        rdp->qlen = 0;
        spin_unlock(&rsp->onofflock);  /* irqs remain disabled. */
}

/*
 * Adopt previously orphaned RCU callbacks.
 */
static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
{
        unsigned long flags;
        struct rcu_data *rdp;

        spin_lock_irqsave(&rsp->onofflock, flags);
        rdp = rsp->rda[smp_processor_id()];
        if (rsp->orphan_cbs_list == NULL) {
                spin_unlock_irqrestore(&rsp->onofflock, flags);
                return;
        }
        *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
        rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail;
        rdp->qlen += rsp->orphan_qlen;
        rsp->orphan_cbs_list = NULL;
        rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
        rsp->orphan_qlen = 0;
        spin_unlock_irqrestore(&rsp->onofflock, flags);
}

/*
 * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
 * and move all callbacks from the outgoing CPU to the current one.
 */
static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
{
        unsigned long flags;
        long lastcomp;
        unsigned long mask;
        struct rcu_data *rdp = rsp->rda[cpu];
        struct rcu_node *rnp;

        /* Exclude any attempts to start a new grace period. */
        spin_lock_irqsave(&rsp->onofflock, flags);

        /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
        rnp = rdp->mynode;      /* this is the outgoing CPU's rnp. */
        mask = rdp->grpmask;    /* rnp->grplo is constant. */
        do {
                spin_lock(&rnp->lock);  /* irqs already disabled. */
                rnp->qsmaskinit &= ~mask;
                if (rnp->qsmaskinit != 0) {
                        spin_unlock(&rnp->lock);  /* irqs remain disabled. */
                        break;
                }

                /*
                 * If there was a task blocking the current grace period,
                 * and if all CPUs have checked in, we need to propagate
                 * the quiescent state up the rcu_node hierarchy.  But that
                 * is inconvenient at the moment due to deadlock issues if
                 * this should end the current grace period.  So set the
                 * offlined CPU's bit in ->qsmask in order to force the
                 * next force_quiescent_state() invocation to clean up this
                 * mess in a deadlock-free manner.
                 */
                if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
                        rnp->qsmask |= mask;

                mask = rnp->grpmask;
                spin_unlock(&rnp->lock);  /* irqs remain disabled. */
                rnp = rnp->parent;
        } while (rnp != NULL);
        lastcomp = rsp->completed;

        spin_unlock_irqrestore(&rsp->onofflock, flags);

        rcu_adopt_orphan_cbs(rsp);
}

/*
 * Remove the specified CPU from the RCU hierarchy and move any pending
 * callbacks that it might have to the current CPU.  This code assumes
 * that at least one CPU in the system will remain running at all times.
 * Any attempt to offline -all- CPUs is likely to strand RCU callbacks.
 */
static void rcu_offline_cpu(int cpu)
{
        __rcu_offline_cpu(cpu, &rcu_sched_state);
        __rcu_offline_cpu(cpu, &rcu_bh_state);
        rcu_preempt_offline_cpu(cpu);
}

#else /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
{
}

static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
{
}

static void rcu_offline_cpu(int cpu)
{
}

#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Invoke any RCU callbacks that have made it to the end of their grace
 * period.  Throttle as specified by rdp->blimit.
 */
static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
{
        unsigned long flags;
        struct rcu_head *next, *list, **tail;
        int count;

        /* If no callbacks are ready, just return. */
        if (!cpu_has_callbacks_ready_to_invoke(rdp))
                return;

        /*
         * Extract the list of ready callbacks, disabling to prevent
         * races with call_rcu() from interrupt handlers.
         */
        local_irq_save(flags);
        list = rdp->nxtlist;
        rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
        *rdp->nxttail[RCU_DONE_TAIL] = NULL;
        tail = rdp->nxttail[RCU_DONE_TAIL];
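        /*
         * Any ->nxttail[] pointers equal to the done tail mark segments
         * that were wholly extracted above, so point them back at the
         * now-shortened list head.
         */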
        for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
                if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
                        rdp->nxttail[count] = &rdp->nxtlist;
        local_irq_restore(flags);

        /* Invoke callbacks. */
        count = 0;
        while (list) {
                next = list->next;
                prefetch(next);
                list->func(list);
                list = next;
                if (++count >= rdp->blimit)
                        break;
        }

        local_irq_save(flags);

        /* Update count, and requeue any remaining callbacks. */
        rdp->qlen -= count;
        if (list != NULL) {
                *tail = rdp->nxtlist;
                rdp->nxtlist = list;
                for (count = 0; count < RCU_NEXT_SIZE; count++)
                        if (&rdp->nxtlist == rdp->nxttail[count])
                                rdp->nxttail[count] = tail;
                        else
                                break;
        }

        /* Reinstate batch limit if we have worked down the excess. */
        if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
                rdp->blimit = blimit;

        /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
        if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
                rdp->qlen_last_fqs_check = 0;
                rdp->n_force_qs_snap = rsp->n_force_qs;
        } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
                rdp->qlen_last_fqs_check = rdp->qlen;

        local_irq_restore(flags);

        /* Re-raise the RCU softirq if there are callbacks remaining. */
        if (cpu_has_callbacks_ready_to_invoke(rdp))
                raise_softirq(RCU_SOFTIRQ);
}

/*
 * Check to see if this CPU is in a non-context-switch quiescent state
 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
 * Also schedule the RCU softirq handler.
 *
 * This function must be called with hardirqs disabled.  It is normally
 * invoked from the scheduling-clock interrupt.  If rcu_pending returns
 * false, there is no point in invoking rcu_check_callbacks().
 */
void rcu_check_callbacks(int cpu, int user)
{
        if (!rcu_pending(cpu))
                return;  /* if nothing for RCU to do. */
        if (user ||
            (idle_cpu(cpu) && rcu_scheduler_active &&
             !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {

                /*
                 * Get here if this CPU took its interrupt from user
                 * mode or from the idle loop, and if this is not a
                 * nested interrupt.  In this case, the CPU is in
                 * a quiescent state, so note it.
                 *
                 * No memory barrier is required here because both
                 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
                 * variables that other CPUs neither access nor modify,
                 * at least not while the corresponding CPU is online.
                 */

                rcu_sched_qs(cpu);
                rcu_bh_qs(cpu);

        } else if (!in_softirq()) {

                /*
                 * Get here if this CPU did not take its interrupt from
                 * softirq, in other words, if it is not interrupting
                 * an rcu_bh read-side critical section.  This is a _bh
                 * critical section, so note it.
                 */

                rcu_bh_qs(cpu);
        }
        rcu_preempt_check_callbacks(cpu);
        raise_softirq(RCU_SOFTIRQ);
}

#ifdef CONFIG_SMP

/*
 * Scan the leaf rcu_node structures, processing dyntick state for any that
 * have not yet encountered a quiescent state, using the function specified.
 * Returns 1 if the current grace period ends while scanning (possibly
 * because we made it end).
 */
static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
                               int (*f)(struct rcu_data *))
{
        unsigned long bit;
        int cpu;
        unsigned long flags;
        unsigned long mask;
        struct rcu_node *rnp;

        rcu_for_each_leaf_node(rsp, rnp) {
                mask = 0;
                spin_lock_irqsave(&rnp->lock, flags);
                if (rsp->completed != lastcomp) {
                        spin_unlock_irqrestore(&rnp->lock, flags);
                        return 1;
                }
                if (rnp->qsmask == 0) {
                        spin_unlock_irqrestore(&rnp->lock, flags);
                        continue;
                }
                cpu = rnp->grplo;
                bit = 1;
                for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
                        if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
                                mask |= bit;
                }
                if (mask != 0 && rsp->completed == lastcomp) {

                        /* cpu_quiet_msk() releases rnp->lock. */
                        cpu_quiet_msk(mask, rsp, rnp, flags);
                        continue;
                }
                spin_unlock_irqrestore(&rnp->lock, flags);
        }
        return 0;
}

/*
 * Force quiescent states on reluctant CPUs, and also detect which
 * CPUs are in dyntick-idle mode.
 */
static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
{
        unsigned long flags;
        long lastcomp;
        struct rcu_node *rnp = rcu_get_root(rsp);
        u8 signaled;

        if (!rcu_gp_in_progress(rsp))
                return;  /* No grace period in progress, nothing to force. */
        if (!spin_trylock_irqsave(&rsp->fqslock, flags)) {
                rsp->n_force_qs_lh++; /* Inexact, can lose counts.  Tough! */
                return;  /* Someone else is already on the job. */
        }
        if (relaxed &&
            (long)(rsp->jiffies_force_qs - jiffies) >= 0)
                goto unlock_ret; /* no emergency and done recently. */
        rsp->n_force_qs++;
        spin_lock(&rnp->lock);
        lastcomp = rsp->completed;
        signaled = rsp->signaled;
        rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
        if (lastcomp == rsp->gpnum) {
                rsp->n_force_qs_ngp++;
                spin_unlock(&rnp->lock);
                goto unlock_ret;  /* no GP in progress, time updated. */
        }
        spin_unlock(&rnp->lock);
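        /*
         * Successive calls advance ->signaled from RCU_SAVE_DYNTICK
         * (snapshot the holdout CPUs' dynticks counters) to RCU_FORCE_QS
         * (credit CPUs whose counters have since changed, and send
         * reschedule IPIs to the rest).  Without CONFIG_NO_HZ, the
         * RCU_SAVE_DYNTICK phase is compiled out.
         */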
        switch (signaled) {
        case RCU_GP_INIT:

                break; /* grace period still initializing, ignore. */

        case RCU_SAVE_DYNTICK:

                if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
                        break; /* So gcc recognizes the dead code. */

                /* Record dyntick-idle state. */
                if (rcu_process_dyntick(rsp, lastcomp,
                                        dyntick_save_progress_counter))
                        goto unlock_ret;

                /* Update state, record completion counter. */
                spin_lock(&rnp->lock);
                if (lastcomp == rsp->completed) {
                        rsp->signaled = RCU_FORCE_QS;
                        dyntick_record_completed(rsp, lastcomp);
                }
                spin_unlock(&rnp->lock);
                break;

        case RCU_FORCE_QS:

                /* Check dyntick-idle state, send IPI to laggards. */
                if (rcu_process_dyntick(rsp, dyntick_recall_completed(rsp),
                                        rcu_implicit_dynticks_qs))
                        goto unlock_ret;

                /* Leave state in case more forcing is required. */

                break;
        }
unlock_ret:
        spin_unlock_irqrestore(&rsp->fqslock, flags);
}

#else /* #ifdef CONFIG_SMP */

static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
{
        set_need_resched();
}

#endif /* #else #ifdef CONFIG_SMP */

/*
 * This does the RCU processing work from softirq context for the
 * specified rcu_state and rcu_data structures.  This may be called
 * only from the CPU to whom the rdp belongs.
 */
static void
__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
        unsigned long flags;

        WARN_ON_ONCE(rdp->beenonline == 0);

        /*
         * If an RCU GP has gone long enough, go check for dyntick
         * idle CPUs and, if needed, send resched IPIs.
         */
        if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
                force_quiescent_state(rsp, 1);

        /*
         * Advance callbacks in response to end of earlier grace
         * period that some other CPU ended.
         */
        rcu_process_gp_end(rsp, rdp);

        /* Update RCU state based on any recent quiescent states. */
        rcu_check_quiescent_state(rsp, rdp);

        /* Does this CPU require a not-yet-started grace period? */
        if (cpu_needs_another_gp(rsp, rdp)) {
                spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
                rcu_start_gp(rsp, flags);  /* releases above lock */
        }

        /* If there are callbacks ready, invoke them. */
        rcu_do_batch(rsp, rdp);
}

/*
 * Do softirq processing for the current CPU.
 */
static void rcu_process_callbacks(struct softirq_action *unused)
{
        /*
         * Memory references from any prior RCU read-side critical sections
         * executed by the interrupted code must be seen before any RCU
         * grace-period manipulations below.
         */
        smp_mb(); /* See above block comment. */

        __rcu_process_callbacks(&rcu_sched_state,
                                &__get_cpu_var(rcu_sched_data));
        __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
        rcu_preempt_process_callbacks();

        /*
         * Memory references from any later RCU read-side critical sections
         * executed by the interrupted code must be seen after any RCU
         * grace-period manipulations above.
         */
        smp_mb(); /* See above block comment. */
}

static void
__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
           struct rcu_state *rsp)
{
        unsigned long flags;
        struct rcu_data *rdp;

        head->func = func;
        head->next = NULL;

        smp_mb(); /* Ensure RCU update seen before callback registry. */

        /*
         * Opportunistically note grace-period endings and beginnings.
         * Note that we might see a beginning right after we see an
         * end, but never vice versa, since this CPU has to pass through
         * a quiescent state betweentimes.
         */
        local_irq_save(flags);
        rdp = rsp->rda[smp_processor_id()];
        rcu_process_gp_end(rsp, rdp);
        check_for_new_grace_period(rsp, rdp);

        /* Add the callback to our list. */
        *rdp->nxttail[RCU_NEXT_TAIL] = head;
        rdp->nxttail[RCU_NEXT_TAIL] = &head->next;

        /* Start a new grace period if one not already started. */
        if (!rcu_gp_in_progress(rsp)) {
                unsigned long nestflag;
                struct rcu_node *rnp_root = rcu_get_root(rsp);

                spin_lock_irqsave(&rnp_root->lock, nestflag);
                rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock. */
        }

        /*
         * Force the grace period if too many callbacks or too long waiting.
         * Enforce hysteresis, and don't invoke force_quiescent_state()
         * if some other CPU has recently done so.  Also, don't bother
         * invoking force_quiescent_state() if the newly enqueued callback
         * is the only one waiting for a grace period to complete.
         */
        if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
                rdp->blimit = LONG_MAX;
                if (rsp->n_force_qs == rdp->n_force_qs_snap &&
                    *rdp->nxttail[RCU_DONE_TAIL] != head)
                        force_quiescent_state(rsp, 0);
                rdp->n_force_qs_snap = rsp->n_force_qs;
                rdp->qlen_last_fqs_check = rdp->qlen;
        } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
                force_quiescent_state(rsp, 1);
        local_irq_restore(flags);
}

/*
 * Queue an RCU-sched callback for invocation after a grace period.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_state);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Queue an RCU callback for invocation after a quicker grace period.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_bh_state);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, for the specified type of RCU, returning 1 if so.
 * The checks are in order of increasing expense: checks that can be
 * carried out against CPU-local state are performed first.  However,
 * we must check for CPU stalls first, else we might not get a chance.
 */
static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
{
        rdp->n_rcu_pending++;

        /* Check for CPU stalls, if enabled. */
        check_cpu_stall(rsp, rdp);

        /* Is the RCU core waiting for a quiescent state from this CPU? */
        if (rdp->qs_pending) {
                rdp->n_rp_qs_pending++;
                return 1;
        }

        /* Does this CPU have callbacks ready to invoke? */
        if (cpu_has_callbacks_ready_to_invoke(rdp)) {
                rdp->n_rp_cb_ready++;
                return 1;
        }

        /* Has RCU gone idle with this CPU needing another grace period? */
        if (cpu_needs_another_gp(rsp, rdp)) {
                rdp->n_rp_cpu_needs_gp++;
                return 1;
        }

        /* Has another RCU grace period completed? */
        if (ACCESS_ONCE(rsp->completed) != rdp->completed) { /* outside lock */
                rdp->n_rp_gp_completed++;
                return 1;
        }

        /* Has a new RCU grace period started? */
        if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) { /* outside lock */
                rdp->n_rp_gp_started++;
                return 1;
        }

        /* Has an RCU GP gone long enough to send resched IPIs &c? */
        if (rcu_gp_in_progress(rsp) &&
            ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) {
                rdp->n_rp_need_fqs++;
                return 1;
        }

        /* nothing to do */
        rdp->n_rp_need_nothing++;
        return 0;
}

1404 | /* | |
1405 | * Check to see if there is any immediate RCU-related work to be done | |
1406 | * by the current CPU, returning 1 if so. This function is part of the | |
1407 | * RCU implementation; it is -not- an exported member of the RCU API. | |
1408 | */ | |
a157229c | 1409 | static int rcu_pending(int cpu) |
64db4cff | 1410 | { |
d6714c22 | 1411 | return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) || |
f41d911f PM |
1412 | __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) || |
1413 | rcu_preempt_pending(cpu); | |
64db4cff PM |
1414 | } |
1415 | ||
1416 | /* | |
1417 | * Check to see if any future RCU-related work will need to be done | |
1418 | * by the current CPU, even if none need be done immediately, returning | |
1419 | * 1 if so. This function is part of the RCU implementation; it is -not- | |
1420 | * an exported member of the RCU API. | |
1421 | */ | |
1422 | int rcu_needs_cpu(int cpu) | |
1423 | { | |
1424 | /* RCU callbacks either ready or pending? */ | |
d6714c22 | 1425 | return per_cpu(rcu_sched_data, cpu).nxtlist || |
f41d911f PM |
1426 | per_cpu(rcu_bh_data, cpu).nxtlist || |
1427 | rcu_preempt_needs_cpu(cpu); | |
64db4cff PM |
1428 | } |
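/*
 * Editor's note -- hedged sketch of the intended caller: the NO_HZ
 * tick code consults rcu_needs_cpu() before stopping a CPU's
 * scheduling-clock tick.  The function below is a simplified,
 * hypothetical stand-in for that logic, not the actual tick code.
 */
static int cpu_can_stop_tick(int cpu)
{
	/* Keep the tick running while RCU still has work queued here. */
	if (rcu_needs_cpu(cpu))
		return 0;
	return 1;
}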
1429 | ||
d0ec774c PM |
1430 | static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; |
1431 | static atomic_t rcu_barrier_cpu_count; | |
1432 | static DEFINE_MUTEX(rcu_barrier_mutex); | |
1433 | static struct completion rcu_barrier_completion; | |
d0ec774c PM |
1434 | |
1435 | static void rcu_barrier_callback(struct rcu_head *notused) | |
1436 | { | |
1437 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) | |
1438 | complete(&rcu_barrier_completion); | |
1439 | } | |
1440 | ||
1441 | /* | |
1442 | * Called with preemption disabled, and from cross-cpu IRQ context. | |
1443 | */ | |
1444 | static void rcu_barrier_func(void *type) | |
1445 | { | |
1446 | int cpu = smp_processor_id(); | |
1447 | struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu); | |
1448 | void (*call_rcu_func)(struct rcu_head *head, | |
1449 | void (*func)(struct rcu_head *head)); | |
1450 | ||
1451 | atomic_inc(&rcu_barrier_cpu_count); | |
1452 | call_rcu_func = type; | |
1453 | call_rcu_func(head, rcu_barrier_callback); | |
1454 | } | |
1455 | ||
d0ec774c PM |
1456 | /* |
1457 | * Orchestrate the specified type of RCU barrier, waiting for all | |
1458 | * RCU callbacks of the specified type to complete. | |
1459 | */ | |
e74f4c45 PM |
1460 | static void _rcu_barrier(struct rcu_state *rsp, |
1461 | void (*call_rcu_func)(struct rcu_head *head, | |
d0ec774c PM |
1462 | void (*func)(struct rcu_head *head))) |
1463 | { | |
1464 | BUG_ON(in_interrupt()); | |
e74f4c45 | 1465 | /* Take mutex to serialize concurrent rcu_barrier() requests. */ |
d0ec774c PM |
1466 | mutex_lock(&rcu_barrier_mutex); |
1467 | init_completion(&rcu_barrier_completion); | |
1468 | /* | |
1469 | * Initialize rcu_barrier_cpu_count to 1, then invoke | |
1470 | * rcu_barrier_func() on each CPU, so that each CPU also has | |
1471 | * incremented rcu_barrier_cpu_count. Only then is it safe to | |
1472 | * decrement rcu_barrier_cpu_count -- otherwise the first CPU | |
1473 | * might complete its grace period before all of the other CPUs | |
1474 | * did their increment, causing this function to return too | |
1475 | * early. | |
1476 | */ | |
1477 | atomic_set(&rcu_barrier_cpu_count, 1); | |
e74f4c45 PM |
1478 | preempt_disable(); /* stop CPU_DYING from filling orphan_cbs_list */ |
1479 | rcu_adopt_orphan_cbs(rsp); | |
d0ec774c | 1480 | on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1); |
e74f4c45 | 1481 | preempt_enable(); /* CPU_DYING can again fill orphan_cbs_list */ |
d0ec774c PM |
1482 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) |
1483 | complete(&rcu_barrier_completion); | |
1484 | wait_for_completion(&rcu_barrier_completion); | |
1485 | mutex_unlock(&rcu_barrier_mutex); | |
d0ec774c | 1486 | } |
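/*
 * Editor's note -- the "initialize the count to 1" idiom above,
 * reduced to a standalone sketch (illustrative only; post_unit() is a
 * hypothetical asynchronous dispatcher).  The extra initial reference
 * ensures the completion cannot fire until the orchestrator has
 * finished posting work and dropped its own reference, even if every
 * posted unit finishes immediately.
 */
static atomic_t unit_count;
static struct completion units_done;

static void unit_callback(void)		/* runs once per posted unit */
{
	if (atomic_dec_and_test(&unit_count))
		complete(&units_done);
}

static void run_units(int n)
{
	int i;

	init_completion(&units_done);
	atomic_set(&unit_count, 1);	/* orchestrator's own reference */
	for (i = 0; i < n; i++) {
		atomic_inc(&unit_count);
		post_unit(unit_callback);	/* hypothetical dispatch */
	}
	if (atomic_dec_and_test(&unit_count))	/* drop own reference */
		complete(&units_done);
	wait_for_completion(&units_done);
}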
d0ec774c PM |
1487 | |
1488 | /** | |
1489 | * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. | |
1490 | */ | |
1491 | void rcu_barrier_bh(void) | |
1492 | { | |
e74f4c45 | 1493 | _rcu_barrier(&rcu_bh_state, call_rcu_bh); |
d0ec774c PM |
1494 | } |
1495 | EXPORT_SYMBOL_GPL(rcu_barrier_bh); | |
1496 | ||
1497 | /** | |
1498 | * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. | |
1499 | */ | |
1500 | void rcu_barrier_sched(void) | |
1501 | { | |
e74f4c45 | 1502 | _rcu_barrier(&rcu_sched_state, call_rcu_sched); |
d0ec774c PM |
1503 | } |
1504 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); | |
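/*
 * Editor's note -- typical (hypothetical) module-teardown usage of
 * the barrier primitives above: after unpublishing all data, wait for
 * every callback this module queued before its code and data go away.
 * foo_unpublish_all() is an invented name.
 */
static void __exit foo_exit(void)
{
	foo_unpublish_all();	/* no further call_rcu_bh() invocations */
	rcu_barrier_bh();	/* all queued BH callbacks have now run */
}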
1505 | ||
64db4cff | 1506 | /* |
27569620 | 1507 | * Do boot-time initialization of a CPU's per-CPU RCU data. |
64db4cff | 1508 | */ |
27569620 PM |
1509 | static void __init |
1510 | rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) | |
64db4cff PM |
1511 | { |
1512 | unsigned long flags; | |
1513 | int i; | |
27569620 PM |
1514 | struct rcu_data *rdp = rsp->rda[cpu]; |
1515 | struct rcu_node *rnp = rcu_get_root(rsp); | |
1516 | ||
1517 | /* Set up local state, ensuring consistent view of global state. */ | |
1518 | spin_lock_irqsave(&rnp->lock, flags); | |
1519 | rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); | |
1520 | rdp->nxtlist = NULL; | |
1521 | for (i = 0; i < RCU_NEXT_SIZE; i++) | |
1522 | rdp->nxttail[i] = &rdp->nxtlist; | |
1523 | rdp->qlen = 0; | |
1524 | #ifdef CONFIG_NO_HZ | |
1525 | rdp->dynticks = &per_cpu(rcu_dynticks, cpu); | |
1526 | #endif /* #ifdef CONFIG_NO_HZ */ | |
1527 | rdp->cpu = cpu; | |
1528 | spin_unlock_irqrestore(&rnp->lock, flags); | |
1529 | } | |
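/*
 * Editor's note -- on the nxttail[] initialization above: the callback
 * list is segmented, and in this tree RCU_NEXT_SIZE covers the DONE,
 * WAIT, NEXT_READY, and NEXT segments.  A small worked example:
 *
 *   initially:          nxtlist = NULL, nxttail[0..3] = &nxtlist (all empty)
 *   after one call_rcu: nxtlist -> cb1, nxttail[RCU_NEXT_TAIL] = &cb1->next
 *
 * The grace-period machinery later advances the earlier tail pointers
 * as callbacks become ready to invoke.
 */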
1530 | ||
1531 | /* | |
1532 | * Initialize a CPU's per-CPU RCU data. Note that only one online or | |
1533 | * offline event can be happening at a given time. Note also that we | |
1534 | * can accept some slop in the rsp->completed access due to the fact | |
1535 | * that this CPU cannot possibly have any RCU callbacks in flight yet. | |
64db4cff | 1536 | */ |
e4fa4c97 | 1537 | static void __cpuinit |
f41d911f | 1538 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) |
64db4cff PM |
1539 | { |
1540 | unsigned long flags; | |
64db4cff PM |
1541 | long lastcomp; |
1542 | unsigned long mask; | |
1543 | struct rcu_data *rdp = rsp->rda[cpu]; | |
1544 | struct rcu_node *rnp = rcu_get_root(rsp); | |
1545 | ||
1546 | /* Set up local state, ensuring consistent view of global state. */ | |
1547 | spin_lock_irqsave(&rnp->lock, flags); | |
1548 | lastcomp = rsp->completed; | |
1549 | rdp->completed = lastcomp; | |
1550 | rdp->gpnum = lastcomp; | |
1551 | rdp->passed_quiesc = 0; /* We could be racing with new GP, */ | |
1552 | rdp->qs_pending = 1; /* so set up to respond to current GP. */ | |
1553 | rdp->beenonline = 1; /* We have now been online. */ | |
f41d911f | 1554 | rdp->preemptable = preemptable; |
64db4cff | 1555 | rdp->passed_quiesc_completed = lastcomp - 1; |
37c72e56 PM |
1556 | rdp->qlen_last_fqs_check = 0; |
1557 | rdp->n_force_qs_snap = rsp->n_force_qs; | |
64db4cff | 1558 | rdp->blimit = blimit; |
64db4cff PM |
1559 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
1560 | ||
1561 | /* | |
1562 | * A new grace period might start here. If so, we won't be part | |
1563 | * of it, but that is OK, as we are currently in a quiescent state. | |
1564 | */ | |
1565 | ||
1566 | /* Exclude any attempts to start a new GP on large systems. */ | |
1567 | spin_lock(&rsp->onofflock); /* irqs already disabled. */ | |
1568 | ||
1569 | /* Add CPU to rcu_node bitmasks. */ | |
1570 | rnp = rdp->mynode; | |
1571 | mask = rdp->grpmask; | |
1572 | do { | |
1573 | /* Exclude any attempts to start a new GP on small systems. */ | |
1574 | spin_lock(&rnp->lock); /* irqs already disabled. */ | |
1575 | rnp->qsmaskinit |= mask; | |
1576 | mask = rnp->grpmask; | |
1577 | spin_unlock(&rnp->lock); /* irqs already disabled. */ | |
1578 | rnp = rnp->parent; | |
1579 | } while (rnp != NULL && !(rnp->qsmaskinit & mask)); | |
1580 | ||
e7d8842e | 1581 | spin_unlock_irqrestore(&rsp->onofflock, flags); |
64db4cff PM |
1582 | } |
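/*
 * Editor's note -- worked example of the bitmask walk above, under an
 * assumed layout of NR_CPUS = 128 with 64-way leaves (two leaf nodes
 * under one root): onlining CPU 70 sets bit (70 - 64) = 6 in leaf 1's
 * qsmaskinit; mask then becomes leaf 1's grpmask (0x2), and if that
 * bit is not yet set in the root's qsmaskinit the loop iterates once
 * more to set it, after which rnp->parent is NULL and the walk ends.
 */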
1583 | ||
1584 | static void __cpuinit rcu_online_cpu(int cpu) | |
1585 | { | |
f41d911f PM |
1586 | rcu_init_percpu_data(cpu, &rcu_sched_state, 0); |
1587 | rcu_init_percpu_data(cpu, &rcu_bh_state, 0); | |
1588 | rcu_preempt_init_percpu_data(cpu); | |
64db4cff PM |
1589 | } |
1590 | ||
1591 | /* | |
f41d911f | 1592 | * Handle CPU online/offline notification events. |
64db4cff | 1593 | */ |
2e597558 PM |
1594 | int __cpuinit rcu_cpu_notify(struct notifier_block *self, |
1595 | unsigned long action, void *hcpu) | |
64db4cff PM |
1596 | { |
1597 | long cpu = (long)hcpu; | |
1598 | ||
1599 | switch (action) { | |
1600 | case CPU_UP_PREPARE: | |
1601 | case CPU_UP_PREPARE_FROZEN: | |
1602 | rcu_online_cpu(cpu); | |
1603 | break; | |
d0ec774c PM |
1604 | case CPU_DYING: |
1605 | case CPU_DYING_FROZEN: | |
1606 | /* | |
e74f4c45 | 1607 | * preempt_disable() in _rcu_barrier() prevents stop_machine(), |
d0ec774c | 1608 | * so when "on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);" | |
e74f4c45 PM |
1609 | * returns, all online cpus have queued rcu_barrier_func(). |
1610 | * The dying CPU clears its cpu_online_mask bit and | |
1611 | * moves all of its RCU callbacks to ->orphan_cbs_list | |
1612 | * in the context of stop_machine(), so subsequent calls | |
1613 | * to _rcu_barrier() will adopt these callbacks and only | |
1614 | * then queue rcu_barrier_func() on all remaining CPUs. | |
d0ec774c | 1615 | */ |
e74f4c45 PM |
1616 | rcu_send_cbs_to_orphanage(&rcu_bh_state); |
1617 | rcu_send_cbs_to_orphanage(&rcu_sched_state); | |
1618 | rcu_preempt_send_cbs_to_orphanage(); | |
d0ec774c | 1619 | break; |
64db4cff PM |
1620 | case CPU_DEAD: |
1621 | case CPU_DEAD_FROZEN: | |
1622 | case CPU_UP_CANCELED: | |
1623 | case CPU_UP_CANCELED_FROZEN: | |
1624 | rcu_offline_cpu(cpu); | |
1625 | break; | |
1626 | default: | |
1627 | break; | |
1628 | } | |
1629 | return NOTIFY_OK; | |
1630 | } | |
1631 | ||
1632 | /* | |
1633 | * Compute the per-level fanout, either using the exact fanout specified | |
1634 | * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT. | |
1635 | */ | |
1636 | #ifdef CONFIG_RCU_FANOUT_EXACT | |
1637 | static void __init rcu_init_levelspread(struct rcu_state *rsp) | |
1638 | { | |
1639 | int i; | |
1640 | ||
1641 | for (i = NUM_RCU_LVLS - 1; i >= 0; i--) | |
1642 | rsp->levelspread[i] = CONFIG_RCU_FANOUT; | |
1643 | } | |
1644 | #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */ | |
1645 | static void __init rcu_init_levelspread(struct rcu_state *rsp) | |
1646 | { | |
1647 | int ccur; | |
1648 | int cprv; | |
1649 | int i; | |
1650 | ||
1651 | cprv = NR_CPUS; | |
1652 | for (i = NUM_RCU_LVLS - 1; i >= 0; i--) { | |
1653 | ccur = rsp->levelcnt[i]; | |
1654 | rsp->levelspread[i] = (cprv + ccur - 1) / ccur; | |
1655 | cprv = ccur; | |
1656 | } | |
1657 | } | |
1658 | #endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */ | |
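/*
 * Editor's note -- worked example of the balancing arithmetic above
 * (values assumed for illustration): with NR_CPUS = 128 and
 * levelcnt = {1, 2}, the loop runs leaf-first using ceiling division:
 *
 *   i = 1:  ccur = 2, cprv = 128  ->  levelspread[1] = 129/2 = 64
 *   i = 0:  ccur = 1, cprv = 2    ->  levelspread[0] = 2/1   = 2
 *
 * so each leaf rcu_node covers up to 64 CPUs and the root has two
 * children, rather than one full leaf and one nearly empty one.
 */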
1659 | ||
1660 | /* | |
1661 | * Helper function for rcu_init() that initializes one rcu_state structure. | |
1662 | */ | |
1663 | static void __init rcu_init_one(struct rcu_state *rsp) | |
1664 | { | |
1665 | int cpustride = 1; | |
1666 | int i; | |
1667 | int j; | |
1668 | struct rcu_node *rnp; | |
1669 | ||
1670 | /* Initialize the level-tracking arrays. */ | |
1671 | ||
1672 | for (i = 1; i < NUM_RCU_LVLS; i++) | |
1673 | rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1]; | |
1674 | rcu_init_levelspread(rsp); | |
1675 | ||
1676 | /* Initialize the elements themselves, starting from the leaves. */ | |
1677 | ||
1678 | for (i = NUM_RCU_LVLS - 1; i >= 0; i--) { | |
1679 | cpustride *= rsp->levelspread[i]; | |
1680 | rnp = rsp->level[i]; | |
1681 | for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { | |
978c0b88 PM |
1682 | if (rnp != rcu_get_root(rsp)) |
1683 | spin_lock_init(&rnp->lock); | |
f41d911f | 1684 | rnp->gpnum = 0; |
64db4cff PM |
1685 | rnp->qsmask = 0; |
1686 | rnp->qsmaskinit = 0; | |
1687 | rnp->grplo = j * cpustride; | |
1688 | rnp->grphi = (j + 1) * cpustride - 1; | |
1689 | if (rnp->grphi >= NR_CPUS) | |
1690 | rnp->grphi = NR_CPUS - 1; | |
1691 | if (i == 0) { | |
1692 | rnp->grpnum = 0; | |
1693 | rnp->grpmask = 0; | |
1694 | rnp->parent = NULL; | |
1695 | } else { | |
1696 | rnp->grpnum = j % rsp->levelspread[i - 1]; | |
1697 | rnp->grpmask = 1UL << rnp->grpnum; | |
1698 | rnp->parent = rsp->level[i - 1] + | |
1699 | j / rsp->levelspread[i - 1]; | |
1700 | } | |
1701 | rnp->level = i; | |
f41d911f PM |
1702 | INIT_LIST_HEAD(&rnp->blocked_tasks[0]); |
1703 | INIT_LIST_HEAD(&rnp->blocked_tasks[1]); | |
64db4cff PM |
1704 | } |
1705 | } | |
978c0b88 | 1706 | spin_lock_init(&rcu_get_root(rsp)->lock); |
64db4cff PM |
1707 | } |
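/*
 * Editor's note -- continuing the assumed 128-CPU example: at the
 * leaf level cpustride becomes 64, so leaf 0 gets grplo = 0 and
 * grphi = 63 while leaf 1 gets grplo = 64 and grphi = 127; their
 * grpnum values within the root are 0 and 1, giving grpmask 0x1 and
 * 0x2.  At the root, cpustride becomes 128 and grphi is clamped to
 * NR_CPUS - 1.
 */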
1708 | ||
1709 | /* | |
f41d911f PM |
1710 | * Helper macro for __rcu_init() and __rcu_init_preempt(). To be used |
1711 | * nowhere else! Assigns leaf node pointers into each CPU's rcu_data | |
1712 | * structure. | |
64db4cff | 1713 | */ |
65cf8f86 | 1714 | #define RCU_INIT_FLAVOR(rsp, rcu_data) \ |
64db4cff | 1715 | do { \ |
a0b6c9a7 PM |
1716 | int i; \ |
1717 | int j; \ | |
1718 | struct rcu_node *rnp; \ | |
1719 | \ | |
65cf8f86 | 1720 | rcu_init_one(rsp); \ |
64db4cff PM |
1721 | rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ |
1722 | j = 0; \ | |
1723 | for_each_possible_cpu(i) { \ | |
1724 | if (i > rnp[j].grphi) \ | |
1725 | j++; \ | |
1726 | per_cpu(rcu_data, i).mynode = &rnp[j]; \ | |
1727 | (rsp)->rda[i] = &per_cpu(rcu_data, i); \ | |
65cf8f86 | 1728 | rcu_boot_init_percpu_data(i, rsp); \ |
64db4cff PM |
1729 | } \ |
1730 | } while (0) | |
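/*
 * Editor's note -- expansion sketch: RCU_INIT_FLAVOR(&rcu_sched_state,
 * rcu_sched_data) first builds the node tree via rcu_init_one(), then
 * walks the leaf row so that each possible CPU's rcu_sched_data.mynode
 * points at the leaf whose [grplo, grphi] range covers it, records
 * that rcu_data in rsp->rda[], and runs boot-time per-CPU init.
 */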
1731 | ||
64db4cff PM |
1732 | void __init __rcu_init(void) |
1733 | { | |
f41d911f | 1734 | rcu_bootup_announce(); |
64db4cff PM |
1735 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
1736 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); | |
1737 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | |
65cf8f86 PM |
1738 | RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); |
1739 | RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); | |
f41d911f | 1740 | __rcu_init_preempt(); |
2e597558 | 1741 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
64db4cff PM |
1742 | } |
1743 | ||
1eba8f84 | 1744 | #include "rcutree_plugin.h" |