/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptable semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <[email protected]>
 *         Paul E. McKenney <[email protected]>
 */


#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
        printk(KERN_INFO
               "Experimental preemptable hierarchical RCU implementation.\n");
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
        return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
        return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Record a preemptable-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 */
static void rcu_preempt_qs(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
        rdp->passed_quiesc_completed = rdp->gpnum - 1;
        barrier();
        rdp->passed_quiesc = 1;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the appropriate entry
 * of the blocked_tasks[] array.  The task will dequeue itself when
 * it exits the outermost enclosing RCU read-side critical section.
 * Therefore, the current grace period cannot be permitted to complete
 * until the blocked_tasks[] entry indexed by the low-order bit of
 * rnp->gpnum empties.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
        struct task_struct *t = current;
        unsigned long flags;
        int phase;
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        if (t->rcu_read_lock_nesting &&
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

                /* Possibly blocking in an RCU read-side critical section. */
                rdp = rcu_preempt_state.rda[cpu];
                rnp = rdp->mynode;
                spin_lock_irqsave(&rnp->lock, flags);
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
                t->rcu_blocked_node = rnp;

                /*
                 * If this CPU has already checked in, then this task
                 * will hold up the next grace period rather than the
                 * current grace period.  Queue the task accordingly.
                 * If the task is queued for the current grace period
                 * (i.e., this CPU has not yet passed through a quiescent
                 * state for the current grace period), then as long
                 * as that task remains queued, the current grace period
                 * cannot end.
                 *
                 * But first, note that the current CPU must still be
                 * on line!
                 */
                WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
                WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
                phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
                list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
                spin_unlock_irqrestore(&rnp->lock, flags);
        }

        /*
         * Either we were not in an RCU read-side critical section to
         * begin with, or we have now recorded that critical section
         * globally.  Either way, we can now note a quiescent state
         * for this CPU.  Again, if we were in an RCU read-side critical
         * section, and if that critical section was blocking the current
         * grace period, then the fact that the task has been enqueued
         * means that we continue to block the current grace period.
         */
        rcu_preempt_qs(cpu);
        local_irq_save(flags);
        t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
        local_irq_restore(flags);
}

/*
 * Tree-preemptable RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
        ACCESS_ONCE(current->rcu_read_lock_nesting)++;
        barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
        return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long mask;
        struct rcu_node *rnp_p;

        if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
                spin_unlock_irqrestore(&rnp->lock, flags);
                return;  /* Still need more quiescent states! */
        }

        rnp_p = rnp->parent;
        if (rnp_p == NULL) {
                /*
                 * Either there is only one rcu_node in the tree,
                 * or tasks were kicked up to root rcu_node due to
                 * CPUs going offline.
                 */
                rcu_report_qs_rsp(&rcu_preempt_state, flags);
                return;
        }

        /* Report up the rest of the hierarchy. */
        mask = rnp->grpmask;
        spin_unlock(&rnp->lock);        /* irqs remain disabled. */
        spin_lock(&rnp_p->lock);        /* irqs already disabled. */
        rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
        int empty;
        unsigned long flags;
        struct rcu_node *rnp;
        int special;

        /* NMI handlers cannot block and cannot safely manipulate state. */
        if (in_nmi())
                return;

        local_irq_save(flags);

        /*
         * If RCU core is waiting for this CPU to exit critical section,
         * let it know that we have done so.
         */
        special = t->rcu_read_unlock_special;
        if (special & RCU_READ_UNLOCK_NEED_QS) {
                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
                rcu_preempt_qs(smp_processor_id());
        }

        /* Hardware IRQ handlers cannot block. */
        if (in_irq()) {
                local_irq_restore(flags);
                return;
        }

        /* Clean up if blocked during RCU read-side critical section. */
        if (special & RCU_READ_UNLOCK_BLOCKED) {
                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

                /*
                 * Remove this task from the list it blocked on.  The
                 * task can migrate while we acquire the lock, but at
                 * most one time.  So at most two passes through loop.
                 */
                for (;;) {
                        rnp = t->rcu_blocked_node;
                        spin_lock(&rnp->lock);  /* irqs already disabled. */
                        if (rnp == t->rcu_blocked_node)
                                break;
                        spin_unlock(&rnp->lock);  /* irqs remain disabled. */
                }
                empty = !rcu_preempted_readers(rnp);
                list_del_init(&t->rcu_node_entry);
                t->rcu_blocked_node = NULL;

                /*
                 * If this was the last task on the current list, and if
                 * we aren't waiting on any CPUs, report the quiescent state.
                 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
                 */
                if (empty)
                        spin_unlock_irqrestore(&rnp->lock, flags);
                else
                        rcu_report_unblock_qs_rnp(rnp, flags);
        } else {
                local_irq_restore(flags);
        }
}

/*
 * Tree-preemptable RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
        struct task_struct *t = current;

        barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
        if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
            unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
                rcu_read_unlock_special(t);
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
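
/*
 * Illustration only: __rcu_read_lock() and __rcu_read_unlock() above back
 * the public rcu_read_lock()/rcu_read_unlock() API.  A minimal reader
 * sketch, assuming a hypothetical RCU-protected pointer "gp" to a
 * hypothetical "struct foo" with an integer field "a":
 *
 *      rcu_read_lock();
 *      p = rcu_dereference(gp);
 *      if (p != NULL)
 *              do_something_with(p->a);        (hypothetical helper)
 *      rcu_read_unlock();
 *
 * If the reader is preempted between rcu_read_lock() and
 * rcu_read_unlock(), rcu_preempt_note_context_switch() queues the task
 * on the appropriate rcu_node ->blocked_tasks[] list, and the outermost
 * rcu_read_unlock() then calls rcu_read_unlock_special() to dequeue it.
 */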

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
        unsigned long flags;
        struct list_head *lp;
        int phase;
        struct task_struct *t;

        if (rcu_preempted_readers(rnp)) {
                spin_lock_irqsave(&rnp->lock, flags);
                phase = rnp->gpnum & 0x1;
                lp = &rnp->blocked_tasks[phase];
                list_for_each_entry(t, lp, rcu_node_entry)
                        printk(" P%d", t->pid);
                spin_unlock_irqrestore(&rnp->lock, flags);
        }
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
        WARN_ON_ONCE(rcu_preempted_readers(rnp));
        WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

325 | /* |
326 | * Handle tasklist migration for case in which all CPUs covered by the | |
327 | * specified rcu_node have gone offline. Move them up to the root | |
328 | * rcu_node. The reason for not just moving them to the immediate | |
329 | * parent is to remove the need for rcu_read_unlock_special() to | |
330 | * make more than two attempts to acquire the target rcu_node's lock. | |
b668c9cf PM |
331 | * Returns true if there were tasks blocking the current RCU grace |
332 | * period. | |
dd5d19ba | 333 | * |
237c80c5 PM |
334 | * Returns 1 if there was previously a task blocking the current grace |
335 | * period on the specified rcu_node structure. | |
336 | * | |
dd5d19ba PM |
337 | * The caller must hold rnp->lock with irqs disabled. |
338 | */ | |
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp)
{
        int i;
        struct list_head *lp;
        struct list_head *lp_root;
        int retval;
        struct rcu_node *rnp_root = rcu_get_root(rsp);
        struct task_struct *tp;

        if (rnp == rnp_root) {
                WARN_ONCE(1, "Last CPU thought to be offlined?");
                return 0;  /* Shouldn't happen: at least one CPU online. */
        }
        WARN_ON_ONCE(rnp != rdp->mynode &&
                     (!list_empty(&rnp->blocked_tasks[0]) ||
                      !list_empty(&rnp->blocked_tasks[1])));

        /*
         * Move tasks up to root rcu_node.  Rely on the fact that the
         * root rcu_node can be at most one ahead of the rest of the
         * rcu_nodes in terms of gp_num value.  This fact allows us to
         * move the blocked_tasks[] array directly, element by element.
         */
        retval = rcu_preempted_readers(rnp);
        for (i = 0; i < 2; i++) {
                lp = &rnp->blocked_tasks[i];
                lp_root = &rnp_root->blocked_tasks[i];
                while (!list_empty(lp)) {
                        tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
                        spin_lock(&rnp_root->lock);  /* irqs already disabled */
                        list_del(&tp->rcu_node_entry);
                        tp->rcu_blocked_node = rnp_root;
                        list_add(&tp->rcu_node_entry, lp_root);
                        spin_unlock(&rnp_root->lock);  /* irqs remain disabled */
                }
        }
        return retval;
}

/*
 * Do CPU-offline processing for preemptable RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
        __rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0) {
                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
                rcu_preempt_qs(cpu);
                return;
        }
        if (per_cpu(rcu_preempt_data, cpu).qs_pending)
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptable RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
        __rcu_process_callbacks(&rcu_preempt_state,
                                &__get_cpu_var(rcu_preempt_data));
}

/*
 * Queue a preemptable-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);
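
/*
 * Illustration only: a typical call_rcu() user embeds an rcu_head in the
 * structure to be reclaimed and passes a callback that frees it once a
 * grace period has elapsed.  A sketch, assuming a hypothetical
 * "struct foo" that embeds a "struct rcu_head rcu" and is published
 * through a hypothetical pointer "gp" protected by "gp_lock":
 *
 *      static void foo_reclaim(struct rcu_head *rcu)
 *      {
 *              kfree(container_of(rcu, struct foo, rcu));
 *      }
 *
 *      spin_lock(&gp_lock);
 *      old = gp;
 *      rcu_assign_pointer(gp, new);
 *      spin_unlock(&gp_lock);
 *      call_rcu(&old->rcu, foo_reclaim);
 */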

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
        struct rcu_synchronize rcu;

        if (!rcu_scheduler_active)
                return;

        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
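
/*
 * Illustration only: synchronize_rcu() lets an updater wait in place
 * rather than registering a callback.  A synchronous-reclamation sketch,
 * again assuming the hypothetical "gp" pointer and "gp_lock" above:
 *
 *      spin_lock(&gp_lock);
 *      old = gp;
 *      rcu_assign_pointer(gp, NULL);
 *      spin_unlock(&gp_lock);
 *      synchronize_rcu();      (waits for all pre-existing readers)
 *      kfree(old);
 *
 * Because it blocks, synchronize_rcu() may not be called from within an
 * RCU read-side critical section, from irq context, or while holding a
 * spinlock.
 */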

/*
 * Wait for an rcu-preempt grace period.  We are supposed to expedite the
 * grace period, but this is the crude slow compatibility hack, so just
 * invoke synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Check to see if there is any immediate preemptable-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
        return __rcu_pending(&rcu_preempt_state,
                             &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptable RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
        return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
        _rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
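
/*
 * Illustration only: rcu_barrier() is typically used on a module-unload
 * path to wait for callbacks queued by that module before tearing down
 * the resources those callbacks use.  A sketch, assuming a hypothetical
 * module-private "foo_cache" kmem_cache freed by its call_rcu() callbacks:
 *
 *      static void __exit foo_exit(void)
 *      {
 *              rcu_barrier();                  (flush outstanding callbacks)
 *              kmem_cache_destroy(foo_cache);
 *      }
 */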

/*
 * Initialize preemptable RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
        rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptable RCU's callbacks to ->orphan_cbs_list.
 */
static void rcu_preempt_send_cbs_to_orphanage(void)
{
        rcu_send_cbs_to_orphanage(&rcu_preempt_state);
}

/*
 * Initialize preemptable RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
        RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptable-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0)
                return;
        t->rcu_read_lock_nesting = 1;
        rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
        printk(KERN_INFO "Hierarchical RCU implementation.\n");
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
        return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Because preemptable RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
        spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Because preemptable RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Because there is no preemptable RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
        WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp)
{
        return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * In classic RCU, call_rcu() is just call_rcu_sched().
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        call_rcu_sched(head, func);
}
EXPORT_SYMBOL_GPL(call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptable RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
        synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Because preemptable RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
        return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
        return 0;
}

/*
 * Because preemptable RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
        rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptable RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptable RCU, there are no callbacks to move.
 */
static void rcu_preempt_send_cbs_to_orphanage(void)
{
}

/*
 * Because preemptable RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */