/*
 * RCU expedited grace periods
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <[email protected]>
 */

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
{
	rcu_seq_start(&rsp->expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
{
	rcu_seq_end(&rsp->expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter.
 */
static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rsp->expedited_sequence);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
{
	return rcu_seq_done(&rsp->expedited_sequence, s);
}
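
/*
 * Usage sketch for the three helpers above: callers take a snapshot of
 * ->expedited_sequence before doing anything, then test that snapshot
 * to learn whether some other expedited grace period has already done
 * their work for them, roughly:
 *
 *	s = rcu_exp_gp_seq_snap(rsp);	(cookie: value the counter must reach)
 *	...
 *	if (rcu_exp_gp_seq_done(rsp, s))
 *		return;			(a full expedited GP has elapsed)
 *
 * The low-order bits of ->expedited_sequence track whether a grace
 * period is in progress, so rcu_seq_snap() rounds up to a value that
 * can be reached only after a full grace period completes.
 */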

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity. Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online. This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rsp->ncpus); /* Order against locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rsp->ncpus_snap))
		return;
	rsp->ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug(rsp);
	rcu_for_each_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		rnp->expmask = rnp->expmaskinit;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return true if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period. Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold the rcu_state's exp_mutex.
 */
static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return rnp->exp_tasks == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period. This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree. (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold the rcu_state's exp_mutex and the specified rcu_node
 * structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up(&rsp->expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		rnp->expmask &= ~mask;
	}
}

/*
 * Report expedited quiescent state for specified node. This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 *
 * Caller must hold the rcu_state's exp_mutex.
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
					      struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rsp, rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure. Caller must hold the rcu_state's
 * exp_mutex.
 */
static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	rnp->expmask &= ~mask;
	__rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
			       bool wake)
{
	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
}
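
/*
 * In outline, the four reporting functions above form a funnel: a
 * per-CPU quiescent state enters through rcu_report_exp_rdp(), clears
 * the CPU's bit in its leaf rcu_node structure's ->expmask, and, once a
 * given rcu_node structure's CPUs and blocked tasks are all accounted
 * for, __rcu_report_exp_rnp() clears that node's bit in its parent.
 * When the root rcu_node structure empties out, the task driving the
 * expedited grace period is awakened via ->expedited_wq.
 */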

/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
			       unsigned long s)
{
	if (rcu_exp_gp_seq_done(rsp, s)) {
		trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
		/* Ensure test happens before caller kfree(). */
		smp_mb__before_atomic(); /* ^^^ */
		atomic_long_inc(stat);
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods. Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held. Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root(rsp);

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rsp->exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire rsp->exp_mutex. The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(rsp, &rdp->exp_workdone1, s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(rsp,
						      &rdp->exp_workdone2, s));
			return true;
		}
		rnp->exp_seq_rq = s; /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
					  rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rsp->exp_mutex);
fastpath:
	if (sync_exp_work_done(rsp, &rdp->exp_workdone3, s)) {
		mutex_unlock(&rsp->exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
	return false;
}
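
/*
 * A rough sketch of a caller's interaction with the funnel lock, as it
 * is used by _synchronize_rcu_expedited() later in this file:
 *
 *	s = rcu_exp_gp_seq_snap(rsp);
 *	if (exp_funnel_lock(rsp, s))
 *		return;			(piggy-backed on someone else's GP)
 *	... drive the expedited grace period ...
 *	mutex_unlock(&rsp->exp_mutex);	(lets the next expedited GP start)
 *
 * Tasks that find a sufficiently large ->exp_seq_rq value already
 * recorded instead sleep on the corresponding ->exp_wq[] entry and are
 * awakened by rcu_exp_wait_wake() once the needed grace period completes.
 */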

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void sync_sched_exp_handler(void *data)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_state *rsp = data;

	rdp = this_cpu_ptr(rsp->rda);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle()) {
		rcu_report_exp_rdp(&rcu_sched_state,
				   this_cpu_ptr(&rcu_sched_data), true);
		return;
	}
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
	resched_cpu(smp_processor_id());
}
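
/*
 * Note on the handler above: when the interrupted CPU is neither idle
 * nor already flagged, the handler does not report the quiescent state
 * itself. It instead sets .cpu_no_qs.b.exp and .rcu_urgent_qs and
 * forces a reschedule; the expedited quiescent state is then expected
 * to be reported from the RCU-sched quiescent-state path (for example,
 * rcu_sched_qs() at the next context switch), which checks this flag
 * and calls rcu_report_exp_rdp().
 */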

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_sched_state;

	rdp = per_cpu_ptr(rsp->rda, cpu);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
		return;
	ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
	WARN_ON_ONCE(ret);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
				     smp_call_func_t func)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_node *rnp;

	sync_exp_reset_tree(rsp);
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);

		/* Each pass checks a CPU for identity, offline, and idle. */
		mask_ofl_test = 0;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);

			rdp->exp_dynticks_snap =
				rcu_dynticks_snap(rdp->dynticks);
			if (raw_smp_processor_id() == cpu ||
			    rcu_dynticks_in_eqs(rdp->exp_dynticks_snap) ||
			    !(rnp->qsmaskinitnext & rdp->grpmask))
				mask_ofl_test |= rdp->grpmask;
		}
		mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

		/*
		 * Need to wait for any blocked tasks as well. Note that
		 * additional blocking tasks will also block the expedited
		 * GP until such time as the ->expmask bits are cleared.
		 */
		if (rcu_preempt_has_tasks(rnp))
			rnp->exp_tasks = rnp->blkd_tasks.next;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* IPI the remaining CPUs for expedited quiescent state. */
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);

			if (!(mask_ofl_ipi & mask))
				continue;
retry_ipi:
			if (rcu_dynticks_in_eqs_since(rdp->dynticks,
						      rdp->exp_dynticks_snap)) {
				mask_ofl_test |= mask;
				continue;
			}
			ret = smp_call_function_single(cpu, func, rsp, 0);
			if (!ret) {
				mask_ofl_ipi &= ~mask;
				continue;
			}
			/* Failed, raced with CPU hotplug operation. */
			raw_spin_lock_irqsave_rcu_node(rnp, flags);
			if ((rnp->qsmaskinitnext & mask) &&
			    (rnp->expmask & mask)) {
				/* Online, so delay for a bit and try again. */
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
				schedule_timeout_uninterruptible(1);
				goto retry_ipi;
			}
			/* CPU really is offline, so we can ignore it. */
			if (!(rnp->expmask & mask))
				mask_ofl_ipi &= ~mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}
		/* Report quiescent states for those that went offline. */
		mask_ofl_test |= mask_ofl_ipi;
		if (mask_ofl_test)
			rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
	}
}
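
/*
 * In outline, the selection pass above proceeds in two phases per leaf
 * rcu_node structure: (1) take a dynticks snapshot of every possible
 * CPU, immediately excusing CPUs that are idle (in an extended quiescent
 * state), not fully online, or the current CPU; then (2) IPI the
 * remaining CPUs, re-checking each snapshot first so that CPUs that
 * passed through an extended quiescent state in the meantime are
 * excused without an IPI. CPUs whose IPIs fail are re-checked against
 * CPU hotplug and either retried or treated as offline.
 */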

static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
{
	int cpu;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	int ret;

	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;

	for (;;) {
		ret = swait_event_timeout(
				rsp->expedited_wq,
				sync_rcu_preempt_exp_done(rnp_root),
				jiffies_stall);
		if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
			return;
		WARN_ON(ret < 0); /* workqueues should not be signaled. */
		if (rcu_cpu_stall_suppress)
			continue;
		panic_on_rcu_stall();
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rsp->name);
		ndetected = 0;
		rcu_for_each_leaf_node(rsp, rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(rsp->rda, cpu);
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rsp->expedited_sequence,
			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
		if (ndetected) {
			pr_err("blocking rcu_node structures:");
			rcu_for_each_node_breadth_first(rsp, rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_preempt_exp_done(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					rnp->expmask,
					".T"[!!rnp->exp_tasks]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rsp, rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period. Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_sched_expedited_wait(rsp);
	rcu_exp_gp_seq_end(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));

	/*
	 * Switch over to wakeup mode, allowing the next GP, but -only- the
	 * next GP, to proceed.
	 */
	mutex_lock(&rsp->exp_wake_mutex);

	rcu_for_each_node_breadth_first(rsp, rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				rnp->exp_seq_rq = s;
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rsp->expedited_sequence) & 0x3]);
	}
	trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
	mutex_unlock(&rsp->exp_wake_mutex);
}
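
/*
 * Note on the wakeups above: each rcu_node structure has a small ring
 * of wait queues, ->exp_wq[], indexed by the low-order bits of the
 * grace-period counter (rcu_seq_ctr(...) & 0x3). Waiters for a given
 * expedited grace period sleep on the slot matching their snapshot, so
 * a wakeup for one grace period is not confused with waiters for a
 * later one, which may already be in progress because ->exp_mutex can
 * be released before these wakeups complete.
 */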

/* Let the workqueue handler know what it is supposed to do. */
struct rcu_exp_work {
	smp_call_func_t rew_func;
	struct rcu_state *rew_rsp;
	unsigned long rew_s;
	struct work_struct rew_work;
};

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
				  smp_call_func_t func, unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus(rsp, func);

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(rsp, s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
}

/*
 * Given an rcu_state pointer and a smp_call_function() handler, kick
 * off the specified flavor of expedited grace period.
 */
static void _synchronize_rcu_expedited(struct rcu_state *rsp,
				       smp_call_func_t func)
{
	struct rcu_data *rdp;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(rsp->call);
		return;
	}

	/* Take a snapshot of the sequence number. */
	s = rcu_exp_gp_seq_snap(rsp);
	if (exp_funnel_lock(rsp, s))
		return; /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(rsp, func, s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_func = func;
		rew.rew_rsp = rsp;
		rew.rew_s = s;
		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
		schedule_work(&rew.rew_work);
	}

	/* Wait for expedited grace period to complete. */
	rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
	rnp = rcu_get_root(rsp);
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(rsp, &rdp->exp_workdone0, s));
	smp_mb(); /* Workqueue actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rsp->exp_mutex);
}
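
/*
 * Putting the pieces together, an expedited grace period initiated by
 * the function above proceeds roughly as follows:
 *
 *	s = rcu_exp_gp_seq_snap(rsp);		(what must complete)
 *	exp_funnel_lock(rsp, s);		(or piggy-back and return)
 *	schedule_work() -> wait_rcu_exp_gp()	(or direct call at boot)
 *	  -> sync_rcu_exp_select_cpus()		(snapshot/IPI the CPUs)
 *	  -> rcu_exp_wait_wake()		(wait, then wake all waiters)
 *	wait_event(... exp_workdone0 ...);	(initiator sleeps as well)
 *	mutex_unlock(&rsp->exp_mutex);		(admit the next expedited GP)
 */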

/**
 * synchronize_sched_expedited - Brute-force RCU-sched grace period
 *
 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly. This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code. In fact,
 * if you are using synchronize_sched_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_sched() instead.
 *
 * This implementation can be thought of as an application of sequence
 * locking to expedited grace periods, but using the sequence counter to
 * determine when someone else has already done the work instead of for
 * retrying readers.
 */
void synchronize_sched_expedited(void)
{
	struct rcu_state *rsp = &rcu_sched_state;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_sched_expedited() in RCU read-side critical section");

	/* If only one CPU, this is automatically a grace period. */
	if (rcu_blocking_is_gp())
		return;

	_synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single(). If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree. Otherwise, immediately
 * report the quiescent state.
 */
static void sync_rcu_exp_handler(void *info)
{
	struct rcu_data *rdp;
	struct rcu_state *rsp = info;
	struct task_struct *t = current;

	/*
	 * Within an RCU read-side critical section, request that the next
	 * rcu_read_unlock() report the quiescent state. Unless this RCU
	 * read-side critical section has already blocked, in which case
	 * it is already set up for the expedited grace period to wait on it.
	 */
	if (t->rcu_read_lock_nesting > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {
		t->rcu_read_unlock_special.b.exp_need_qs = true;
		return;
	}

	/*
	 * We are either exiting an RCU read-side critical section (negative
	 * values of t->rcu_read_lock_nesting) or are not in one at all
	 * (zero value of t->rcu_read_lock_nesting). Or we are in an RCU
	 * read-side critical section that blocked before this expedited
	 * grace period started. Either way, we can immediately report
	 * the quiescent state.
	 */
	rdp = this_cpu_ptr(rsp->rda);
	rcu_report_exp_rdp(rsp, rdp, true);
}
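
/*
 * Note on .exp_need_qs: once the handler above sets this flag, the
 * expedited quiescent state is expected to be reported when the task
 * leaves its outermost RCU read-side critical section, from the
 * rcu_read_unlock() slow path (rcu_read_unlock_special()), which checks
 * the flag and calls rcu_report_exp_rdp().
 */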

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it. The basic
 * idea is to IPI all non-idle non-nohz online CPUs. The IPI handler
 * checks whether the CPU is in an RCU-preempt critical section, and
 * if so, it sets a flag that causes the outermost rcu_read_unlock()
 * to report the quiescent state. On the other hand, if the CPU is
 * not in an RCU read-side critical section, the IPI handler reports
 * the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code. In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 */
void synchronize_rcu_expedited(void)
{
	struct rcu_state *rsp = rcu_state_p;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	_synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Wait for an RCU-preempt grace period, but make it happen quickly.
 * Because preemptible RCU does not exist here, map to RCU-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */