/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <[email protected]>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

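/*
 * Rough shape of the mechanism: CPUs whose local clock event device
 * stops in deep power states (CLOCK_EVT_FEAT_C3STOP) register
 * themselves in tick_broadcast_mask. A clock event device which keeps
 * running in those states (on x86 typically PIT or HPET) is installed
 * as the broadcast device; its handler wakes the sleeping CPUs via
 * their dev->broadcast() IPI hook, and each woken CPU then runs its
 * local tick handler as if the local timer had fired.
 */
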
static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_on __cpumask_var_read_mostly;
static cpumask_var_t tmpmask __cpumask_var_read_mostly;
static int tick_broadcast_forced;

static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
#else
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
static inline void tick_broadcast_clear_oneshot(int cpu) { }
static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check, if the device can be utilized as broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
					struct clock_event_device *newdev)
{
	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
		return false;

	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
	    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
		return false;

	return !curdev || newdev->rating > curdev->rating;
}

/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if (!tick_check_broadcast_device(cur, dev))
		return;

	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(cur, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}

/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

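/*
 * Update the frequency of the broadcast device, but only if @dev is
 * the installed broadcast device; -ENODEV is returned otherwise. The
 * update is serialized against the broadcast handlers via
 * tick_broadcast_lock.
 */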
int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
{
	int ret = -ENODEV;

	if (tick_is_broadcast_device(dev)) {
		raw_spin_lock(&tick_broadcast_lock);
		ret = __clockevents_update_freq(dev, freq);
		raw_spin_unlock(&tick_broadcast_lock);
	}
	return ret;
}


static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

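/*
 * Ensure the device has a method to kick remote CPUs: prefer a driver
 * supplied dev->broadcast, fall back to the (typically architecture
 * provided) tick_broadcast() IPI and, when even that is unavailable,
 * to a stub which just warns once.
 */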
static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
		ret = 1;
	} else {
		/*
		 * Clear the broadcast bit for this cpu if the
		 * device is not power state affected.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
		else
			tick_device_setup_broadcast_func(dev);

		/*
		 * Clear the broadcast bit if the CPU is not in
		 * periodic broadcast on state.
		 */
		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_ONESHOT:
			/*
			 * If the system is in oneshot mode we can
			 * unconditionally clear the oneshot mask bit,
			 * because the CPU is running and therefore
			 * not in an idle state which causes the power
			 * state affected device to stop. Let the
			 * caller initialize the device.
			 */
			tick_broadcast_clear_oneshot(cpu);
			ret = 0;
			break;

		case TICKDEV_MODE_PERIODIC:
			/*
			 * If the system is in periodic mode, check
			 * whether the broadcast device can be
			 * switched off now.
			 */
			if (cpumask_empty(tick_broadcast_mask) && bc)
				clockevents_shutdown(bc);
			/*
			 * If we kept the cpu in the broadcast mask,
			 * tell the caller to leave the per cpu device
			 * in shutdown state. The periodic interrupt
			 * is delivered by the broadcast device, if
			 * the broadcast device exists and is not
			 * hrtimer based.
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
				ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
			break;
		default:
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
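/*
 * Called from the architecture specific broadcast IPI handler on a
 * woken CPU: run the local clock event handler as if the (stopped)
 * local device had fired.
 */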
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static bool tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;
	bool local = false;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		struct clock_event_device *bc = tick_broadcast_device.evtdev;

		cpumask_clear_cpu(cpu, mask);
		/*
		 * We only run the local handler, if the broadcast
		 * device is not hrtimer based. Otherwise we run into
		 * a hrtimer recursion.
		 *
		 * local timer_interrupt()
		 *   local_handler()
		 *     expire_hrtimers()
		 *       bc_handler()
		 *         local_handler()
		 *           expire_hrtimers()
		 */
		local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
	return local;
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static bool tick_do_periodic_broadcast(void)
{
	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	return tick_do_broadcast(tmpmask);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);

	/* Handle spurious interrupts gracefully */
	if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
		raw_spin_unlock(&tick_broadcast_lock);
		return;
	}

	bc_local = tick_do_periodic_broadcast();

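	/*
	 * The broadcast device itself might be a oneshot only device.
	 * In that case periodic behaviour is emulated by re-arming it
	 * one tick_period past its last expiry:
	 */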
	if (clockevent_state_oneshot(dev)) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		clockevents_program_event(dev, next, true);
	}
	raw_spin_unlock(&tick_broadcast_lock);

	/*
	 * We run the handler of the local cpu after dropping
	 * tick_broadcast_lock because the handler might deadlock when
	 * trying to switch to oneshot mode.
	 */
	if (bc_local)
		td->evtdev->event_handler(td->evtdev);
}

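/*
 * Note: the usual way into tick_broadcast_control() below is via the
 * tick_broadcast_enable()/tick_broadcast_disable() wrappers in
 * include/linux/tick.h, which pass TICK_BROADCAST_ON respectively
 * TICK_BROADCAST_OFF on behalf of architecture and idle driver code.
 */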
/**
 * tick_broadcast_control - Enable/disable or force broadcast mode
 * @mode:	The selected broadcast mode
 *
 * Called when the system enters a state where affected tick devices
 * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
 */
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	int cpu, bc_stopped;
	unsigned long flags;

	/* Protects also the local clockevent device. */
	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	td = this_cpu_ptr(&tick_cpu_device);
	dev = td->evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	cpu = smp_processor_id();
	bc = tick_broadcast_device.evtdev;
	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (mode) {
	case TICK_BROADCAST_FORCE:
		tick_broadcast_forced = 1;
		/* fallthrough - FORCE implies ON */
	case TICK_BROADCAST_ON:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			/*
			 * Only shutdown the cpu local device, if:
			 *
			 * - the broadcast device exists
			 * - the broadcast device is not a hrtimer based one
			 * - the broadcast device is in periodic mode to
			 *   avoid a hiccup during switch to oneshot mode
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
			    tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		break;

	case TICK_BROADCAST_OFF:
		if (tick_broadcast_forced)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (!tick_device_is_functional(dev))
			break;
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (bc) {
		if (cpumask_empty(tick_broadcast_mask)) {
			if (!bc_stopped)
				clockevents_shutdown(bc);
		} else if (bc_stopped) {
			if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				tick_broadcast_start_periodic(bc);
			else
				tick_broadcast_setup_oneshot(bc);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
EXPORT_SYMBOL_GPL(tick_broadcast_control);

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int cpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_on);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif

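/*
 * Shut the broadcast device down across a system suspend. The masks
 * are left intact, so tick_resume_broadcast() can restart it on the
 * way back up; called from tick_suspend() in the timekeeping suspend
 * path.
 */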
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * This is called from tick_resume_local() on a resuming CPU. That's
 * called from the core resume function, tick_unfreeze() and the magic XEN
 * resume hackery.
 *
 * In none of these cases the broadcast device mode can change and the
 * bit of the resuming CPU in the broadcast mask is safe as well.
 */
bool tick_resume_check_broadcast(void)
{
	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
		return false;
	else
		return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
}

void tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_tick_resume(bc);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_TICK_ONESHOT

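/*
 * Roles of the three masks, as used below: tick_broadcast_oneshot_mask
 * tracks CPUs whose local device is shut down and which must be woken
 * by the broadcast device. tick_broadcast_pending_mask marks CPUs
 * whose expired event will be delivered by the broadcast IPI, so they
 * can skip reprogramming their local device on exit. The force mask
 * holds CPUs which must be picked up by the next broadcast event even
 * though their oneshot mask bit has been cleared already.
 */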
static cpumask_var_t tick_broadcast_oneshot_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_pending_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_force_mask __cpumask_var_read_mostly;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

/*
 * Set the broadcast interrupt affinity. With CLOCK_EVT_FEAT_DYNIRQ the
 * broadcast interrupt is moved to the CPU with the earliest expiring
 * timer, so that CPU is woken without an extra IPI.
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				     ktime_t expires)
{
	if (!clockevent_state_oneshot(bc))
		clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);

	clockevents_program_event(bc, expires, 1);
	tick_broadcast_set_affinity(bc, cpumask_of(cpu));
}

static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast_this_cpu(void)
{
	if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
		struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

		/*
		 * We might be in the middle of switching over from
		 * periodic to oneshot. If the CPU has not yet
		 * switched over, leave the device alone.
		 */
		if (td->mode == TICKDEV_MODE_ONESHOT) {
			clockevents_switch_state(td->evtdev,
						 CLOCK_EVT_STATE_ONESHOT);
		}
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);
	dev->next_event = KTIME_MAX;
	next_event = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event <= now) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event < next_event) {
			next_event = td->evtdev->next_event;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast() !
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	bc_local = tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event != KTIME_MAX)
		tick_broadcast_set_event(dev, next_cpu, next_event);

	raw_spin_unlock(&tick_broadcast_lock);

	if (bc_local) {
		td = this_cpu_ptr(&tick_cpu_device);
		td->evtdev->event_handler(td->evtdev);
	}
}

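/*
 * With a hrtimer based broadcast device (CLOCK_EVT_FEAT_HRTIMER) one
 * CPU stands in for the broadcast hardware. Return -EBUSY if @cpu is
 * that bound CPU while the broadcast timer is armed, as it then must
 * stay out of deep idle to service the expiry.
 */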
static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return 0;
	if (bc->next_event == KTIME_MAX)
		return 0;
	return bc->bound_on == cpu ? -EBUSY : 0;
}

static void broadcast_shutdown_local(struct clock_event_device *bc,
				     struct clock_event_device *dev)
{
	/*
	 * For hrtimer based broadcasting we cannot shutdown the cpu
	 * local device if our own event is the first one to expire or
	 * if we own the broadcast timer.
	 */
	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
		if (broadcast_needs_cpu(bc, smp_processor_id()))
			return;
		if (dev->next_event < bc->next_event)
			return;
	}
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}

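/*
 * Backend for the tick_broadcast_enter()/tick_broadcast_exit() pair
 * used by the idle path (by way of tick_broadcast_oneshot_control()
 * in tick-common.c): ENTER before a deep idle state, EXIT after the
 * wakeup. A nonzero return value tells the caller not to enter that
 * state because the broadcast mechanism cannot cover this CPU.
 */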
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc, *dev;
	int cpu, ret = 0;
	ktime_t now;

	/*
	 * If there is no broadcast device, tell the caller not to go
	 * into deep idle.
	 */
	if (!tick_broadcast_device.evtdev)
		return -EBUSY;

	dev = this_cpu_ptr(&tick_cpu_device)->evtdev;

	raw_spin_lock(&tick_broadcast_lock);
	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();

	if (state == TICK_BROADCAST_ENTER) {
		/*
		 * If the current CPU owns the hrtimer broadcast
		 * mechanism, it cannot go deep idle and we do not add
		 * the CPU to the broadcast mask. We don't have to go
		 * through the EXIT path as the local timer is not
		 * shutdown.
		 */
		ret = broadcast_needs_cpu(bc, cpu);
		if (ret)
			goto out;

		/*
		 * If the broadcast device is in periodic mode, we
		 * return.
		 */
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
			/* If it is a hrtimer based broadcast, return busy */
			if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
				ret = -EBUSY;
			goto out;
		}

		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));

			/* Conditionally shut down the local timer. */
			broadcast_shutdown_local(bc, dev);

			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away; we return
			 * busy, so the CPU does not try to go deep
			 * idle.
			 */
			if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
				ret = -EBUSY;
			} else if (dev->next_event < bc->next_event) {
				tick_broadcast_set_event(bc, cpu, dev->next_event);
				/*
				 * In case of hrtimer broadcasts the
				 * programming might have moved the
				 * timer to this cpu. If yes, remove
				 * us from the broadcast mask and
				 * return busy.
				 */
				ret = broadcast_needs_cpu(bc, cpu);
				if (ret) {
					cpumask_clear_cpu(cpu,
						tick_broadcast_oneshot_mask);
				}
			}
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
					       tick_broadcast_pending_mask))
				goto out;

			/*
			 * Bail out if there is no next event.
			 */
			if (dev->next_event == KTIME_MAX)
				goto out;
			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event <= now) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock(&tick_broadcast_lock);
	return ret;
}

/*
 * Reset the oneshot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	if (!bc)
		return;

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = clockevent_state_periodic(bc);

		bc->event_handler = tick_handle_oneshot_broadcast;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period);
		} else
			bc->next_event = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_HOTPLUG_CPU
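/*
 * Presumably reached from the hotplug teardown path once @deadcpu is
 * gone: if the hrtimer based broadcast was bound to the dead CPU,
 * reprogramming it from the CPU running this code moves the binding
 * here and keeps broadcast service alive.
 */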
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	bc = tick_broadcast_device.evtdev;

	if (bc && broadcast_needs_cpu(bc, deadcpu)) {
		/* This moves the broadcast assignment to this CPU: */
		clockevents_program_event(bc, bc->next_event, 1);
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int cpu)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast masks for the dead cpu, but do not stop
	 * the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif

/*
 * Check, whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#else
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return -EBUSY;

	return 0;
}
#endif

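/*
 * Allocate the cpumasks. This runs early, from tick_init() in
 * start_kernel(), which is why GFP_NOWAIT is used.
 */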
void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}