// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <[email protected]>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_on __cpumask_var_read_mostly;
static cpumask_var_t tmpmask __cpumask_var_read_mostly;
static int tick_broadcast_forced;

static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
# ifdef CONFIG_HOTPLUG_CPU
static void tick_broadcast_oneshot_offline(unsigned int cpu);
# endif
#else
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
static inline void tick_broadcast_clear_oneshot(int cpu) { }
static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
# ifdef CONFIG_HOTPLUG_CPU
static inline void tick_broadcast_oneshot_offline(unsigned int cpu) { }
# endif
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
        return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
        return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
        if (bc)
                tick_setup_periodic(bc, 1);
}

/*
 * Check if the device can be utilized as a broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
                                        struct clock_event_device *newdev)
{
        if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
            (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
            (newdev->features & CLOCK_EVT_FEAT_C3STOP))
                return false;

        if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
            !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
                return false;

        return !curdev || newdev->rating > curdev->rating;
}
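
/*
 * Illustrative sketch, not part of this file: a hypothetical driver's
 * clock_event_device that would pass tick_check_broadcast_device() --
 * oneshot capable, not a dummy, not per-cpu and not affected by
 * C3STOP. All "example_*" names are made up.
 */
#if 0
static struct clock_event_device example_broadcast_dev = {
        .name                   = "example-bc",
        .features               = CLOCK_EVT_FEAT_PERIODIC |
                                  CLOCK_EVT_FEAT_ONESHOT,
        .rating                 = 250,  /* must beat the incumbent's rating */
        .set_next_event         = example_set_next_event,
        .set_state_shutdown     = example_shutdown,
};
#endif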

/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
        struct clock_event_device *cur = tick_broadcast_device.evtdev;

        if (!tick_check_broadcast_device(cur, dev))
                return;

        if (!try_module_get(dev->owner))
                return;

        clockevents_exchange_device(cur, dev);
        if (cur)
                cur->event_handler = clockevents_handle_noop;
        tick_broadcast_device.evtdev = dev;
        if (!cpumask_empty(tick_broadcast_mask))
                tick_broadcast_start_periodic(dev);
        /*
         * Inform all cpus about this. We might be in a situation
         * where we did not switch to oneshot mode because the per cpu
         * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
         * of a oneshot capable broadcast device. Without that
         * notification the system stays stuck in periodic mode
         * forever.
         */
        if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
                tick_clock_notify();
}
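
/*
 * Call-path sketch (an assumption based on the clockevents core, not
 * code from this file): drivers do not call the function above
 * directly; they register the device and the core offers it for the
 * broadcast role. Frequency and delta limits below are invented.
 */
#if 0
static void __init example_timer_init(void)
{
        /* Computes mult/shift and registers the device; the core may
         * then pick it via tick_install_broadcast_device(). */
        clockevents_config_and_register(&example_broadcast_dev,
                                        1000000, 0xf, 0x7fffffff);
}
#endif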

/*
 * Check if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
        return (dev && tick_broadcast_device.evtdev == dev);
}

int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
{
        int ret = -ENODEV;

        if (tick_is_broadcast_device(dev)) {
                raw_spin_lock(&tick_broadcast_lock);
                ret = __clockevents_update_freq(dev, freq);
                raw_spin_unlock(&tick_broadcast_lock);
        }
        return ret;
}
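
/*
 * Usage sketch (hypothetical): a driver that recalibrates its clock at
 * runtime could propagate the new frequency as below. Interrupts must
 * be disabled by the caller, since the helper above takes
 * tick_broadcast_lock without disabling them itself.
 */
#if 0
static void example_recalibrate(struct clock_event_device *dev, u32 new_hz)
{
        unsigned long flags;

        local_irq_save(flags);
        if (tick_broadcast_update_freq(dev, new_hz) == -ENODEV)
                /* Not the broadcast device: update it as a normal one. */
                clockevents_update_freq(dev, new_hz);
        local_irq_restore(flags);
}
#endif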

static void err_broadcast(const struct cpumask *mask)
{
        pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
        if (!dev->broadcast)
                dev->broadcast = tick_broadcast;
        if (!dev->broadcast) {
                pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
                             dev->name);
                dev->broadcast = err_broadcast;
        }
}

/*
 * Check if the device is dysfunctional and a placeholder which needs
 * to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        /*
         * Devices might be registered with both periodic and oneshot
         * mode disabled. This signals that the device needs to be
         * operated from the broadcast device and is a placeholder for
         * the cpu local device.
         */
        if (!tick_device_is_functional(dev)) {
                dev->event_handler = tick_handle_periodic;
                tick_device_setup_broadcast_func(dev);
                cpumask_set_cpu(cpu, tick_broadcast_mask);
                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                        tick_broadcast_start_periodic(bc);
                else
                        tick_broadcast_setup_oneshot(bc);
                ret = 1;
        } else {
                /*
                 * Clear the broadcast bit for this cpu if the
                 * device is not power state affected.
                 */
                if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
                        cpumask_clear_cpu(cpu, tick_broadcast_mask);
                else
                        tick_device_setup_broadcast_func(dev);

                /*
                 * Clear the broadcast bit if the CPU is not in
                 * periodic broadcast on state.
                 */
                if (!cpumask_test_cpu(cpu, tick_broadcast_on))
                        cpumask_clear_cpu(cpu, tick_broadcast_mask);

                switch (tick_broadcast_device.mode) {
                case TICKDEV_MODE_ONESHOT:
                        /*
                         * If the system is in oneshot mode we can
                         * unconditionally clear the oneshot mask bit,
                         * because the CPU is running and therefore
                         * not in an idle state which causes the power
                         * state affected device to stop. Let the
                         * caller initialize the device.
                         */
                        tick_broadcast_clear_oneshot(cpu);
                        ret = 0;
                        break;

                case TICKDEV_MODE_PERIODIC:
                        /*
                         * If the system is in periodic mode, check
                         * whether the broadcast device can be
                         * switched off now.
                         */
                        if (cpumask_empty(tick_broadcast_mask) && bc)
                                clockevents_shutdown(bc);
                        /*
                         * If we kept the cpu in the broadcast mask,
                         * tell the caller to leave the per cpu device
                         * in shutdown state. The periodic interrupt
                         * is delivered by the broadcast device, if
                         * the broadcast device exists and is not
                         * hrtimer based.
                         */
                        if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
                                ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
                        break;
                default:
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
        return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
        struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
        struct clock_event_device *evt = td->evtdev;

        if (!evt)
                return -ENODEV;

        if (!evt->event_handler)
                return -EINVAL;

        evt->event_handler(evt);
        return 0;
}
#endif

/*
 * Broadcast the event to the cpus which are set in the mask. The mask
 * gets mangled in the process.
 */
static bool tick_do_broadcast(struct cpumask *mask)
{
        int cpu = smp_processor_id();
        struct tick_device *td;
        bool local = false;

        /*
         * Check if the current cpu is in the mask
         */
        if (cpumask_test_cpu(cpu, mask)) {
                struct clock_event_device *bc = tick_broadcast_device.evtdev;

                cpumask_clear_cpu(cpu, mask);
                /*
                 * We only run the local handler if the broadcast
                 * device is not hrtimer based. Otherwise we run into
                 * an hrtimer recursion:
                 *
                 * local timer_interrupt()
                 *   local_handler()
                 *     expire_hrtimers()
                 *       bc_handler()
                 *         local_handler()
                 *           expire_hrtimers()
                 */
                local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
        }

        if (!cpumask_empty(mask)) {
                /*
                 * It might be necessary to actually check whether the devices
                 * have different broadcast functions. For now, just use the
                 * one of the first device. This works as long as we have this
                 * misfeature only on x86 (lapic).
                 */
                td = &per_cpu(tick_cpu_device, cpumask_first(mask));
                td->evtdev->broadcast(mask);
        }
        return local;
}
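
/*
 * Sketch of what a ->broadcast() callback typically does (an
 * assumption drawn from how architectures wire this up, not from this
 * file): raise a timer IPI on every cpu in the mask, so their event
 * handlers run although their local timers are stopped. The helper
 * name below is hypothetical; the real mechanism is per architecture.
 */
#if 0
static void example_timer_broadcast(const struct cpumask *mask)
{
        arch_send_timer_ipi(mask);      /* hypothetical IPI helper */
}
#endif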

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static bool tick_do_periodic_broadcast(void)
{
        cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
        return tick_do_broadcast(tmpmask);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
        struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
        bool bc_local;

        raw_spin_lock(&tick_broadcast_lock);

        /* Handle spurious interrupts gracefully */
        if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
                raw_spin_unlock(&tick_broadcast_lock);
                return;
        }

        bc_local = tick_do_periodic_broadcast();

        if (clockevent_state_oneshot(dev)) {
                ktime_t next = ktime_add_ns(dev->next_event, TICK_NSEC);

                clockevents_program_event(dev, next, true);
        }
        raw_spin_unlock(&tick_broadcast_lock);

        /*
         * We run the handler of the local cpu after dropping
         * tick_broadcast_lock because the handler might deadlock when
         * trying to switch to oneshot mode.
         */
        if (bc_local)
                td->evtdev->event_handler(td->evtdev);
}

/**
 * tick_broadcast_control - Enable/disable or force broadcast mode
 * @mode:       The selected broadcast mode
 *
 * Called when the system enters a state where affected tick devices
 * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
 */
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        int cpu, bc_stopped;
        unsigned long flags;

        /* Protects also the local clockevent device. */
        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
        td = this_cpu_ptr(&tick_cpu_device);
        dev = td->evtdev;

        /*
         * Is the device not affected by the powerstate?
         */
        if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
                goto out;

        if (!tick_device_is_functional(dev))
                goto out;

        cpu = smp_processor_id();
        bc = tick_broadcast_device.evtdev;
        bc_stopped = cpumask_empty(tick_broadcast_mask);

        switch (mode) {
        case TICK_BROADCAST_FORCE:
                tick_broadcast_forced = 1;
                fallthrough;
        case TICK_BROADCAST_ON:
                cpumask_set_cpu(cpu, tick_broadcast_on);
                if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
                        /*
                         * Only shutdown the cpu local device, if:
                         *
                         * - the broadcast device exists
                         * - the broadcast device is not a hrtimer based one
                         * - the broadcast device is in periodic mode to
                         *   avoid a hiccup during switch to oneshot mode
                         */
                        if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
                            tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                                clockevents_shutdown(dev);
                }
                break;

        case TICK_BROADCAST_OFF:
                if (tick_broadcast_forced)
                        break;
                cpumask_clear_cpu(cpu, tick_broadcast_on);
                if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
                        if (tick_broadcast_device.mode ==
                            TICKDEV_MODE_PERIODIC)
                                tick_setup_periodic(dev, 0);
                }
                break;
        }

        if (bc) {
                if (cpumask_empty(tick_broadcast_mask)) {
                        if (!bc_stopped)
                                clockevents_shutdown(bc);
                } else if (bc_stopped) {
                        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                                tick_broadcast_start_periodic(bc);
                        else
                                tick_broadcast_setup_oneshot(bc);
                }
        }
out:
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
EXPORT_SYMBOL_GPL(tick_broadcast_control);
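
/*
 * Usage sketch: callers normally go through the tick_broadcast_enable()/
 * tick_broadcast_disable() wrappers from <linux/tick.h>, which pass
 * TICK_BROADCAST_ON/OFF to the function above. A cpuidle-style driver
 * might wire them into its hotplug callbacks (names are made up):
 */
#if 0
static int example_cpu_online(unsigned int cpu)
{
        /* The local timer of this cpu stops in deep idle states. */
        tick_broadcast_enable();
        return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
        tick_broadcast_disable();
        return 0;
}
#endif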

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
        if (!broadcast)
                dev->event_handler = tick_handle_periodic;
        else
                dev->event_handler = tick_handle_periodic_broadcast;
}

#ifdef CONFIG_HOTPLUG_CPU
static void tick_shutdown_broadcast(void)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
                if (bc && cpumask_empty(tick_broadcast_mask))
                        clockevents_shutdown(bc);
        }
}

/*
 * Remove a CPU from broadcasting
 */
void tick_broadcast_offline(unsigned int cpu)
{
        raw_spin_lock(&tick_broadcast_lock);
        cpumask_clear_cpu(cpu, tick_broadcast_mask);
        cpumask_clear_cpu(cpu, tick_broadcast_on);
        tick_broadcast_oneshot_offline(cpu);
        tick_shutdown_broadcast();
        raw_spin_unlock(&tick_broadcast_lock);
}

#endif

void tick_suspend_broadcast(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;
        if (bc)
                clockevents_shutdown(bc);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * This is called from tick_resume_local() on a resuming CPU. That's
 * called from the core resume function, tick_unfreeze() and the magic XEN
 * resume hackery.
 *
 * In none of these cases the broadcast device mode can change and the
 * bit of the resuming CPU in the broadcast mask is safe as well.
 */
bool tick_resume_check_broadcast(void)
{
        if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
                return false;
        else
                return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
}

void tick_resume_broadcast(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;

        if (bc) {
                clockevents_tick_resume(bc);

                switch (tick_broadcast_device.mode) {
                case TICKDEV_MODE_PERIODIC:
                        if (!cpumask_empty(tick_broadcast_mask))
                                tick_broadcast_start_periodic(bc);
                        break;
                case TICKDEV_MODE_ONESHOT:
                        if (!cpumask_empty(tick_broadcast_mask))
                                tick_resume_broadcast_oneshot(bc);
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_pending_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_force_mask __cpumask_var_read_mostly;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
        return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from another core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
        return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
                                        const struct cpumask *cpumask)
{
        if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
                return;

        if (cpumask_equal(bc->cpumask, cpumask))
                return;

        bc->cpumask = cpumask;
        irq_set_affinity(bc->irq, bc->cpumask);
}

static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
                                     ktime_t expires)
{
        if (!clockevent_state_oneshot(bc))
                clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);

        clockevents_program_event(bc, expires, 1);
        tick_broadcast_set_affinity(bc, cpumask_of(cpu));
}

static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
        clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast_this_cpu(void)
{
        if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
                struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

                /*
                 * We might be in the middle of switching over from
                 * periodic to oneshot. If the CPU has not yet
                 * switched over, leave the device alone.
                 */
                if (td->mode == TICKDEV_MODE_ONESHOT) {
                        clockevents_switch_state(td->evtdev,
                                                 CLOCK_EVT_STATE_ONESHOT);
                }
        }
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
        struct tick_device *td;
        ktime_t now, next_event;
        int cpu, next_cpu = 0;
        bool bc_local;

        raw_spin_lock(&tick_broadcast_lock);
        dev->next_event = KTIME_MAX;
        next_event = KTIME_MAX;
        cpumask_clear(tmpmask);
        now = ktime_get();
        /* Find all expired events */
        for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
                /*
                 * Required for !SMP because for_each_cpu() unconditionally
                 * reports CPU0 as set on UP kernels.
                 */
                if (!IS_ENABLED(CONFIG_SMP) &&
                    cpumask_empty(tick_broadcast_oneshot_mask))
                        break;

                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev->next_event <= now) {
                        cpumask_set_cpu(cpu, tmpmask);
                        /*
                         * Mark the remote cpu in the pending mask, so
                         * it can avoid reprogramming the cpu local
                         * timer in tick_broadcast_oneshot_control().
                         */
                        cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
                } else if (td->evtdev->next_event < next_event) {
                        next_event = td->evtdev->next_event;
                        next_cpu = cpu;
                }
        }

        /*
         * Remove the current cpu from the pending mask. The event is
         * delivered immediately in tick_do_broadcast()!
         */
        cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

        /* Take care of enforced broadcast requests */
        cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
        cpumask_clear(tick_broadcast_force_mask);

        /*
         * Sanity check. Catch the case where we try to broadcast to
         * offline cpus.
         */
        if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
                cpumask_and(tmpmask, tmpmask, cpu_online_mask);

        /*
         * Wake up the cpus which have an expired event.
         */
        bc_local = tick_do_broadcast(tmpmask);

        /*
         * Two reasons to reprogram:
         *
         * - The global event did not expire any CPU local
         *   events. This happens in dyntick mode, as the maximum PIT
         *   delta is quite small.
         *
         * - There are pending events on sleeping CPUs which were not
         *   in the event mask
         */
        if (next_event != KTIME_MAX)
                tick_broadcast_set_event(dev, next_cpu, next_event);

        raw_spin_unlock(&tick_broadcast_lock);

        if (bc_local) {
                td = this_cpu_ptr(&tick_cpu_device);
                td->evtdev->event_handler(td->evtdev);
        }
}

static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
        if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
                return 0;
        if (bc->next_event == KTIME_MAX)
                return 0;
        return bc->bound_on == cpu ? -EBUSY : 0;
}
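
/*
 * Worked example with made-up values: with an hrtimer based broadcast
 * device (CLOCK_EVT_FEAT_HRTIMER), one cpu owns the broadcast hrtimer
 * (bc->bound_on). That cpu must not enter deep idle while an event is
 * armed, so broadcast_needs_cpu() reports -EBUSY for it:
 *
 *      bc->features   = CLOCK_EVT_FEAT_HRTIMER;
 *      bc->bound_on   = 2;
 *      bc->next_event = <some future time, != KTIME_MAX>;
 *
 *      broadcast_needs_cpu(bc, 2) == -EBUSY;  cpu 2 keeps its tick
 *      broadcast_needs_cpu(bc, 5) == 0;       cpu 5 may go deep idle
 */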

static void broadcast_shutdown_local(struct clock_event_device *bc,
                                     struct clock_event_device *dev)
{
        /*
         * For hrtimer based broadcasting we cannot shutdown the cpu
         * local device if our own event is the first one to expire or
         * if we own the broadcast timer.
         */
        if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
                if (broadcast_needs_cpu(bc, smp_processor_id()))
                        return;
                if (dev->next_event < bc->next_event)
                        return;
        }
        clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}

int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
        struct clock_event_device *bc, *dev;
        int cpu, ret = 0;
        ktime_t now;

        /*
         * If there is no broadcast device, tell the caller not to go
         * into deep idle.
         */
        if (!tick_broadcast_device.evtdev)
                return -EBUSY;

        dev = this_cpu_ptr(&tick_cpu_device)->evtdev;

        raw_spin_lock(&tick_broadcast_lock);
        bc = tick_broadcast_device.evtdev;
        cpu = smp_processor_id();

        if (state == TICK_BROADCAST_ENTER) {
                /*
                 * If the current CPU owns the hrtimer broadcast
                 * mechanism, it cannot go deep idle and we do not add
                 * the CPU to the broadcast mask. We don't have to go
                 * through the EXIT path as the local timer is not
                 * shutdown.
                 */
                ret = broadcast_needs_cpu(bc, cpu);
                if (ret)
                        goto out;

                /*
                 * If the broadcast device is in periodic mode, we
                 * return.
                 */
                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
                        /* If it is an hrtimer based broadcast, return busy */
                        if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
                                ret = -EBUSY;
                        goto out;
                }

                if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
                        WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));

                        /* Conditionally shut down the local timer. */
                        broadcast_shutdown_local(bc, dev);

                        /*
                         * We only reprogram the broadcast timer if we
                         * did not mark ourselves in the force mask and
                         * if the cpu local event is earlier than the
                         * broadcast event. If the current CPU is in
                         * the force mask, then we are going to be
                         * woken by the IPI right away; we return
                         * busy, so the CPU does not try to go deep
                         * idle.
                         */
                        if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
                                ret = -EBUSY;
                        } else if (dev->next_event < bc->next_event) {
                                tick_broadcast_set_event(bc, cpu, dev->next_event);
                                /*
                                 * In case of hrtimer broadcasts the
                                 * programming might have moved the
                                 * timer to this cpu. If yes, remove
                                 * us from the broadcast mask and
                                 * return busy.
                                 */
                                ret = broadcast_needs_cpu(bc, cpu);
                                if (ret) {
                                        cpumask_clear_cpu(cpu,
                                                tick_broadcast_oneshot_mask);
                                }
                        }
                }
        } else {
                if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
                        clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
                        /*
                         * The cpu which was handling the broadcast
                         * timer marked this cpu in the broadcast
                         * pending mask and fired the broadcast
                         * IPI. So we are going to handle the expired
                         * event anyway via the broadcast IPI
                         * handler. No need to reprogram the timer
                         * with an already expired event.
                         */
                        if (cpumask_test_and_clear_cpu(cpu,
                                       tick_broadcast_pending_mask))
                                goto out;

                        /*
                         * Bail out if there is no next event.
                         */
                        if (dev->next_event == KTIME_MAX)
                                goto out;
                        /*
                         * If the pending bit is not set, then we are
                         * either the CPU handling the broadcast
                         * interrupt or we got woken by something else.
                         *
                         * We are no longer in the broadcast mask, so
                         * if the cpu local expiry time is already
                         * reached, we would reprogram the cpu local
                         * timer with an already expired event.
                         *
                         * This can lead to a ping-pong when we return
                         * to idle and therefore rearm the broadcast
                         * timer before the cpu local timer was able
                         * to fire. This happens because the forced
                         * reprogramming makes sure that the event
                         * will happen in the future and depending on
                         * the min_delta setting this might be far
                         * enough out that the ping-pong starts.
                         *
                         * If the cpu local next_event has expired
                         * then we know that the broadcast timer
                         * next_event has expired as well and
                         * broadcast is about to be handled. So we
                         * avoid reprogramming and enforce that the
                         * broadcast handler, which did not run yet,
                         * will invoke the cpu local handler.
                         *
                         * We cannot call the handler directly from
                         * here, because we might be in a NOHZ phase
                         * and we did not go through the irq_enter()
                         * nohz fixups.
                         */
                        now = ktime_get();
                        if (dev->next_event <= now) {
                                cpumask_set_cpu(cpu, tick_broadcast_force_mask);
                                goto out;
                        }
                        /*
                         * We got woken by something else. Reprogram
                         * the cpu local timer device.
                         */
                        tick_program_event(dev->next_event, 1);
                }
        }
out:
        raw_spin_unlock(&tick_broadcast_lock);
        return ret;
}
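
/*
 * Caller sketch (an assumption based on the idle path, not code from
 * this file): deep idle code reaches the function above through the
 * tick_broadcast_enter()/tick_broadcast_exit() wrappers from
 * <linux/tick.h> and must treat -EBUSY as "stay out of the deep
 * state". example_deep_idle() is a hypothetical platform call.
 */
#if 0
static void example_enter_deep_idle(void)
{
        if (tick_broadcast_enter()) {
                /* Rejected: keep the tick alive, use a shallow state. */
                return;
        }
        example_deep_idle();
        tick_broadcast_exit();
}
#endif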

/*
 * Reset the oneshot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
        cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
        cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
                                           ktime_t expires)
{
        struct tick_device *td;
        int cpu;

        for_each_cpu(cpu, mask) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev)
                        td->evtdev->next_event = expires;
        }
}

static inline ktime_t tick_get_next_period(void)
{
        ktime_t next;

        /*
         * Protect against concurrent updates (store/load tearing on
         * 32bit). It does not matter if the time is already in the
         * past. The broadcast device which is about to be programmed will
         * fire in any case.
         */
        raw_spin_lock(&jiffies_lock);
        next = tick_next_period;
        raw_spin_unlock(&jiffies_lock);
        return next;
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
        int cpu = smp_processor_id();

        if (!bc)
                return;

        /* Set it up only once! */
        if (bc->event_handler != tick_handle_oneshot_broadcast) {
                int was_periodic = clockevent_state_periodic(bc);

                bc->event_handler = tick_handle_oneshot_broadcast;

                /*
                 * We must be careful here. There might be other CPUs
                 * waiting for periodic broadcast. We need to set the
                 * oneshot_mask bits for those and program the
                 * broadcast device to fire.
                 */
                cpumask_copy(tmpmask, tick_broadcast_mask);
                cpumask_clear_cpu(cpu, tmpmask);
                cpumask_or(tick_broadcast_oneshot_mask,
                           tick_broadcast_oneshot_mask, tmpmask);

                if (was_periodic && !cpumask_empty(tmpmask)) {
                        ktime_t nextevt = tick_get_next_period();

                        clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
                        tick_broadcast_init_next_event(tmpmask, nextevt);
                        tick_broadcast_set_event(bc, cpu, nextevt);
                } else
                        bc->next_event = KTIME_MAX;
        } else {
                /*
                 * The first cpu which switches to oneshot mode sets
                 * the bit for all other cpus which are in the general
                 * (periodic) broadcast mask. So the bit is set and
                 * would prevent the first broadcast enter after this
                 * from programming the bc device.
                 */
                tick_broadcast_clear_oneshot(cpu);
        }
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
        bc = tick_broadcast_device.evtdev;
        if (bc)
                tick_broadcast_setup_oneshot(bc);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_HOTPLUG_CPU
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
        bc = tick_broadcast_device.evtdev;

        if (bc && broadcast_needs_cpu(bc, deadcpu)) {
                /* This moves the broadcast assignment to this CPU: */
                clockevents_program_event(bc, bc->next_event, 1);
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dying CPU from broadcasting
 */
static void tick_broadcast_oneshot_offline(unsigned int cpu)
{
        /*
         * Clear the broadcast masks for the dead cpu, but do not stop
         * the broadcast device!
         */
        cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
        cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
        cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
}
#endif

/*
 * Check whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
        return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#else
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
                return -EBUSY;

        return 0;
}
#endif

void __init tick_broadcast_init(void)
{
        zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
        zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
        zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
        zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
        zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
        zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}
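
/*
 * Boot ordering sketch (simplified, for orientation; based on
 * init/main.c and kernel/time/tick-common.c, not on code in this
 * file): the masks above are allocated early, before any clock event
 * device can register.
 *
 *      start_kernel()
 *        tick_init()
 *          tick_broadcast_init()       <-- allocates the cpumasks
 *        ...
 *        time_init()                   <-- arch code registers timers
 */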