// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

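/*
 * Editor's note (illustrative, not part of the original file): forced
 * threading is enabled by booting with the "threadirqs" parameter on
 * the kernel command line, e.g.:
 *
 *	linux ... threadirqs
 */
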
static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
{
	struct irq_data *irqd = irq_desc_get_irq_data(desc);
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);

		/*
		 * If requested and supported, check at the chip whether it
		 * is in flight at the hardware level, i.e. already pending
		 * in a CPU and waiting for service and acknowledge.
		 */
		if (!inprogress && sync_chip) {
			/*
			 * Ignore the return code. inprogress is only updated
			 * when the chip supports it.
			 */
			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
						&inprogress);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need you
 * will deadlock. It does not take associated threaded handlers
 * into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 *
 * It does not check whether there is an interrupt in flight at the
 * hardware level, but not serviced yet, as this might deadlock when
 * called with interrupts disabled and the target CPU of the interrupt
 * is the current CPU.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, false);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * Can only be called from preemptible code as it might sleep when
 * an interrupt thread is associated to @irq.
 *
 * It optionally makes sure (when the irq chip supports that method)
 * that the interrupt is not pending in any CPU and waiting for
 * service.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, true);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);

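/*
 * Example (editor's sketch, not part of the original file): a typical
 * driver teardown path using synchronize_irq(). All example_* names
 * and the per-device layout are hypothetical.
 */
#if 0
static void example_shutdown(struct example_dev *dev)
{
	/* Stop the device from raising new interrupts first... */
	example_hw_mask_irqs(dev);
	/* ...then wait for hardirq and threaded handlers to finish. */
	synchronize_irq(dev->irq);
	/* Now it is safe to tear down data the handlers dereference. */
	kfree(dev->ring);
}
#endif
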
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq:	Interrupt to check
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if the affinity of an irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc:	irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

static void irq_validate_effective_affinity(struct irq_data *data)
{
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
#endif
}

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism then utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

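/*
 * Example (editor's sketch, not part of the original file): spreading
 * the per-queue interrupts of a multi-queue device across online CPUs.
 * The example_* names, vector array and queue count are hypothetical.
 * The hint must be cleared (set to NULL) before free_irq().
 */
#if 0
static void example_spread_queue_irqs(struct example_dev *dev)
{
	int i;

	for (i = 0; i < dev->nr_queues; i++) {
		/* Hint irqbalance and set an initial affinity. */
		irq_set_affinity_hint(dev->queue_irq[i],
				      cpumask_of(i % num_online_cpus()));
	}
}
#endif
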
static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq:	Interrupt for which to enable/disable notification
 * @notify:	Context for notification, or %NULL to disable
 *		notification. Function pointers must be initialised;
 *		the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc || desc->istate & IRQS_NMI)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify) {
		cancel_work_sync(&old_notify->work);
		kref_put(&old_notify->kref, old_notify->release);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);

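/*
 * Example (editor's sketch, not part of the original file): registering
 * an affinity notifier. The notify and release callbacks must be set by
 * the caller; all example_* names are hypothetical.
 */
#if 0
static void example_notify(struct irq_affinity_notify *notify,
			   const cpumask_t *mask)
{
	/* React to the new affinity, e.g. re-target per-CPU resources. */
}

static void example_release(struct kref *ref)
{
	/* Last reference dropped; runs in process context. */
}

static int example_register(struct example_dev *dev)
{
	dev->affinity_notify.notify = example_notify;
	dev->affinity_notify.release = example_release;
	return irq_set_affinity_notifier(dev->irq, &dev->affinity_notify);
}
#endif
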
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpu_online_mask);

	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif

/*
 * Called when a bogus affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_setup_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
#endif

/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq:	interrupt number to set affinity
 * @vcpu_info:	vCPU specific data or pointer to a percpu array of vCPU
 *		specific data for percpu_devid interrupts
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * Can only be called from preemptible code as it might sleep when
 * an interrupt thread is associated to @irq.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

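/*
 * Example (editor's sketch, not part of the original file): nested
 * disable/enable around a device reconfiguration. example_* names are
 * hypothetical.
 */
#if 0
static void example_reconfigure(struct example_dev *dev)
{
	disable_irq(dev->irq);		/* waits for running handlers */
	example_hw_rewrite_registers(dev);
	enable_irq(dev->irq);		/* matches the disable above */
}
#endif
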
/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);

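/*
 * Example (editor's sketch, not part of the original file): optimistic
 * disable from atomic context; the return value must be checked.
 * example_* names are hypothetical.
 */
#if 0
static bool example_try_quiesce(struct example_dev *dev)
{
	if (!disable_hardirq(dev->irq)) {
		/* A threaded handler is still running; back out. */
		enable_irq(dev->irq);
		return false;
	}
	return true;
}
#endif
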
/**
 * disable_nmi_nosync - disable an nmi without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * The interrupt to disable must have been requested through request_nmi.
 * Unlike disable_nmi(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 */
void disable_nmi_nosync(unsigned int irq)
{
	disable_irq_nosync(irq);
}

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

/**
 * enable_nmi - enable handling of an nmi
 * @irq: Interrupt to enable
 *
 * The interrupt to enable must have been requested through request_nmi.
 * Undoes the effect of one call to disable_nmi(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 */
void enable_nmi(unsigned int irq)
{
	enable_irq(irq);
}

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq:	interrupt to control
 * @on:		enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Don't use NMIs as wake up interrupts please */
	if (desc->istate & IRQS_NMI) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);

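/*
 * Example (editor's sketch, not part of the original file): arming a
 * wakeup interrupt in a driver's suspend/resume path. example_* names
 * and the embedded struct device pointer are hypothetical.
 */
#if 0
static int example_suspend(struct example_dev *dev)
{
	if (device_may_wakeup(dev->dev))
		irq_set_irq_wake(dev->irq, 1);	/* arm for wakeup */
	return 0;
}

static int example_resume(struct example_dev *dev)
{
	if (device_may_wakeup(dev->dev))
		irq_set_irq_wake(dev->irq, 0);	/* must match the enable */
	return 0;
}
#endif
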
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

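/*
 * Example (editor's sketch, not part of the original file): requesting a
 * purely threaded, oneshot interrupt. The NULL primary handler is
 * replaced by irq_default_primary_handler() above. example_* names are
 * hypothetical.
 */
#if 0
static int example_request(struct example_dev *dev)
{
	return request_threaded_irq(dev->irq, NULL, example_thread_fn,
				    IRQF_ONESHOT, "example", dev);
}
#endif
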
/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			/* may need to run one last time */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			__set_current_state(TASK_RUNNING);
			return -1;
		}

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity)) {
		const struct cpumask *m;

		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
		cpumask_copy(mask, m);
	} else {
		valid = false;
	}
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq:	Interrupt line
 * @dev_id:	Device identity for which the thread should be woken
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);

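/*
 * Example (editor's sketch, not part of the original file): kicking the
 * threaded handler from a polling timer when interrupts may be lost.
 * example_* names and the timer field are hypothetical.
 */
#if 0
static void example_poll_timer(struct timer_list *t)
{
	struct example_dev *dev = from_timer(dev, t, poll_timer);

	/* Run the threaded handler as if the interrupt had fired. */
	irq_wake_thread(dev->irq, dev);
	mod_timer(&dev->poll_timer, jiffies + HZ);
}
#endif
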
2a1d3ab8 | 1142 | static int irq_setup_forced_threading(struct irqaction *new) |
8d32a307 TG |
1143 | { |
1144 | if (!force_irqthreads) | |
2a1d3ab8 | 1145 | return 0; |
8d32a307 | 1146 | if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) |
2a1d3ab8 | 1147 | return 0; |
8d32a307 | 1148 | |
d1f0301b TG |
1149 | /* |
1150 | * No further action required for interrupts which are requested as | |
1151 | * threaded interrupts already | |
1152 | */ | |
1153 | if (new->handler == irq_default_primary_handler) | |
1154 | return 0; | |
1155 | ||
8d32a307 TG |
1156 | new->flags |= IRQF_ONESHOT; |
1157 | ||
2a1d3ab8 TG |
1158 | /* |
1159 | * Handle the case where we have a real primary handler and a | |
1160 | * thread handler. We force thread them as well by creating a | |
1161 | * secondary action. | |
1162 | */ | |
d1f0301b | 1163 | if (new->handler && new->thread_fn) { |
2a1d3ab8 TG |
1164 | /* Allocate the secondary action */ |
1165 | new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL); | |
1166 | if (!new->secondary) | |
1167 | return -ENOMEM; | |
1168 | new->secondary->handler = irq_forced_secondary_handler; | |
1169 | new->secondary->thread_fn = new->thread_fn; | |
1170 | new->secondary->dev_id = new->dev_id; | |
1171 | new->secondary->irq = new->irq; | |
1172 | new->secondary->name = new->name; | |
8d32a307 | 1173 | } |
2a1d3ab8 TG |
1174 | /* Deal with the primary handler */ |
1175 | set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); | |
1176 | new->thread_fn = new->handler; | |
1177 | new->handler = irq_default_primary_handler; | |
1178 | return 0; | |
8d32a307 TG |
1179 | } |
1180 | ||
c1bacbae TG |
1181 | static int irq_request_resources(struct irq_desc *desc) |
1182 | { | |
1183 | struct irq_data *d = &desc->irq_data; | |
1184 | struct irq_chip *c = d->chip; | |
1185 | ||
1186 | return c->irq_request_resources ? c->irq_request_resources(d) : 0; | |
1187 | } | |
1188 | ||
1189 | static void irq_release_resources(struct irq_desc *desc) | |
1190 | { | |
1191 | struct irq_data *d = &desc->irq_data; | |
1192 | struct irq_chip *c = d->chip; | |
1193 | ||
1194 | if (c->irq_release_resources) | |
1195 | c->irq_release_resources(d); | |
1196 | } | |
1197 | ||
b525903c JT |
1198 | static bool irq_supports_nmi(struct irq_desc *desc) |
1199 | { | |
1200 | struct irq_data *d = irq_desc_get_irq_data(desc); | |
1201 | ||
1202 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | |
1203 | /* Only IRQs directly managed by the root irqchip can be set as NMI */ | |
1204 | if (d->parent_data) | |
1205 | return false; | |
1206 | #endif | |
1207 | /* Don't support NMIs for chips behind a slow bus */ | |
1208 | if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock) | |
1209 | return false; | |
1210 | ||
1211 | return d->chip->flags & IRQCHIP_SUPPORTS_NMI; | |
1212 | } | |
1213 | ||
1214 | static int irq_nmi_setup(struct irq_desc *desc) | |
1215 | { | |
1216 | struct irq_data *d = irq_desc_get_irq_data(desc); | |
1217 | struct irq_chip *c = d->chip; | |
1218 | ||
1219 | return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL; | |
1220 | } | |
1221 | ||
1222 | static void irq_nmi_teardown(struct irq_desc *desc) | |
1223 | { | |
1224 | struct irq_data *d = irq_desc_get_irq_data(desc); | |
1225 | struct irq_chip *c = d->chip; | |
1226 | ||
1227 | if (c->irq_nmi_teardown) | |
1228 | c->irq_nmi_teardown(d); | |
1229 | } | |
1230 | ||
2a1d3ab8 TG |
1231 | static int |
1232 | setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) | |
1233 | { | |
1234 | struct task_struct *t; | |
1235 | struct sched_param param = { | |
1236 | .sched_priority = MAX_USER_RT_PRIO/2, | |
1237 | }; | |
1238 | ||
1239 | if (!secondary) { | |
1240 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, | |
1241 | new->name); | |
1242 | } else { | |
1243 | t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq, | |
1244 | new->name); | |
1245 | param.sched_priority -= 1; | |
1246 | } | |
1247 | ||
1248 | if (IS_ERR(t)) | |
1249 | return PTR_ERR(t); | |
1250 | ||
1251 | sched_setscheduler_nocheck(t, SCHED_FIFO, ¶m); | |
1252 | ||
1253 | /* | |
1254 | * We keep the reference to the task struct even if | |
1255 | * the thread dies to avoid that the interrupt code | |
1256 | * references an already freed task_struct. | |
1257 | */ | |
7b3c92b8 | 1258 | new->thread = get_task_struct(t); |
2a1d3ab8 TG |
1259 | /* |
1260 | * Tell the thread to set its affinity. This is | |
1261 | * important for shared interrupt handlers as we do | |
1262 | * not invoke setup_affinity() for the secondary | |
1263 | * handlers as everything is already set up. Even for | |
1264 | * interrupts marked with IRQF_NO_BALANCE this is | |
1265 | * correct as we want the thread to move to the cpu(s) | |
1266 | * on which the requesting code placed the interrupt. | |
1267 | */ | |
1268 | set_bit(IRQTF_AFFINITY, &new->thread_flags); | |
1269 | return 0; | |
1270 | } | |
1271 | ||
1da177e4 LT |
1272 | /* |
1273 | * Internal function to register an irqaction - typically used to | |
1274 | * allocate special interrupts that are part of the architecture. | |
19d39a38 TG |
1275 | * |
1276 | * Locking rules: | |
1277 | * | |
1278 | * desc->request_mutex Provides serialization against a concurrent free_irq() | |
1279 | * chip_bus_lock Provides serialization for slow bus operations | |
1280 | * desc->lock Provides serialization against hard interrupts | |
1281 | * | |
1282 | * chip_bus_lock and desc->lock are sufficient for all other management and | |
1283 | * interrupt related functions. desc->request_mutex solely serializes | |
1284 | * request/free_irq(). | |
1da177e4 | 1285 | */ |
d3c60047 | 1286 | static int |
327ec569 | 1287 | __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) |
1da177e4 | 1288 | { |
f17c7545 | 1289 | struct irqaction *old, **old_ptr; |
b5faba21 | 1290 | unsigned long flags, thread_mask = 0; |
3b8249e7 | 1291 | int ret, nested, shared = 0; |
1da177e4 | 1292 | |
7d94f7ca | 1293 | if (!desc) |
c2b5a251 MW |
1294 | return -EINVAL; |
1295 | ||
6b8ff312 | 1296 | if (desc->irq_data.chip == &no_irq_chip) |
1da177e4 | 1297 | return -ENOSYS; |
b6873807 SAS |
1298 | if (!try_module_get(desc->owner)) |
1299 | return -ENODEV; | |
1da177e4 | 1300 | |
2a1d3ab8 TG |
1301 | new->irq = irq; |
1302 | ||
4b357dae JH |
1303 | /* |
1304 | * If the trigger type is not specified by the caller, | |
1305 | * then use the default for this interrupt. | |
1306 | */ | |
1307 | if (!(new->flags & IRQF_TRIGGER_MASK)) | |
1308 | new->flags |= irqd_get_trigger_type(&desc->irq_data); | |
1309 | ||
3aa551c9 | 1310 | /* |
399b5da2 TG |
1311 | * Check whether the interrupt nests into another interrupt |
1312 | * thread. | |
1313 | */ | |
1ccb4e61 | 1314 | nested = irq_settings_is_nested_thread(desc); |
399b5da2 | 1315 | if (nested) { |
b6873807 SAS |
1316 | if (!new->thread_fn) { |
1317 | ret = -EINVAL; | |
1318 | goto out_mput; | |
1319 | } | |
399b5da2 TG |
1320 | /* |
1321 | * Replace the primary handler which was provided from | |
1322 | * the driver for non nested interrupt handling by the | |
1323 | * dummy function which warns when called. | |
1324 | */ | |
1325 | new->handler = irq_nested_primary_handler; | |
8d32a307 | 1326 | } else { |
2a1d3ab8 TG |
1327 | if (irq_settings_can_thread(desc)) { |
1328 | ret = irq_setup_forced_threading(new); | |
1329 | if (ret) | |
1330 | goto out_mput; | |
1331 | } | |
399b5da2 TG |
1332 | } |
1333 | ||
3aa551c9 | 1334 | /* |
399b5da2 TG |
1335 | * Create a handler thread when a thread function is supplied |
1336 | * and the interrupt does not nest into another interrupt | |
1337 | * thread. | |
3aa551c9 | 1338 | */ |
399b5da2 | 1339 | if (new->thread_fn && !nested) { |
2a1d3ab8 TG |
1340 | ret = setup_irq_thread(new, irq, false); |
1341 | if (ret) | |
b6873807 | 1342 | goto out_mput; |
2a1d3ab8 TG |
1343 | if (new->secondary) { |
1344 | ret = setup_irq_thread(new->secondary, irq, true); | |
1345 | if (ret) | |
1346 | goto out_thread; | |
b6873807 | 1347 | } |
3aa551c9 TG |
1348 | } |
1349 | ||
dc9b229a TG |
1350 | /* |
1351 | * Drivers are often written to work w/o knowledge about the | |
1352 | * underlying irq chip implementation, so a request for a | |
1353 | * threaded irq without a primary hard irq context handler | |
1354 | * requires the ONESHOT flag to be set. Some irq chips like | |
1355 | * MSI based interrupts are per se one shot safe. Check the | |
1356 | * chip flags, so we can avoid the unmask dance at the end of | |
1357 | * the threaded handler for those. | |
1358 | */ | |
1359 | if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) | |
1360 | new->flags &= ~IRQF_ONESHOT; | |
1361 | ||
19d39a38 TG |
1362 | /* |
1363 | * Protects against a concurrent __free_irq() call which might wait | |
519cc865 | 1364 | * for synchronize_hardirq() to complete without holding the optional |
836557bd LW |
1365 | * chip bus lock and desc->lock. Also protects against handing out |
1366 | * a recycled oneshot thread_mask bit while it's still in use by | |
1367 | * its previous owner. | |
19d39a38 | 1368 | */ |
9114014c | 1369 | mutex_lock(&desc->request_mutex); |
19d39a38 TG |
1370 | |
1371 | /* | |
1372 | * Acquire bus lock as the irq_request_resources() callback below | |
1373 | * might rely on the serialization or the magic power management | |
1374 | * functions which are abusing the irq_bus_lock() callback, | |
1375 | */ | |
1376 | chip_bus_lock(desc); | |
1377 | ||
1378 | /* First installed action requests resources. */ | |
46e48e25 TG |
1379 | if (!desc->action) { |
1380 | ret = irq_request_resources(desc); | |
1381 | if (ret) { | |
1382 | pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", | |
1383 | new->name, irq, desc->irq_data.chip->name); | |
19d39a38 | 1384 | goto out_bus_unlock; |
46e48e25 TG |
1385 | } |
1386 | } | |
9114014c | 1387 | |
1da177e4 LT |
1388 | /* |
1389 | * The following block of code has to be executed atomically | |
19d39a38 TG |
1390 | * protected against a concurrent interrupt and any of the other |
1391 | * management calls which are not serialized via | |
1392 | * desc->request_mutex or the optional bus lock. | |
1da177e4 | 1393 | */ |
239007b8 | 1394 | raw_spin_lock_irqsave(&desc->lock, flags); |
f17c7545 IM |
1395 | old_ptr = &desc->action; |
1396 | old = *old_ptr; | |
06fcb0c6 | 1397 | if (old) { |
e76de9f8 TG |
1398 | /* |
1399 | * Can't share interrupts unless both agree to and are | |
1400 | * the same type (level, edge, polarity). So both flag | |
3cca53b0 | 1401 | * fields must have IRQF_SHARED set and the bits which |
9d591edd TG |
1402 | * set the trigger type must match. Also all must |
1403 | * agree on ONESHOT. | |
b525903c | 1404 | * Interrupt lines used for NMIs cannot be shared. |
e76de9f8 | 1405 | */ |
4f8413a3 MZ |
1406 | unsigned int oldtype; |
1407 | ||
b525903c JT |
1408 | if (desc->istate & IRQS_NMI) { |
1409 | pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n", | |
1410 | new->name, irq, desc->irq_data.chip->name); | |
1411 | ret = -EINVAL; | |
1412 | goto out_unlock; | |
1413 | } | |
1414 | ||
4f8413a3 MZ |
1415 | /* |
1416 | * If nobody did set the configuration before, inherit | |
1417 | * the one provided by the requester. | |
1418 | */ | |
1419 | if (irqd_trigger_type_was_set(&desc->irq_data)) { | |
1420 | oldtype = irqd_get_trigger_type(&desc->irq_data); | |
1421 | } else { | |
1422 | oldtype = new->flags & IRQF_TRIGGER_MASK; | |
1423 | irqd_set_trigger_type(&desc->irq_data, oldtype); | |
1424 | } | |
382bd4de | 1425 | |
3cca53b0 | 1426 | if (!((old->flags & new->flags) & IRQF_SHARED) || |
382bd4de | 1427 | (oldtype != (new->flags & IRQF_TRIGGER_MASK)) || |
f5d89470 | 1428 | ((old->flags ^ new->flags) & IRQF_ONESHOT)) |
f5163427 DS |
1429 | goto mismatch; |
1430 | ||
f5163427 | 1431 | /* All handlers must agree on per-cpuness */ |
3cca53b0 TG |
1432 | if ((old->flags & IRQF_PERCPU) != |
1433 | (new->flags & IRQF_PERCPU)) | |
f5163427 | 1434 | goto mismatch; |
1da177e4 LT |
1435 | |
1436 | /* add new interrupt at end of irq queue */ | |
1437 | do { | |
52abb700 TG |
1438 | /* |
1439 | * Or all existing action->thread_mask bits, | |
1440 | * so we can find the next zero bit for this | |
1441 | * new action. | |
1442 | */ | |
b5faba21 | 1443 | thread_mask |= old->thread_mask; |
f17c7545 IM |
1444 | old_ptr = &old->next; |
1445 | old = *old_ptr; | |
1da177e4 LT |
1446 | } while (old); |
1447 | shared = 1; | |
1448 | } | |
1449 | ||
b5faba21 | 1450 | /* |
52abb700 TG |
1451 | * Setup the thread mask for this irqaction for ONESHOT. For |
1452 | * !ONESHOT irqs the thread mask is 0 so we can avoid a | |
1453 | * conditional in irq_wake_thread(). | |
b5faba21 | 1454 | */ |
52abb700 TG |
1455 | if (new->flags & IRQF_ONESHOT) { |
1456 | /* | |
1457 | * Unlikely to have 32 (or 64 on 64-bit) irqs sharing one line, | |
1458 | * but who knows. | |
1459 | */ | |
1460 | if (thread_mask == ~0UL) { | |
1461 | ret = -EBUSY; | |
cba4235e | 1462 | goto out_unlock; |
52abb700 TG |
1463 | } |
1464 | /* | |
1465 | * The thread_mask for the action is or'ed to | |
1466 | * desc->threads_active to indicate that the | |
1467 | * IRQF_ONESHOT thread handler has been woken, but not | |
1468 | * yet finished. The bit is cleared when a thread | |
1469 | * completes. When all threads of a shared interrupt | |
1470 | * line have completed desc->threads_active becomes | |
1471 | * zero and the interrupt line is unmasked. See | |
1472 | * handle.c:irq_wake_thread() for further information. | |
1473 | * | |
1474 | * If no thread is woken by primary (hard irq context) | |
1475 | * interrupt handlers, then desc->threads_active is | |
1476 | * also checked for zero to unmask the irq line in the | |
1477 | * affected hard irq flow handlers | |
1478 | * (handle_[fasteoi|level]_irq). | |
1479 | * | |
1480 | * The new action gets the first zero bit of | |
1481 | * thread_mask assigned. See the loop above which or's | |
1482 | * all existing action->thread_mask bits. | |
1483 | */ | |
ffc661c9 | 1484 | new->thread_mask = 1UL << ffz(thread_mask); |
1c6c6952 | 1485 | |
dc9b229a TG |
1486 | } else if (new->handler == irq_default_primary_handler && |
1487 | !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { | |
1c6c6952 TG |
1488 | /* |
1489 | * The interrupt was requested with handler = NULL, so | |
1490 | * we use the default primary handler for it. But it | |
1491 | * does not have the oneshot flag set. In combination | |
1492 | * with level interrupts this is deadly, because the | |
1493 | * default primary handler just wakes the thread, then | |
1494 | * the irq line is reenabled, but the device still | |
1495 | * has the level irq asserted. Rinse and repeat.... | |
1496 | * | |
1497 | * While this works for edge type interrupts, we play | |
1498 | * it safe and reject unconditionally because we can't | |
1499 | * say for sure which type this interrupt really | |
1500 | * has. The type flags are unreliable as the | |
1501 | * underlying chip implementation can override them. | |
1502 | */ | |
97fd75b7 | 1503 | pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n", |
1c6c6952 TG |
1504 | irq); |
1505 | ret = -EINVAL; | |
cba4235e | 1506 | goto out_unlock; |
b5faba21 | 1507 | } |
b5faba21 | 1508 | |
1da177e4 | 1509 | if (!shared) { |
3aa551c9 TG |
1510 | init_waitqueue_head(&desc->wait_for_threads); |
1511 | ||
e76de9f8 | 1512 | /* Setup the type (level, edge, polarity) if configured: */ |
3cca53b0 | 1513 | if (new->flags & IRQF_TRIGGER_MASK) { |
a1ff541a JL |
1514 | ret = __irq_set_trigger(desc, |
1515 | new->flags & IRQF_TRIGGER_MASK); | |
82736f4d | 1516 | |
19d39a38 | 1517 | if (ret) |
cba4235e | 1518 | goto out_unlock; |
091738a2 | 1519 | } |
6a6de9ef | 1520 | |
c942cee4 TG |
1521 | /* |
1522 | * Activate the interrupt. That activation must happen | |
1523 | * independently of IRQ_NOAUTOEN. request_irq() can fail | |
1524 | * and the callers are supposed to handle | |
1525 | * that. enable_irq() of an interrupt requested with | |
1526 | * IRQ_NOAUTOEN is not supposed to fail. The activation | |
1527 | * keeps it in shutdown mode, it merely associates | |
1528 | * resources if necessary and if that's not possible it | |
1529 | * fails. Interrupts which are in managed shutdown mode | |
1530 | * will simply ignore that activation request. | |
1531 | */ | |
1532 | ret = irq_activate(desc); | |
1533 | if (ret) | |
1534 | goto out_unlock; | |
1535 | ||
009b4c3b | 1536 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ |
32f4125e TG |
1537 | IRQS_ONESHOT | IRQS_WAITING); |
1538 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); | |
94d39e1f | 1539 | |
a005677b TG |
1540 | if (new->flags & IRQF_PERCPU) { |
1541 | irqd_set(&desc->irq_data, IRQD_PER_CPU); | |
1542 | irq_settings_set_per_cpu(desc); | |
1543 | } | |
6a58fb3b | 1544 | |
b25c340c | 1545 | if (new->flags & IRQF_ONESHOT) |
3d67baec | 1546 | desc->istate |= IRQS_ONESHOT; |
b25c340c | 1547 | |
2e051552 TG |
1548 | /* Exclude IRQ from balancing if requested */ |
1549 | if (new->flags & IRQF_NOBALANCING) { | |
1550 | irq_settings_set_no_balancing(desc); | |
1551 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); | |
1552 | } | |
1553 | ||
04c848d3 | 1554 | if (irq_settings_can_autoenable(desc)) { |
4cde9c6b | 1555 | irq_startup(desc, IRQ_RESEND, IRQ_START_COND); |
04c848d3 TG |
1556 | } else { |
1557 | /* | |
1558 | * Shared interrupts do not go well with disabling | |
1559 | * auto enable. A sharing partner might request the irq | |
1560 | * while it's still disabled and then wait for | |
1561 | * interrupts forever. | |
1562 | */ | |
1563 | WARN_ON_ONCE(new->flags & IRQF_SHARED); | |
e76de9f8 TG |
1564 | /* Undo nested disables: */ |
1565 | desc->depth = 1; | |
04c848d3 | 1566 | } |
18404756 | 1567 | |
876dbd4c TG |
1568 | } else if (new->flags & IRQF_TRIGGER_MASK) { |
1569 | unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; | |
7ee7e87d | 1570 | unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); |
876dbd4c TG |
1571 | |
1572 | if (nmsk != omsk) | |
1573 | /* hope the handler works with current trigger mode */ | |
a395d6a7 | 1574 | pr_warn("irq %d uses trigger mode %u; requested %u\n", |
7ee7e87d | 1575 | irq, omsk, nmsk); |
1da177e4 | 1576 | } |
82736f4d | 1577 | |
f17c7545 | 1578 | *old_ptr = new; |
82736f4d | 1579 | |
cab303be TG |
1580 | irq_pm_install_action(desc, new); |
1581 | ||
8528b0f1 LT |
1582 | /* Reset broken irq detection when installing new handler */ |
1583 | desc->irq_count = 0; | |
1584 | desc->irqs_unhandled = 0; | |
1adb0850 TG |
1585 | |
1586 | /* | |
1587 | * Check whether we disabled the irq via the spurious handler | |
1588 | * before. Reenable it and give it another chance. | |
1589 | */ | |
7acdd53e TG |
1590 | if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { |
1591 | desc->istate &= ~IRQS_SPURIOUS_DISABLED; | |
79ff1cda | 1592 | __enable_irq(desc); |
1adb0850 TG |
1593 | } |
1594 | ||
239007b8 | 1595 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
3a90795e | 1596 | chip_bus_sync_unlock(desc); |
9114014c | 1597 | mutex_unlock(&desc->request_mutex); |
1da177e4 | 1598 | |
b2d3d61a DL |
1599 | irq_setup_timings(desc, new); |
1600 | ||
69ab8494 TG |
1601 | /* |
1602 | * Strictly no need to wake it up, but hung_task complains | |
1603 | * when no hard interrupt wakes the thread up. | |
1604 | */ | |
1605 | if (new->thread) | |
1606 | wake_up_process(new->thread); | |
2a1d3ab8 TG |
1607 | if (new->secondary) |
1608 | wake_up_process(new->secondary->thread); | |
69ab8494 | 1609 | |
2c6927a3 | 1610 | register_irq_proc(irq, desc); |
1da177e4 LT |
1611 | new->dir = NULL; |
1612 | register_handler_proc(irq, new); | |
1da177e4 | 1613 | return 0; |
f5163427 DS |
1614 | |
1615 | mismatch: | |
3cca53b0 | 1616 | if (!(new->flags & IRQF_PROBE_SHARED)) { |
97fd75b7 | 1617 | pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n", |
f5d89470 TG |
1618 | irq, new->flags, new->name, old->flags, old->name); |
1619 | #ifdef CONFIG_DEBUG_SHIRQ | |
13e87ec6 | 1620 | dump_stack(); |
3f050447 | 1621 | #endif |
f5d89470 | 1622 | } |
3aa551c9 TG |
1623 | ret = -EBUSY; |
1624 | ||
cba4235e | 1625 | out_unlock: |
1c389795 | 1626 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
3b8249e7 | 1627 | |
46e48e25 TG |
1628 | if (!desc->action) |
1629 | irq_release_resources(desc); | |
19d39a38 TG |
1630 | out_bus_unlock: |
1631 | chip_bus_sync_unlock(desc); | |
9114014c TG |
1632 | mutex_unlock(&desc->request_mutex); |
1633 | ||
3aa551c9 | 1634 | out_thread: |
3aa551c9 TG |
1635 | if (new->thread) { |
1636 | struct task_struct *t = new->thread; | |
1637 | ||
1638 | new->thread = NULL; | |
05d74efa | 1639 | kthread_stop(t); |
3aa551c9 TG |
1640 | put_task_struct(t); |
1641 | } | |
2a1d3ab8 TG |
1642 | if (new->secondary && new->secondary->thread) { |
1643 | struct task_struct *t = new->secondary->thread; | |
1644 | ||
1645 | new->secondary->thread = NULL; | |
1646 | kthread_stop(t); | |
1647 | put_task_struct(t); | |
1648 | } | |
b6873807 SAS |
1649 | out_mput: |
1650 | module_put(desc->owner); | |
3aa551c9 | 1651 | return ret; |
1da177e4 LT |
1652 | } |
1653 | ||
d3c60047 TG |
1654 | /** |
1655 | * setup_irq - setup an interrupt | |
1656 | * @irq: Interrupt line to setup | |
1657 | * @act: irqaction for the interrupt | |
1658 | * | |
1659 | * Used to statically set up interrupts in the early boot process. | |
1660 | */ | |
1661 | int setup_irq(unsigned int irq, struct irqaction *act) | |
1662 | { | |
986c011d | 1663 | int retval; |
d3c60047 TG |
1664 | struct irq_desc *desc = irq_to_desc(irq); |
1665 | ||
9b5d585d | 1666 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
31d9d9b6 | 1667 | return -EINVAL; |
be45beb2 JH |
1668 | |
1669 | retval = irq_chip_pm_get(&desc->irq_data); | |
1670 | if (retval < 0) | |
1671 | return retval; | |
1672 | ||
986c011d | 1673 | retval = __setup_irq(irq, desc, act); |
986c011d | 1674 | |
be45beb2 JH |
1675 | if (retval) |
1676 | irq_chip_pm_put(&desc->irq_data); | |
1677 | ||
986c011d | 1678 | return retval; |
d3c60047 | 1679 | } |
eb53b4e8 | 1680 | EXPORT_SYMBOL_GPL(setup_irq); |
d3c60047 | 1681 | |
31d9d9b6 | 1682 | /* |
cbf94f06 MD |
1683 | * Internal function to unregister an irqaction - used to free |
1684 | * regular and special interrupts that are part of the architecture. | |
1da177e4 | 1685 | */ |
83ac4ca9 | 1686 | static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) |
1da177e4 | 1687 | { |
83ac4ca9 | 1688 | unsigned irq = desc->irq_data.irq; |
f17c7545 | 1689 | struct irqaction *action, **action_ptr; |
1da177e4 LT |
1690 | unsigned long flags; |
1691 | ||
ae88a23b | 1692 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); |
7d94f7ca | 1693 | |
9114014c | 1694 | mutex_lock(&desc->request_mutex); |
abc7e40c | 1695 | chip_bus_lock(desc); |
239007b8 | 1696 | raw_spin_lock_irqsave(&desc->lock, flags); |
ae88a23b IM |
1697 | |
1698 | /* | |
1699 | * There can be multiple actions per IRQ descriptor, find the right | |
1700 | * one based on the dev_id: | |
1701 | */ | |
f17c7545 | 1702 | action_ptr = &desc->action; |
1da177e4 | 1703 | for (;;) { |
f17c7545 | 1704 | action = *action_ptr; |
1da177e4 | 1705 | |
ae88a23b IM |
1706 | if (!action) { |
1707 | WARN(1, "Trying to free already-free IRQ %d\n", irq); | |
239007b8 | 1708 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
abc7e40c | 1709 | chip_bus_sync_unlock(desc); |
19d39a38 | 1710 | mutex_unlock(&desc->request_mutex); |
f21cfb25 | 1711 | return NULL; |
ae88a23b | 1712 | } |
1da177e4 | 1713 | |
8316e381 IM |
1714 | if (action->dev_id == dev_id) |
1715 | break; | |
f17c7545 | 1716 | action_ptr = &action->next; |
ae88a23b | 1717 | } |
dbce706e | 1718 | |
ae88a23b | 1719 | /* Found it - now remove it from the list of entries: */ |
f17c7545 | 1720 | *action_ptr = action->next; |
ae88a23b | 1721 | |
cab303be TG |
1722 | irq_pm_remove_action(desc, action); |
1723 | ||
ae88a23b | 1724 | /* If this was the last handler, shut down the IRQ line: */ |
c1bacbae | 1725 | if (!desc->action) { |
e9849777 | 1726 | irq_settings_clr_disable_unlazy(desc); |
4001d8e8 | 1727 | /* Only shutdown. Deactivate after synchronize_hardirq() */ |
46999238 | 1728 | irq_shutdown(desc); |
c1bacbae | 1729 | } |
3aa551c9 | 1730 | |
e7a297b0 PWJ |
1731 | #ifdef CONFIG_SMP |
1732 | /* make sure affinity_hint is cleaned up */ | |
1733 | if (WARN_ON_ONCE(desc->affinity_hint)) | |
1734 | desc->affinity_hint = NULL; | |
1735 | #endif | |
1736 | ||
239007b8 | 1737 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
19d39a38 TG |
1738 | /* |
1739 | * Drop bus_lock here so the changes which were done in the chip | |
1740 | * callbacks above are synced out to the irq chips which hang | |
519cc865 | 1741 | * behind a slow bus (I2C, SPI) before calling synchronize_hardirq(). |
19d39a38 TG |
1742 | * |
1743 | * Aside from that, the bus_lock can also be taken from the threaded | |
1744 | * handler in irq_finalize_oneshot() which results in a deadlock | |
519cc865 | 1745 | * because kthread_stop() would wait forever for the thread to |
19d39a38 TG |
1746 | * complete, which is blocked on the bus lock. |
1747 | * | |
1748 | * The still held desc->request_mutex protects against a | |
1749 | * concurrent request_irq() of this irq so the release of resources | |
1750 | * and timing data is properly serialized. | |
1751 | */ | |
abc7e40c | 1752 | chip_bus_sync_unlock(desc); |
ae88a23b IM |
1753 | |
1754 | unregister_handler_proc(irq, action); | |
1755 | ||
62e04686 TG |
1756 | /* |
1757 | * Make sure it's not being used on another CPU and if the chip | |
1758 | * supports it also make sure that there is no (not yet serviced) | |
1759 | * interrupt in flight at the hardware level. | |
1760 | */ | |
1761 | __synchronize_hardirq(desc, true); | |
1da177e4 | 1762 | |
70edcd77 | 1763 | #ifdef CONFIG_DEBUG_SHIRQ |
ae88a23b IM |
1764 | /* |
1765 | * It's a shared IRQ -- the driver ought to be prepared for an IRQ | |
1766 | * event to happen even now it's being freed, so let's make sure that | |
1767 | * is so by doing an extra call to the handler .... | |
1768 | * | |
1769 | * ( We do this after actually deregistering it, to make sure that a | |
0a13ec0b | 1770 | * 'real' IRQ doesn't run in parallel with our fake. ) |
ae88a23b IM |
1771 | */ |
1772 | if (action->flags & IRQF_SHARED) { | |
1773 | local_irq_save(flags); | |
1774 | action->handler(irq, dev_id); | |
1775 | local_irq_restore(flags); | |
1da177e4 | 1776 | } |
ae88a23b | 1777 | #endif |
2d860ad7 | 1778 | |
519cc865 LW |
1779 | /* |
1780 | * The action has already been removed above, but the thread writes | |
1781 | * its oneshot mask bit when it completes. However, request_mutex is | |
1782 | * held across this, which prevents __setup_irq() from handing out | |
1783 | * the same bit to a newly requested action. | |
1784 | */ | |
2d860ad7 | 1785 | if (action->thread) { |
05d74efa | 1786 | kthread_stop(action->thread); |
2d860ad7 | 1787 | put_task_struct(action->thread); |
2a1d3ab8 TG |
1788 | if (action->secondary && action->secondary->thread) { |
1789 | kthread_stop(action->secondary->thread); | |
1790 | put_task_struct(action->secondary->thread); | |
1791 | } | |
2d860ad7 LT |
1792 | } |
1793 | ||
19d39a38 | 1794 | /* Last action releases resources */ |
2343877f | 1795 | if (!desc->action) { |
19d39a38 TG |
1796 | /* |
1797 | * Reacquire bus lock as irq_release_resources() might | |
1798 | * require it to deallocate resources over the slow bus. | |
1799 | */ | |
1800 | chip_bus_lock(desc); | |
4001d8e8 TG |
1801 | /* |
1802 | * There is no interrupt on the fly anymore. Deactivate it | |
1803 | * completely. | |
1804 | */ | |
1805 | raw_spin_lock_irqsave(&desc->lock, flags); | |
1806 | irq_domain_deactivate_irq(&desc->irq_data); | |
1807 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
1808 | ||
46e48e25 | 1809 | irq_release_resources(desc); |
19d39a38 | 1810 | chip_bus_sync_unlock(desc); |
2343877f TG |
1811 | irq_remove_timings(desc); |
1812 | } | |
46e48e25 | 1813 | |
9114014c TG |
1814 | mutex_unlock(&desc->request_mutex); |
1815 | ||
be45beb2 | 1816 | irq_chip_pm_put(&desc->irq_data); |
b6873807 | 1817 | module_put(desc->owner); |
2a1d3ab8 | 1818 | kfree(action->secondary); |
f21cfb25 MD |
1819 | return action; |
1820 | } | |
1821 | ||
cbf94f06 MD |
1822 | /** |
1823 | * remove_irq - free an interrupt | |
1824 | * @irq: Interrupt line to free | |
1825 | * @act: irqaction for the interrupt | |
1826 | * | |
1827 | * Used to remove interrupts statically set up by the early boot process. | |
1828 | */ | |
1829 | void remove_irq(unsigned int irq, struct irqaction *act) | |
1830 | { | |
31d9d9b6 MZ |
1831 | struct irq_desc *desc = irq_to_desc(irq); |
1832 | ||
1833 | if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) | |
83ac4ca9 | 1834 | __free_irq(desc, act->dev_id); |
cbf94f06 | 1835 | } |
eb53b4e8 | 1836 | EXPORT_SYMBOL_GPL(remove_irq); |
cbf94f06 | 1837 | |
f21cfb25 MD |
1838 | /** |
1839 | * free_irq - free an interrupt allocated with request_irq | |
1840 | * @irq: Interrupt line to free | |
1841 | * @dev_id: Device identity to free | |
1842 | * | |
1843 | * Remove an interrupt handler. The handler is removed and if the | |
1844 | * interrupt line is no longer in use by any driver it is disabled. | |
1845 | * On a shared IRQ the caller must ensure the interrupt is disabled | |
1846 | * on the card it drives before calling this function. The function | |
1847 | * does not return until any executing interrupts for this IRQ | |
1848 | * have completed. | |
1849 | * | |
1850 | * This function must not be called from interrupt context. | |
25ce4be7 CH |
1851 | * |
1852 | * Returns the devname argument passed to request_irq. | |
f21cfb25 | 1853 | */ |
25ce4be7 | 1854 | const void *free_irq(unsigned int irq, void *dev_id) |
f21cfb25 | 1855 | { |
70aedd24 | 1856 | struct irq_desc *desc = irq_to_desc(irq); |
25ce4be7 CH |
1857 | struct irqaction *action; |
1858 | const char *devname; | |
70aedd24 | 1859 | |
31d9d9b6 | 1860 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
25ce4be7 | 1861 | return NULL; |
70aedd24 | 1862 | |
cd7eab44 BH |
1863 | #ifdef CONFIG_SMP |
1864 | if (WARN_ON(desc->affinity_notify)) | |
1865 | desc->affinity_notify = NULL; | |
1866 | #endif | |
1867 | ||
83ac4ca9 | 1868 | action = __free_irq(desc, dev_id); |
2827a418 AM |
1869 | |
1870 | if (!action) | |
1871 | return NULL; | |
1872 | ||
25ce4be7 CH |
1873 | devname = action->name; |
1874 | kfree(action); | |
1875 | return devname; | |
1da177e4 | 1876 | } |
1da177e4 LT |
1877 | EXPORT_SYMBOL(free_irq); |
1878 | ||
b525903c JT |
1879 | /* This function must be called with desc->lock held */ |
1880 | static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc) | |
1881 | { | |
1882 | const char *devname = NULL; | |
1883 | ||
1884 | desc->istate &= ~IRQS_NMI; | |
1885 | ||
1886 | if (!WARN_ON(desc->action == NULL)) { | |
1887 | irq_pm_remove_action(desc, desc->action); | |
1888 | devname = desc->action->name; | |
1889 | unregister_handler_proc(irq, desc->action); | |
1890 | ||
1891 | kfree(desc->action); | |
1892 | desc->action = NULL; | |
1893 | } | |
1894 | ||
1895 | irq_settings_clr_disable_unlazy(desc); | |
4001d8e8 | 1896 | irq_shutdown_and_deactivate(desc); |
b525903c JT |
1897 | |
1898 | irq_release_resources(desc); | |
1899 | ||
1900 | irq_chip_pm_put(&desc->irq_data); | |
1901 | module_put(desc->owner); | |
1902 | ||
1903 | return devname; | |
1904 | } | |
1905 | ||
1906 | const void *free_nmi(unsigned int irq, void *dev_id) | |
1907 | { | |
1908 | struct irq_desc *desc = irq_to_desc(irq); | |
1909 | unsigned long flags; | |
1910 | const void *devname; | |
1911 | ||
1912 | if (!desc || WARN_ON(!(desc->istate & IRQS_NMI))) | |
1913 | return NULL; | |
1914 | ||
1915 | if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) | |
1916 | return NULL; | |
1917 | ||
1918 | /* NMI still enabled */ | |
1919 | if (WARN_ON(desc->depth == 0)) | |
1920 | disable_nmi_nosync(irq); | |
1921 | ||
1922 | raw_spin_lock_irqsave(&desc->lock, flags); | |
1923 | ||
1924 | irq_nmi_teardown(desc); | |
1925 | devname = __cleanup_nmi(irq, desc); | |
1926 | ||
1927 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
1928 | ||
1929 | return devname; | |
1930 | } | |
1931 | ||
1da177e4 | 1932 | /** |
3aa551c9 | 1933 | * request_threaded_irq - allocate an interrupt line |
1da177e4 | 1934 | * @irq: Interrupt line to allocate |
3aa551c9 TG |
1935 | * @handler: Function to be called when the IRQ occurs. |
1936 | * Primary handler for threaded interrupts | |
b25c340c TG |
1937 | * If NULL and thread_fn != NULL the default |
1938 | * primary handler is installed | |
f48fe81e TG |
1939 | * @thread_fn: Function called from the irq handler thread |
1940 | * If NULL, no irq thread is created | |
1da177e4 LT |
1941 | * @irqflags: Interrupt type flags |
1942 | * @devname: An ascii name for the claiming device | |
1943 | * @dev_id: A cookie passed back to the handler function | |
1944 | * | |
1945 | * This call allocates interrupt resources and enables the | |
1946 | * interrupt line and IRQ handling. From the point this | |
1947 | * call is made your handler function may be invoked. Since | |
1948 | * your handler function must clear any interrupt the board | |
1949 | * raises, you must take care both to initialise your hardware | |
1950 | * and to set up the interrupt handler in the right order. | |
1951 | * | |
3aa551c9 | 1952 | * If you want to set up a threaded irq handler for your device |
6d21af4f | 1953 | * then you need to supply @handler and @thread_fn. @handler is |
3aa551c9 TG |
1954 | * still called in hard interrupt context and has to check |
1955 | * whether the interrupt originates from the device. If yes it | |
1956 | * needs to disable the interrupt on the device and return | |
39a2eddb | 1957 | * IRQ_WAKE_THREAD which will wake up the handler thread and run |
3aa551c9 TG |
1958 | * @thread_fn. This split handler design is necessary to support |
1959 | * shared interrupts. | |
1960 | * | |
1da177e4 LT |
1961 | * Dev_id must be globally unique. Normally the address of the |
1962 | * device data structure is used as the cookie. Since the handler | |
1963 | * receives this value it makes sense to use it. | |
1964 | * | |
1965 | * If your interrupt is shared you must pass a non NULL dev_id | |
1966 | * as this is required when freeing the interrupt. | |
1967 | * | |
1968 | * Flags: | |
1969 | * | |
3cca53b0 | 1970 | * IRQF_SHARED Interrupt is shared |
0c5d1eb7 | 1971 | * IRQF_TRIGGER_* Specify active edge(s) or level |
1da177e4 LT |
1972 | * |
1973 | */ | |
3aa551c9 TG |
1974 | int request_threaded_irq(unsigned int irq, irq_handler_t handler, |
1975 | irq_handler_t thread_fn, unsigned long irqflags, | |
1976 | const char *devname, void *dev_id) | |
1da177e4 | 1977 | { |
06fcb0c6 | 1978 | struct irqaction *action; |
08678b08 | 1979 | struct irq_desc *desc; |
d3c60047 | 1980 | int retval; |
1da177e4 | 1981 | |
e237a551 CF |
1982 | if (irq == IRQ_NOTCONNECTED) |
1983 | return -ENOTCONN; | |
1984 | ||
1da177e4 LT |
1985 | /* |
1986 | * Sanity-check: shared interrupts must pass in a real dev-ID, | |
1987 | * otherwise we'll have trouble later trying to figure out | |
1988 | * which interrupt is which (messes up the interrupt freeing | |
1989 | * logic etc). | |
17f48034 RW |
1990 | * |
1991 | * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and | |
1992 | * it cannot be set along with IRQF_NO_SUSPEND. | |
1da177e4 | 1993 | */ |
17f48034 RW |
1994 | if (((irqflags & IRQF_SHARED) && !dev_id) || |
1995 | (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) || | |
1996 | ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND))) | |
1da177e4 | 1997 | return -EINVAL; |
7d94f7ca | 1998 | |
cb5bc832 | 1999 | desc = irq_to_desc(irq); |
7d94f7ca | 2000 | if (!desc) |
1da177e4 | 2001 | return -EINVAL; |
7d94f7ca | 2002 | |
31d9d9b6 MZ |
2003 | if (!irq_settings_can_request(desc) || |
2004 | WARN_ON(irq_settings_is_per_cpu_devid(desc))) | |
6550c775 | 2005 | return -EINVAL; |
b25c340c TG |
2006 | |
2007 | if (!handler) { | |
2008 | if (!thread_fn) | |
2009 | return -EINVAL; | |
2010 | handler = irq_default_primary_handler; | |
2011 | } | |
1da177e4 | 2012 | |
45535732 | 2013 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); |
1da177e4 LT |
2014 | if (!action) |
2015 | return -ENOMEM; | |
2016 | ||
2017 | action->handler = handler; | |
3aa551c9 | 2018 | action->thread_fn = thread_fn; |
1da177e4 | 2019 | action->flags = irqflags; |
1da177e4 | 2020 | action->name = devname; |
1da177e4 LT |
2021 | action->dev_id = dev_id; |
2022 | ||
be45beb2 | 2023 | retval = irq_chip_pm_get(&desc->irq_data); |
4396f46c SL |
2024 | if (retval < 0) { |
2025 | kfree(action); | |
be45beb2 | 2026 | return retval; |
4396f46c | 2027 | } |
be45beb2 | 2028 | |
d3c60047 | 2029 | retval = __setup_irq(irq, desc, action); |
70aedd24 | 2030 | |
2a1d3ab8 | 2031 | if (retval) { |
be45beb2 | 2032 | irq_chip_pm_put(&desc->irq_data); |
2a1d3ab8 | 2033 | kfree(action->secondary); |
377bf1e4 | 2034 | kfree(action); |
2a1d3ab8 | 2035 | } |
377bf1e4 | 2036 | |
6d83f94d | 2037 | #ifdef CONFIG_DEBUG_SHIRQ_FIXME |
6ce51c43 | 2038 | if (!retval && (irqflags & IRQF_SHARED)) { |
a304e1b8 DW |
2039 | /* |
2040 | * It's a shared IRQ -- the driver ought to be prepared for it | |
2041 | * to happen immediately, so let's make sure.... | |
377bf1e4 AV |
2042 | * We disable the irq to make sure that a 'real' IRQ doesn't |
2043 | * run in parallel with our fake. | |
a304e1b8 | 2044 | */ |
59845b1f | 2045 | unsigned long flags; |
a304e1b8 | 2046 | |
377bf1e4 | 2047 | disable_irq(irq); |
59845b1f | 2048 | local_irq_save(flags); |
377bf1e4 | 2049 | |
59845b1f | 2050 | handler(irq, dev_id); |
377bf1e4 | 2051 | |
59845b1f | 2052 | local_irq_restore(flags); |
377bf1e4 | 2053 | enable_irq(irq); |
a304e1b8 DW |
2054 | } |
2055 | #endif | |
1da177e4 LT |
2056 | return retval; |
2057 | } | |
3aa551c9 | 2058 | EXPORT_SYMBOL(request_threaded_irq); |
ae731f8d MZ |
2059 | |
2060 | /** | |
2061 | * request_any_context_irq - allocate an interrupt line | |
2062 | * @irq: Interrupt line to allocate | |
2063 | * @handler: Function to be called when the IRQ occurs. | |
2064 | * Threaded handler for threaded interrupts. | |
2065 | * @flags: Interrupt type flags | |
2066 | * @name: An ascii name for the claiming device | |
2067 | * @dev_id: A cookie passed back to the handler function | |
2068 | * | |
2069 | * This call allocates interrupt resources and enables the | |
2070 | * interrupt line and IRQ handling. It selects either a | |
2071 | * hardirq or threaded handling method depending on the | |
2072 | * context. | |
2073 | * | |
2074 | * On failure, it returns a negative value. On success, | |
2075 | * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. | |
2076 | */ | |
2077 | int request_any_context_irq(unsigned int irq, irq_handler_t handler, | |
2078 | unsigned long flags, const char *name, void *dev_id) | |
2079 | { | |
e237a551 | 2080 | struct irq_desc *desc; |
ae731f8d MZ |
2081 | int ret; |
2082 | ||
e237a551 CF |
2083 | if (irq == IRQ_NOTCONNECTED) |
2084 | return -ENOTCONN; | |
2085 | ||
2086 | desc = irq_to_desc(irq); | |
ae731f8d MZ |
2087 | if (!desc) |
2088 | return -EINVAL; | |
2089 | ||
1ccb4e61 | 2090 | if (irq_settings_is_nested_thread(desc)) { |
ae731f8d MZ |
2091 | ret = request_threaded_irq(irq, NULL, handler, |
2092 | flags, name, dev_id); | |
2093 | return !ret ? IRQC_IS_NESTED : ret; | |
2094 | } | |
2095 | ||
2096 | ret = request_irq(irq, handler, flags, name, dev_id); | |
2097 | return !ret ? IRQC_IS_HARDIRQ : ret; | |
2098 | } | |
2099 | EXPORT_SYMBOL_GPL(request_any_context_irq); | |
31d9d9b6 | 2100 | |
b525903c JT |
2101 | /** |
2102 | * request_nmi - allocate an interrupt line for NMI delivery | |
2103 | * @irq: Interrupt line to allocate | |
2104 | * @handler: Function to be called when the IRQ occurs. | |
2105 | * Threaded handler for threaded interrupts. | |
2106 | * @irqflags: Interrupt type flags | |
2107 | * @name: An ascii name for the claiming device | |
2108 | * @dev_id: A cookie passed back to the handler function | |
2109 | * | |
2110 | * This call allocates interrupt resources and enables the | |
2111 | * interrupt line and IRQ handling. It sets up the IRQ line | |
2112 | * to be handled as an NMI. | |
2113 | * | |
2114 | * An interrupt line delivering NMIs cannot be shared and IRQ handling | |
2115 | * cannot be threaded. | |
2116 | * | |
2117 | * Interrupt lines requested for NMI delivery must produce per cpu | |
2118 | * interrupts and have the auto enable setting disabled. | |
2119 | * | |
2120 | * Dev_id must be globally unique. Normally the address of the | |
2121 | * device data structure is used as the cookie. Since the handler | |
2122 | * receives this value it makes sense to use it. | |
2123 | * | |
2124 | * If the interrupt line cannot be used to deliver NMIs, the function | |
2125 | * will fail and return a negative value. | |
2126 | */ | |
2127 | int request_nmi(unsigned int irq, irq_handler_t handler, | |
2128 | unsigned long irqflags, const char *name, void *dev_id) | |
2129 | { | |
2130 | struct irqaction *action; | |
2131 | struct irq_desc *desc; | |
2132 | unsigned long flags; | |
2133 | int retval; | |
2134 | ||
2135 | if (irq == IRQ_NOTCONNECTED) | |
2136 | return -ENOTCONN; | |
2137 | ||
2138 | /* NMI cannot be shared or used for polling */ | |
2139 | if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL)) | |
2140 | return -EINVAL; | |
2141 | ||
2142 | if (!(irqflags & IRQF_PERCPU)) | |
2143 | return -EINVAL; | |
2144 | ||
2145 | if (!handler) | |
2146 | return -EINVAL; | |
2147 | ||
2148 | desc = irq_to_desc(irq); | |
2149 | ||
2150 | if (!desc || irq_settings_can_autoenable(desc) || | |
2151 | !irq_settings_can_request(desc) || | |
2152 | WARN_ON(irq_settings_is_per_cpu_devid(desc)) || | |
2153 | !irq_supports_nmi(desc)) | |
2154 | return -EINVAL; | |
2155 | ||
2156 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); | |
2157 | if (!action) | |
2158 | return -ENOMEM; | |
2159 | ||
2160 | action->handler = handler; | |
2161 | action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING; | |
2162 | action->name = name; | |
2163 | action->dev_id = dev_id; | |
2164 | ||
2165 | retval = irq_chip_pm_get(&desc->irq_data); | |
2166 | if (retval < 0) | |
2167 | goto err_out; | |
2168 | ||
2169 | retval = __setup_irq(irq, desc, action); | |
2170 | if (retval) | |
2171 | goto err_irq_setup; | |
2172 | ||
2173 | raw_spin_lock_irqsave(&desc->lock, flags); | |
2174 | ||
2175 | /* Setup NMI state */ | |
2176 | desc->istate |= IRQS_NMI; | |
2177 | retval = irq_nmi_setup(desc); | |
2178 | if (retval) { | |
2179 | __cleanup_nmi(irq, desc); | |
2180 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
2181 | return -EINVAL; | |
2182 | } | |
2183 | ||
2184 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
2185 | ||
2186 | return 0; | |
2187 | ||
2188 | err_irq_setup: | |
2189 | irq_chip_pm_put(&desc->irq_data); | |
2190 | err_out: | |
2191 | kfree(action); | |
2192 | ||
2193 | return retval; | |
2194 | } | |
2195 | ||
1e7c5fd2 | 2196 | void enable_percpu_irq(unsigned int irq, unsigned int type) |
31d9d9b6 MZ |
2197 | { |
2198 | unsigned int cpu = smp_processor_id(); | |
2199 | unsigned long flags; | |
2200 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); | |
2201 | ||
2202 | if (!desc) | |
2203 | return; | |
2204 | ||
f35ad083 MZ |
2205 | /* |
2206 | * If the trigger type is not specified by the caller, then | |
2207 | * use the default for this interrupt. | |
2208 | */ | |
1e7c5fd2 | 2209 | type &= IRQ_TYPE_SENSE_MASK; |
f35ad083 MZ |
2210 | if (type == IRQ_TYPE_NONE) |
2211 | type = irqd_get_trigger_type(&desc->irq_data); | |
2212 | ||
1e7c5fd2 MZ |
2213 | if (type != IRQ_TYPE_NONE) { |
2214 | int ret; | |
2215 | ||
a1ff541a | 2216 | ret = __irq_set_trigger(desc, type); |
1e7c5fd2 MZ |
2217 | |
2218 | if (ret) { | |
32cffdde | 2219 | WARN(1, "failed to set type for IRQ%d\n", irq); |
1e7c5fd2 MZ |
2220 | goto out; |
2221 | } | |
2222 | } | |
2223 | ||
31d9d9b6 | 2224 | irq_percpu_enable(desc, cpu); |
1e7c5fd2 | 2225 | out: |
31d9d9b6 MZ |
2226 | irq_put_desc_unlock(desc, flags); |
2227 | } | |
36a5df85 | 2228 | EXPORT_SYMBOL_GPL(enable_percpu_irq); |
31d9d9b6 | 2229 | |
4b078c3f JT |
2230 | void enable_percpu_nmi(unsigned int irq, unsigned int type) |
2231 | { | |
2232 | enable_percpu_irq(irq, type); | |
2233 | } | |
2234 | ||
f0cb3220 TP |
2235 | /** |
2236 | * irq_percpu_is_enabled - Check whether the per cpu irq is enabled | |
2237 | * @irq: Linux irq number to check for | |
2238 | * | |
2239 | * Must be called from a non-migratable context. Returns the enable | |
2240 | * state of a per cpu interrupt on the current cpu. | |
2241 | */ | |
2242 | bool irq_percpu_is_enabled(unsigned int irq) | |
2243 | { | |
2244 | unsigned int cpu = smp_processor_id(); | |
2245 | struct irq_desc *desc; | |
2246 | unsigned long flags; | |
2247 | bool is_enabled; | |
2248 | ||
2249 | desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); | |
2250 | if (!desc) | |
2251 | return false; | |
2252 | ||
2253 | is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); | |
2254 | irq_put_desc_unlock(desc, flags); | |
2255 | ||
2256 | return is_enabled; | |
2257 | } | |
2258 | EXPORT_SYMBOL_GPL(irq_percpu_is_enabled); | |
2259 | ||
31d9d9b6 MZ |
2260 | void disable_percpu_irq(unsigned int irq) |
2261 | { | |
2262 | unsigned int cpu = smp_processor_id(); | |
2263 | unsigned long flags; | |
2264 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); | |
2265 | ||
2266 | if (!desc) | |
2267 | return; | |
2268 | ||
2269 | irq_percpu_disable(desc, cpu); | |
2270 | irq_put_desc_unlock(desc, flags); | |
2271 | } | |
36a5df85 | 2272 | EXPORT_SYMBOL_GPL(disable_percpu_irq); |
31d9d9b6 | 2273 | |
4b078c3f JT |
2274 | void disable_percpu_nmi(unsigned int irq) |
2275 | { | |
2276 | disable_percpu_irq(irq); | |
2277 | } | |
2278 | ||
31d9d9b6 MZ |
2279 | /* |
2280 | * Internal function to unregister a percpu irqaction. | |
2281 | */ | |
2282 | static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id) | |
2283 | { | |
2284 | struct irq_desc *desc = irq_to_desc(irq); | |
2285 | struct irqaction *action; | |
2286 | unsigned long flags; | |
2287 | ||
2288 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); | |
2289 | ||
2290 | if (!desc) | |
2291 | return NULL; | |
2292 | ||
2293 | raw_spin_lock_irqsave(&desc->lock, flags); | |
2294 | ||
2295 | action = desc->action; | |
2296 | if (!action || action->percpu_dev_id != dev_id) { | |
2297 | WARN(1, "Trying to free already-free IRQ %d\n", irq); | |
2298 | goto bad; | |
2299 | } | |
2300 | ||
2301 | if (!cpumask_empty(desc->percpu_enabled)) { | |
2302 | WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", | |
2303 | irq, cpumask_first(desc->percpu_enabled)); | |
2304 | goto bad; | |
2305 | } | |
2306 | ||
2307 | /* Found it - now remove it from the list of entries: */ | |
2308 | desc->action = NULL; | |
2309 | ||
4b078c3f JT |
2310 | desc->istate &= ~IRQS_NMI; |
2311 | ||
31d9d9b6 MZ |
2312 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
2313 | ||
2314 | unregister_handler_proc(irq, action); | |
2315 | ||
be45beb2 | 2316 | irq_chip_pm_put(&desc->irq_data); |
31d9d9b6 MZ |
2317 | module_put(desc->owner); |
2318 | return action; | |
2319 | ||
2320 | bad: | |
2321 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
2322 | return NULL; | |
2323 | } | |
2324 | ||
2325 | /** | |
2326 | * remove_percpu_irq - free a per-cpu interrupt | |
2327 | * @irq: Interrupt line to free | |
2328 | * @act: irqaction for the interrupt | |
2329 | * | |
2330 | * Used to remove interrupts statically set up by the early boot process. | |
2331 | */ | |
2332 | void remove_percpu_irq(unsigned int irq, struct irqaction *act) | |
2333 | { | |
2334 | struct irq_desc *desc = irq_to_desc(irq); | |
2335 | ||
2336 | if (desc && irq_settings_is_per_cpu_devid(desc)) | |
2337 | __free_percpu_irq(irq, act->percpu_dev_id); | |
2338 | } | |
2339 | ||
2340 | /** | |
2341 | * free_percpu_irq - free an interrupt allocated with request_percpu_irq | |
2342 | * @irq: Interrupt line to free | |
2343 | * @dev_id: Device identity to free | |
2344 | * | |
2345 | * Remove a percpu interrupt handler. The handler is removed, but | |
2346 | * the interrupt line is not disabled. This must be done on each | |
2347 | * CPU before calling this function. The function does not return | |
2348 | * until any executing interrupts for this IRQ have completed. | |
2349 | * | |
2350 | * This function must not be called from interrupt context. | |
2351 | */ | |
2352 | void free_percpu_irq(unsigned int irq, void __percpu *dev_id) | |
2353 | { | |
2354 | struct irq_desc *desc = irq_to_desc(irq); | |
2355 | ||
2356 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) | |
2357 | return; | |
2358 | ||
2359 | chip_bus_lock(desc); | |
2360 | kfree(__free_percpu_irq(irq, dev_id)); | |
2361 | chip_bus_sync_unlock(desc); | |
2362 | } | |
aec2e2ad | 2363 | EXPORT_SYMBOL_GPL(free_percpu_irq); |
31d9d9b6 | 2364 | |
4b078c3f JT |
2365 | void free_percpu_nmi(unsigned int irq, void __percpu *dev_id) |
2366 | { | |
2367 | struct irq_desc *desc = irq_to_desc(irq); | |
2368 | ||
2369 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) | |
2370 | return; | |
2371 | ||
2372 | if (WARN_ON(!(desc->istate & IRQS_NMI))) | |
2373 | return; | |
2374 | ||
2375 | kfree(__free_percpu_irq(irq, dev_id)); | |
2376 | } | |
2377 | ||
31d9d9b6 MZ |
2378 | /** |
2379 | * setup_percpu_irq - setup a per-cpu interrupt | |
2380 | * @irq: Interrupt line to setup | |
2381 | * @act: irqaction for the interrupt | |
2382 | * | |
2383 | * Used to statically set up per-cpu interrupts in the early boot process. | |
2384 | */ | |
2385 | int setup_percpu_irq(unsigned int irq, struct irqaction *act) | |
2386 | { | |
2387 | struct irq_desc *desc = irq_to_desc(irq); | |
2388 | int retval; | |
2389 | ||
2390 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) | |
2391 | return -EINVAL; | |
be45beb2 JH |
2392 | |
2393 | retval = irq_chip_pm_get(&desc->irq_data); | |
2394 | if (retval < 0) | |
2395 | return retval; | |
2396 | ||
31d9d9b6 | 2397 | retval = __setup_irq(irq, desc, act); |
31d9d9b6 | 2398 | |
be45beb2 JH |
2399 | if (retval) |
2400 | irq_chip_pm_put(&desc->irq_data); | |
2401 | ||
31d9d9b6 MZ |
2402 | return retval; |
2403 | } | |
2404 | ||
2405 | /** | |
c80081b9 | 2406 | * __request_percpu_irq - allocate a percpu interrupt line |
31d9d9b6 MZ |
2407 | * @irq: Interrupt line to allocate |
2408 | * @handler: Function to be called when the IRQ occurs. | |
c80081b9 | 2409 | * @flags: Interrupt type flags (IRQF_TIMER only) |
31d9d9b6 MZ |
2410 | * @devname: An ascii name for the claiming device |
2411 | * @dev_id: A percpu cookie passed back to the handler function | |
2412 | * | |
a1b7febd MR |
2413 | * This call allocates interrupt resources and enables the |
2414 | * interrupt on the local CPU. If the interrupt is supposed to be | |
2415 | * enabled on other CPUs, it has to be done on each CPU using | |
2416 | * enable_percpu_irq(). | |
31d9d9b6 MZ |
2417 | * |
2418 | * Dev_id must be globally unique. It is a per-cpu variable, and | |
2419 | * the handler gets called with the interrupted CPU's instance of | |
2420 | * that variable. | |
2421 | */ | |
c80081b9 DL |
2422 | int __request_percpu_irq(unsigned int irq, irq_handler_t handler, |
2423 | unsigned long flags, const char *devname, | |
2424 | void __percpu *dev_id) | |
31d9d9b6 MZ |
2425 | { |
2426 | struct irqaction *action; | |
2427 | struct irq_desc *desc; | |
2428 | int retval; | |
2429 | ||
2430 | if (!dev_id) | |
2431 | return -EINVAL; | |
2432 | ||
2433 | desc = irq_to_desc(irq); | |
2434 | if (!desc || !irq_settings_can_request(desc) || | |
2435 | !irq_settings_is_per_cpu_devid(desc)) | |
2436 | return -EINVAL; | |
2437 | ||
c80081b9 DL |
2438 | if (flags && flags != IRQF_TIMER) |
2439 | return -EINVAL; | |
2440 | ||
31d9d9b6 MZ |
2441 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); |
2442 | if (!action) | |
2443 | return -ENOMEM; | |
2444 | ||
2445 | action->handler = handler; | |
c80081b9 | 2446 | action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND; |
31d9d9b6 MZ |
2447 | action->name = devname; |
2448 | action->percpu_dev_id = dev_id; | |
2449 | ||
be45beb2 | 2450 | retval = irq_chip_pm_get(&desc->irq_data); |
4396f46c SL |
2451 | if (retval < 0) { |
2452 | kfree(action); | |
be45beb2 | 2453 | return retval; |
4396f46c | 2454 | } |
be45beb2 | 2455 | |
31d9d9b6 | 2456 | retval = __setup_irq(irq, desc, action); |
31d9d9b6 | 2457 | |
be45beb2 JH |
2458 | if (retval) { |
2459 | irq_chip_pm_put(&desc->irq_data); | |
31d9d9b6 | 2460 | kfree(action); |
be45beb2 | 2461 | } |
31d9d9b6 MZ |
2462 | |
2463 | return retval; | |
2464 | } | |
c80081b9 | 2465 | EXPORT_SYMBOL_GPL(__request_percpu_irq); |
1b7047ed | 2466 | |
4b078c3f JT |
2467 | /** |
2468 | * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery | |
2469 | * @irq: Interrupt line to allocate | |
2470 | * @handler: Function to be called when the IRQ occurs. | |
2471 | * @name: An ascii name for the claiming device | |
2472 | * @dev_id: A percpu cookie passed back to the handler function | |
2473 | * | |
2474 | * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs | |
a5186694 JT |
2475 | * have to be set up on each CPU by calling prepare_percpu_nmi() before | |
2476 | * being enabled on the same CPU by using enable_percpu_nmi(). | |
4b078c3f JT |
2477 | * |
2478 | * Dev_id must be globally unique. It is a per-cpu variable, and | |
2479 | * the handler gets called with the interrupted CPU's instance of | |
2480 | * that variable. | |
2481 | * | |
2482 | * Interrupt lines requested for NMI delivery should have the auto enable | |
2483 | * setting disabled. | |
2484 | * | |
2485 | * If the interrupt line cannot be used to deliver NMIs, the function | |
2486 | * will fail, returning a negative value. | |
2487 | */ | |
2488 | int request_percpu_nmi(unsigned int irq, irq_handler_t handler, | |
2489 | const char *name, void __percpu *dev_id) | |
2490 | { | |
2491 | struct irqaction *action; | |
2492 | struct irq_desc *desc; | |
2493 | unsigned long flags; | |
2494 | int retval; | |
2495 | ||
2496 | if (!handler) | |
2497 | return -EINVAL; | |
2498 | ||
2499 | desc = irq_to_desc(irq); | |
2500 | ||
2501 | if (!desc || !irq_settings_can_request(desc) || | |
2502 | !irq_settings_is_per_cpu_devid(desc) || | |
2503 | irq_settings_can_autoenable(desc) || | |
2504 | !irq_supports_nmi(desc)) | |
2505 | return -EINVAL; | |
2506 | ||
2507 | /* The line cannot already be NMI */ | |
2508 | if (desc->istate & IRQS_NMI) | |
2509 | return -EINVAL; | |
2510 | ||
2511 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); | |
2512 | if (!action) | |
2513 | return -ENOMEM; | |
2514 | ||
2515 | action->handler = handler; | |
2516 | action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD | |
2517 | | IRQF_NOBALANCING; | |
2518 | action->name = name; | |
2519 | action->percpu_dev_id = dev_id; | |
2520 | ||
2521 | retval = irq_chip_pm_get(&desc->irq_data); | |
2522 | if (retval < 0) | |
2523 | goto err_out; | |
2524 | ||
2525 | retval = __setup_irq(irq, desc, action); | |
2526 | if (retval) | |
2527 | goto err_irq_setup; | |
2528 | ||
2529 | raw_spin_lock_irqsave(&desc->lock, flags); | |
2530 | desc->istate |= IRQS_NMI; | |
2531 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
2532 | ||
2533 | return 0; | |
2534 | ||
2535 | err_irq_setup: | |
2536 | irq_chip_pm_put(&desc->irq_data); | |
2537 | err_out: | |
2538 | kfree(action); | |
2539 | ||
2540 | return retval; | |
2541 | } | |
2542 | ||
2543 | /** | |
2544 | * prepare_percpu_nmi - performs CPU local setup for NMI delivery | |
2545 | * @irq: Interrupt line to prepare for NMI delivery | |
2546 | * | |
2547 | * This call prepares an interrupt line to deliver NMI on the current CPU, | |
2548 | * before that interrupt line gets enabled with enable_percpu_nmi(). | |
2549 | * | |
2550 | * As a CPU local operation, this should be called from non-preemptible | |
2551 | * context. | |
2552 | * | |
2553 | * If the interrupt line cannot be used to deliver NMIs, function | |
2554 | * will fail returning a negative value. | |
2555 | */ | |
2556 | int prepare_percpu_nmi(unsigned int irq) | |
2557 | { | |
2558 | unsigned long flags; | |
2559 | struct irq_desc *desc; | |
2560 | int ret = 0; | |
2561 | ||
2562 | WARN_ON(preemptible()); | |
2563 | ||
2564 | desc = irq_get_desc_lock(irq, &flags, | |
2565 | IRQ_GET_DESC_CHECK_PERCPU); | |
2566 | if (!desc) | |
2567 | return -EINVAL; | |
2568 | ||
2569 | if (WARN(!(desc->istate & IRQS_NMI), | |
2570 | KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n", | |
2571 | irq)) { | |
2572 | ret = -EINVAL; | |
2573 | goto out; | |
2574 | } | |
2575 | ||
2576 | ret = irq_nmi_setup(desc); | |
2577 | if (ret) { | |
2578 | pr_err("Failed to setup NMI delivery: irq %u\n", irq); | |
2579 | goto out; | |
2580 | } | |
2581 | ||
2582 | out: | |
2583 | irq_put_desc_unlock(desc, flags); | |
2584 | return ret; | |
2585 | } | |
2586 | ||
2587 | /** | |
2588 | * teardown_percpu_nmi - undoes NMI setup of IRQ line | |
2589 | * @irq: Interrupt line from which CPU local NMI configuration should be | |
2590 | * removed | |
2591 | * | |
2592 | * This call undoes the setup done by prepare_percpu_nmi(). | |
2593 | * | |
2594 | * IRQ line should not be enabled for the current CPU. | |
2595 | * | |
2596 | * As a CPU local operation, this should be called from non-preemptible | |
2597 | * context. | |
2598 | */ | |
2599 | void teardown_percpu_nmi(unsigned int irq) | |
2600 | { | |
2601 | unsigned long flags; | |
2602 | struct irq_desc *desc; | |
2603 | ||
2604 | WARN_ON(preemptible()); | |
2605 | ||
2606 | desc = irq_get_desc_lock(irq, &flags, | |
2607 | IRQ_GET_DESC_CHECK_PERCPU); | |
2608 | if (!desc) | |
2609 | return; | |
2610 | ||
2611 | if (WARN_ON(!(desc->istate & IRQS_NMI))) | |
2612 | goto out; | |
2613 | ||
2614 | irq_nmi_teardown(desc); | |
2615 | out: | |
2616 | irq_put_desc_unlock(desc, flags); | |
2617 | } | |
2618 | ||
62e04686 TG |
2619 | int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which, |
2620 | bool *state) | |
2621 | { | |
2622 | struct irq_chip *chip; | |
2623 | int err = -EINVAL; | |
2624 | ||
2625 | do { | |
2626 | chip = irq_data_get_irq_chip(data); | |
2627 | if (chip->irq_get_irqchip_state) | |
2628 | break; | |
2629 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | |
2630 | data = data->parent_data; | |
2631 | #else | |
2632 | data = NULL; | |
2633 | #endif | |
2634 | } while (data); | |
2635 | ||
2636 | if (data) | |
2637 | err = chip->irq_get_irqchip_state(data, which, state); | |
2638 | return err; | |
2639 | } | |
2640 | ||
1b7047ed MZ |
2641 | /** |
2642 | * irq_get_irqchip_state - returns the irqchip state of an interrupt. | |
2643 | * @irq: Interrupt line that is forwarded to a VM | |
2644 | * @which: One of IRQCHIP_STATE_* the caller wants to know about | |
2645 | * @state: a pointer to a boolean where the state is to be stored | |
2646 | * | |
2647 | * This call snapshots the internal irqchip state of an | |
2648 | * interrupt, returning into @state the bit corresponding to | |
2649 | * state @which. | |
2650 | * | |
2651 | * This function should be called with preemption disabled if the | |
2652 | * interrupt controller has per-cpu registers. | |
2653 | */ | |
2654 | int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, | |
2655 | bool *state) | |
2656 | { | |
2657 | struct irq_desc *desc; | |
2658 | struct irq_data *data; | |
1b7047ed MZ |
2659 | unsigned long flags; |
2660 | int err = -EINVAL; | |
2661 | ||
2662 | desc = irq_get_desc_buslock(irq, &flags, 0); | |
2663 | if (!desc) | |
2664 | return err; | |
2665 | ||
2666 | data = irq_desc_get_irq_data(desc); | |
2667 | ||
62e04686 | 2668 | err = __irq_get_irqchip_state(data, which, state); |
1b7047ed MZ |
2669 | |
2670 | irq_put_desc_busunlock(desc, flags); | |
2671 | return err; | |
2672 | } | |
1ee4fb3e | 2673 | EXPORT_SYMBOL_GPL(irq_get_irqchip_state); |
1b7047ed MZ |
2674 | |
2675 | /** | |
2676 | * irq_set_irqchip_state - set the state of a forwarded interrupt. | |
2677 | * @irq: Interrupt line that is forwarded to a VM | |
2678 | * @which: State to be restored (one of IRQCHIP_STATE_*) | |
2679 | * @val: Value corresponding to @which | |
2680 | * | |
2681 | * This call sets the internal irqchip state of an interrupt, | |
2682 | * depending on the value of @which. | |
2683 | * | |
2684 | * This function should be called with preemption disabled if the | |
2685 | * interrupt controller has per-cpu registers. | |
2686 | */ | |
2687 | int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, | |
2688 | bool val) | |
2689 | { | |
2690 | struct irq_desc *desc; | |
2691 | struct irq_data *data; | |
2692 | struct irq_chip *chip; | |
2693 | unsigned long flags; | |
2694 | int err = -EINVAL; | |
2695 | ||
2696 | desc = irq_get_desc_buslock(irq, &flags, 0); | |
2697 | if (!desc) | |
2698 | return err; | |
2699 | ||
2700 | data = irq_desc_get_irq_data(desc); | |
2701 | ||
2702 | do { | |
2703 | chip = irq_data_get_irq_chip(data); | |
2704 | if (chip->irq_set_irqchip_state) | |
2705 | break; | |
2706 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | |
2707 | data = data->parent_data; | |
2708 | #else | |
2709 | data = NULL; | |
2710 | #endif | |
2711 | } while (data); | |
2712 | ||
2713 | if (data) | |
2714 | err = chip->irq_set_irqchip_state(data, which, val); | |
2715 | ||
2716 | irq_put_desc_busunlock(desc, flags); | |
2717 | return err; | |
2718 | } | |
1ee4fb3e | 2719 | EXPORT_SYMBOL_GPL(irq_set_irqchip_state); |