/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

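/*
 * Usage sketch (illustration only, not part of this file): with the
 * option above compiled in, booting with "threadirqs" on the kernel
 * command line sets force_irqthreads, so primary handlers that do not
 * carry IRQF_NO_THREAD are pushed into kernel threads. A hypothetical
 * bootloader entry:
 *
 *	linux ... root=/dev/sda1 threadirqs
 */
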
static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need,
 * you will deadlock. It does not take associated threaded
 * handlers into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);

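/*
 * Usage sketch (hypothetical driver teardown, for illustration only;
 * "my_dev" and its members are made-up names). synchronize_irq() is
 * typically called after the device has been told to stop raising
 * interrupts, but before resources the handler uses are freed:
 *
 *	static void my_dev_shutdown(struct my_dev *dev)
 *	{
 *		writel(0, dev->regs + MY_DEV_IRQ_ENABLE);
 *		synchronize_irq(dev->irq);
 *		kfree(dev->dma_buf);
 *	}
 *
 * After synchronize_irq() returns, neither the hard irq handler nor a
 * threaded handler for dev->irq is still executing on any CPU.
 */
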
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if the affinity of an irq can be set from user space
 * @irq: Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor whose affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

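/*
 * Usage sketch (hypothetical, for illustration only; names are made up):
 * a multi-queue driver might spread its per-queue vectors across CPUs
 * and record the choice so tools like irqbalance can read it from
 * /proc/irq/<n>/affinity_hint:
 *
 *	for (i = 0; i < nr_queues; i++)
 *		irq_set_affinity_hint(queue_irq[i],
 *				      cpumask_of(i % num_online_cpus()));
 *
 * The hint must be cleared with irq_set_affinity_hint(irq, NULL)
 * before the interrupt is freed.
 */
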
static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 *          notification. Function pointers must be initialised;
 *          the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);

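/*
 * Usage sketch (hypothetical, for illustration only; "my_ctx" and the
 * callbacks are made-up names). A driver embeds a struct
 * irq_affinity_notify, fills in the two function pointers and registers
 * it; the core invokes ->notify() from a workqueue whenever the
 * affinity changes and ->release() when the last reference drops:
 *
 *	static void my_notify(struct irq_affinity_notify *n,
 *			      const cpumask_t *mask)
 *	{
 *		struct my_ctx *ctx = container_of(n, struct my_ctx, notify);
 *		...
 *	}
 *
 *	ctx->notify.notify = my_notify;
 *	ctx->notify.release = my_release;
 *	irq_set_affinity_notifier(irq, &ctx->notify);
 */
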
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif

/*
 * Called when a bogus affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_setup_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
#endif

/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq: interrupt number to set affinity
 * @vcpu_info: vCPU specific data
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	chip = irq_data_get_irq_chip(data);
	if (chip && chip->irq_set_vcpu_affinity)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

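/*
 * Usage sketch (hypothetical, for illustration only): because disables
 * nest, each disable_irq() must be balanced by one enable_irq() before
 * the line is delivered again:
 *
 *	disable_irq(dev->irq);		depth 0 -> 1, line masked
 *	disable_irq(dev->irq);		depth 1 -> 2
 *	enable_irq(dev->irq);		depth 2 -> 1, still masked
 *	enable_irq(dev->irq);		depth 1 -> 0, line enabled again
 *
 * Calling disable_irq() from the interrupt's own handler (or while
 * holding a lock the handler takes) would deadlock, since it waits for
 * the handler to complete; use disable_irq_nosync() there instead.
 */
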
/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need, you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);

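/*
 * Usage sketch (hypothetical, for illustration only): from atomic
 * context a caller can only wait for the hard irq part, so the result
 * must be checked before touching state a threaded handler may still
 * be using:
 *
 *	if (disable_hardirq(dev->irq)) {
 *		... no handler, hard or threaded, is running ...
 *	} else {
 *		... a threaded handler is still active, fall back ...
 *	}
 *	enable_irq(dev->irq);
 */
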
void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock
 * are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);

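/*
 * Usage sketch (hypothetical, for illustration only; names are made
 * up): a driver whose interrupt should wake the system from suspend
 * typically flips wake mode in its PM callbacks, keeping enables and
 * disables balanced:
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		if (device_may_wakeup(d))
 *			irq_set_irq_wake(dev->irq, 1);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *d)
 *	{
 *		if (device_may_wakeup(d))
 *			irq_set_irq_wake(dev->irq, 0);
 *		return 0;
 *	}
 */
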
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect against
	 * the following scenario:
	 *
	 * The thread finishes faster than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity))
		cpumask_copy(mask, desc->irq_common_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_HANDLED)
			atomic_inc(&desc->threads_handled);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out current's thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq: Interrupt line
 * @dev_id: Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);

static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler != irq_default_primary_handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;
	struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
		param.sched_priority -= 1;
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	get_task_struct(t);
	new->thread = t;
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		unsigned int oldtype = irqd_get_trigger_type(&desc->irq_data);

		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_unlock;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->threads_oneshot to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed, desc->threads_oneshot becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1 << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is re-enabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!shared) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_unlock;
		}

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret) {
				irq_release_resources(desc);
				goto out_unlock;
			}
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		if (irq_settings_can_autoenable(desc)) {
			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		} else {
			/*
			 * Shared interrupts do not go well with disabling
			 * auto enable. The sharing interrupt might request
			 * it while it's still disabled and then wait for
			 * interrupts forever.
			 */
			WARN_ON_ONCE(new->flags & IRQF_SHARED);
			/* Undo nested disables: */
			desc->depth = 1;
		}

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	irq_setup_timings(desc, new);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);

	register_irq_proc(irq, desc);
	irq_add_debugfs_entry(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_unlock:
	raw_spin_unlock_irqrestore(&desc->lock, flags);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

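/*
 * Usage sketch (hypothetical, for illustration only; "my_dev" and the
 * helper are made-up names). request_threaded_irq() is the usual
 * driver-facing entry point that ends up in __setup_irq(). A device on
 * a slow bus registers no primary handler and a sleeping thread
 * function, so IRQF_ONESHOT is required to keep a level-triggered line
 * masked until the thread has finished:
 *
 *	static irqreturn_t my_thread_fn(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		my_read_status_over_i2c(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(irq, NULL, my_thread_fn,
 *				   IRQF_ONESHOT | IRQF_TRIGGER_LOW,
 *				   "my-dev", dev);
 */
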
/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

31d9d9b6 | 1431 | /* |
cbf94f06 MD |
1432 | * Internal function to unregister an irqaction - used to free |
1433 | * regular and special interrupts that are part of the architecture. | |
1da177e4 | 1434 | */ |
cbf94f06 | 1435 | static struct irqaction *__free_irq(unsigned int irq, void *dev_id) |
1da177e4 | 1436 | { |
d3c60047 | 1437 | struct irq_desc *desc = irq_to_desc(irq); |
f17c7545 | 1438 | struct irqaction *action, **action_ptr; |
1da177e4 LT |
1439 | unsigned long flags; |
1440 | ||
ae88a23b | 1441 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); |
7d94f7ca | 1442 | |
7d94f7ca | 1443 | if (!desc) |
f21cfb25 | 1444 | return NULL; |
1da177e4 | 1445 | |
abc7e40c | 1446 | chip_bus_lock(desc); |
239007b8 | 1447 | raw_spin_lock_irqsave(&desc->lock, flags); |
ae88a23b IM |
1448 | |
1449 | /* | |
1450 | * There can be multiple actions per IRQ descriptor, find the right | |
1451 | * one based on the dev_id: | |
1452 | */ | |
f17c7545 | 1453 | action_ptr = &desc->action; |
1da177e4 | 1454 | for (;;) { |
f17c7545 | 1455 | action = *action_ptr; |
1da177e4 | 1456 | |
ae88a23b IM |
1457 | if (!action) { |
1458 | WARN(1, "Trying to free already-free IRQ %d\n", irq); | |
239007b8 | 1459 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
abc7e40c | 1460 | chip_bus_sync_unlock(desc); |
f21cfb25 | 1461 | return NULL; |
ae88a23b | 1462 | } |
1da177e4 | 1463 | |
8316e381 IM |
1464 | if (action->dev_id == dev_id) |
1465 | break; | |
f17c7545 | 1466 | action_ptr = &action->next; |
ae88a23b | 1467 | } |
dbce706e | 1468 | |
ae88a23b | 1469 | /* Found it - now remove it from the list of entries: */ |
f17c7545 | 1470 | *action_ptr = action->next; |
ae88a23b | 1471 | |
cab303be TG |
1472 | irq_pm_remove_action(desc, action); |
1473 | ||
ae88a23b | 1474 | /* If this was the last handler, shut down the IRQ line: */ |
c1bacbae | 1475 | if (!desc->action) { |
e9849777 | 1476 | irq_settings_clr_disable_unlazy(desc); |
46999238 | 1477 | irq_shutdown(desc); |
c1bacbae | 1478 | irq_release_resources(desc); |
b2d3d61a | 1479 | irq_remove_timings(desc); |
c1bacbae | 1480 | } |
3aa551c9 | 1481 | |
e7a297b0 PWJ |
1482 | #ifdef CONFIG_SMP |
1483 | /* make sure affinity_hint is cleaned up */ | |
1484 | if (WARN_ON_ONCE(desc->affinity_hint)) | |
1485 | desc->affinity_hint = NULL; | |
1486 | #endif | |
1487 | ||
239007b8 | 1488 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
abc7e40c | 1489 | chip_bus_sync_unlock(desc); |
ae88a23b IM |
1490 | |
1491 | unregister_handler_proc(irq, action); | |
1492 | ||
1493 | /* Make sure it's not being used on another CPU: */ | |
1494 | synchronize_irq(irq); | |
1da177e4 | 1495 | |
70edcd77 | 1496 | #ifdef CONFIG_DEBUG_SHIRQ |
ae88a23b IM |
1497 | /* |
1498 | * It's a shared IRQ -- the driver ought to be prepared for an IRQ | |
1499 | * event to happen even now that it's being freed, so let's make sure | |
1500 | * that is so by doing an extra call to the handler ... | |
1501 | * | |
1502 | * ( We do this after actually deregistering it, to make sure that a | |
1503 | * 'real' IRQ doesn't run in parallel with our fake. ) | |
1504 | */ | |
1505 | if (action->flags & IRQF_SHARED) { | |
1506 | local_irq_save(flags); | |
1507 | action->handler(irq, dev_id); | |
1508 | local_irq_restore(flags); | |
1da177e4 | 1509 | } |
ae88a23b | 1510 | #endif |
2d860ad7 LT |
1511 | |
1512 | if (action->thread) { | |
05d74efa | 1513 | kthread_stop(action->thread); |
2d860ad7 | 1514 | put_task_struct(action->thread); |
2a1d3ab8 TG |
1515 | if (action->secondary && action->secondary->thread) { |
1516 | kthread_stop(action->secondary->thread); | |
1517 | put_task_struct(action->secondary->thread); | |
1518 | } | |
2d860ad7 LT |
1519 | } |
1520 | ||
be45beb2 | 1521 | irq_chip_pm_put(&desc->irq_data); |
b6873807 | 1522 | module_put(desc->owner); |
2a1d3ab8 | 1523 | kfree(action->secondary); |
f21cfb25 MD |
1524 | return action; |
1525 | } | |
1526 | ||
cbf94f06 MD |
1527 | /** |
1528 | * remove_irq - free an interrupt | |
1529 | * @irq: Interrupt line to free | |
1530 | * @act: irqaction for the interrupt | |
1531 | * | |
1532 | * Used to remove interrupts statically set up by the early boot process. | |
1533 | */ | |
1534 | void remove_irq(unsigned int irq, struct irqaction *act) | |
1535 | { | |
31d9d9b6 MZ |
1536 | struct irq_desc *desc = irq_to_desc(irq); |
1537 | ||
1538 | if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) | |
a7e60e55 | 1539 | __free_irq(irq, act->dev_id); |
cbf94f06 | 1540 | } |
eb53b4e8 | 1541 | EXPORT_SYMBOL_GPL(remove_irq); |
cbf94f06 | 1542 | |
f21cfb25 MD |
1543 | /** |
1544 | * free_irq - free an interrupt allocated with request_irq | |
1545 | * @irq: Interrupt line to free | |
1546 | * @dev_id: Device identity to free | |
1547 | * | |
1548 | * Remove an interrupt handler. The handler is removed and if the | |
1549 | * interrupt line is no longer in use by any driver it is disabled. | |
1550 | * On a shared IRQ the caller must ensure the interrupt is disabled | |
1551 | * on the card it drives before calling this function. The function | |
1552 | * does not return until any executing interrupts for this IRQ | |
1553 | * have completed. | |
1554 | * | |
1555 | * This function must not be called from interrupt context. | |
25ce4be7 CH |
1556 | * |
1557 | * Returns the devname argument passed to request_irq. | |
f21cfb25 | 1558 | */ |
25ce4be7 | 1559 | const void *free_irq(unsigned int irq, void *dev_id) |
f21cfb25 | 1560 | { |
70aedd24 | 1561 | struct irq_desc *desc = irq_to_desc(irq); |
25ce4be7 CH |
1562 | struct irqaction *action; |
1563 | const char *devname; | |
70aedd24 | 1564 | |
31d9d9b6 | 1565 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
25ce4be7 | 1566 | return NULL; |
70aedd24 | 1567 | |
cd7eab44 BH |
1568 | #ifdef CONFIG_SMP |
1569 | if (WARN_ON(desc->affinity_notify)) | |
1570 | desc->affinity_notify = NULL; | |
1571 | #endif | |
1572 | ||
25ce4be7 CH |
1573 | action = __free_irq(irq, dev_id); |
1574 | devname = action->name; | |
1575 | kfree(action); | |
1576 | return devname; | |
1da177e4 | 1577 | } |
1da177e4 LT |
1578 | EXPORT_SYMBOL(free_irq); |
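One use for the returned name, as a sketch (the driver structure is hypothetical): a caller that generated the devname string dynamically at request time can reclaim it once the action is gone, since kfree() accepts a const pointer:

struct foo_dev { unsigned int irq; };  /* hypothetical driver state */

static void foo_teardown_irq(struct foo_dev *foo)
{
        const void *devname;

        /* on a shared line, quiesce the card first, per the rules above */
        devname = free_irq(foo->irq, foo);
        kfree(devname);         /* only if the name was kmalloc()ed at request time */
}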
1579 | ||
1580 | /** | |
3aa551c9 | 1581 | * request_threaded_irq - allocate an interrupt line |
1da177e4 | 1582 | * @irq: Interrupt line to allocate |
3aa551c9 TG |
1583 | * @handler: Function to be called when the IRQ occurs. |
1584 | * Primary handler for threaded interrupts | |
b25c340c TG |
1585 | * If NULL and thread_fn != NULL the default |
1586 | * primary handler is installed | |
f48fe81e TG |
1587 | * @thread_fn: Function called from the irq handler thread |
1588 | * If NULL, no irq thread is created | |
1da177e4 LT |
1589 | * @irqflags: Interrupt type flags |
1590 | * @devname: An ASCII name for the claiming device | |
1591 | * @dev_id: A cookie passed back to the handler function | |
1592 | * | |
1593 | * This call allocates interrupt resources and enables the | |
1594 | * interrupt line and IRQ handling. From the point this | |
1595 | * call is made your handler function may be invoked. Since | |
1596 | * your handler function must clear any interrupt the board | |
1597 | * raises, you must take care both to initialise your hardware | |
1598 | * and to set up the interrupt handler in the right order. | |
1599 | * | |
3aa551c9 | 1600 | * If you want to set up a threaded irq handler for your device |
6d21af4f | 1601 | * then you need to supply @handler and @thread_fn. @handler is |
3aa551c9 TG |
1602 | * still called in hard interrupt context and has to check |
1603 | * whether the interrupt originates from the device. If yes, it | |
1604 | * needs to disable the interrupt on the device and return | |
39a2eddb | 1605 | * IRQ_WAKE_THREAD which will wake up the handler thread and run |
3aa551c9 TG |
1606 | * @thread_fn. This split handler design is necessary to support |
1607 | * shared interrupts. | |
1608 | * | |
1da177e4 LT |
1609 | * Dev_id must be globally unique. Normally the address of the |
1610 | * device data structure is used as the cookie. Since the handler | |
1611 | * receives this value it makes sense to use it. | |
1612 | * | |
1613 | * If your interrupt is shared, you must pass a non-NULL dev_id | |
1614 | * as this is required when freeing the interrupt. | |
1615 | * | |
1616 | * Flags: | |
1617 | * | |
3cca53b0 | 1618 | * IRQF_SHARED Interrupt is shared |
0c5d1eb7 | 1619 | * IRQF_TRIGGER_* Specify active edge(s) or level |
1da177e4 LT |
1620 | * |
1621 | */ | |
3aa551c9 TG |
1622 | int request_threaded_irq(unsigned int irq, irq_handler_t handler, |
1623 | irq_handler_t thread_fn, unsigned long irqflags, | |
1624 | const char *devname, void *dev_id) | |
1da177e4 | 1625 | { |
06fcb0c6 | 1626 | struct irqaction *action; |
08678b08 | 1627 | struct irq_desc *desc; |
d3c60047 | 1628 | int retval; |
1da177e4 | 1629 | |
e237a551 CF |
1630 | if (irq == IRQ_NOTCONNECTED) |
1631 | return -ENOTCONN; | |
1632 | ||
1da177e4 LT |
1633 | /* |
1634 | * Sanity-check: shared interrupts must pass in a real dev-ID, | |
1635 | * otherwise we'll have trouble later trying to figure out | |
1636 | * which interrupt is which (messes up the interrupt freeing | |
1637 | * logic etc). | |
17f48034 RW |
1638 | * |
1639 | * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and | |
1640 | * it cannot be set along with IRQF_NO_SUSPEND. | |
1da177e4 | 1641 | */ |
17f48034 RW |
1642 | if (((irqflags & IRQF_SHARED) && !dev_id) || |
1643 | (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) || | |
1644 | ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND))) | |
1da177e4 | 1645 | return -EINVAL; |
7d94f7ca | 1646 | |
cb5bc832 | 1647 | desc = irq_to_desc(irq); |
7d94f7ca | 1648 | if (!desc) |
1da177e4 | 1649 | return -EINVAL; |
7d94f7ca | 1650 | |
31d9d9b6 MZ |
1651 | if (!irq_settings_can_request(desc) || |
1652 | WARN_ON(irq_settings_is_per_cpu_devid(desc))) | |
6550c775 | 1653 | return -EINVAL; |
b25c340c TG |
1654 | |
1655 | if (!handler) { | |
1656 | if (!thread_fn) | |
1657 | return -EINVAL; | |
1658 | handler = irq_default_primary_handler; | |
1659 | } | |
1da177e4 | 1660 | |
45535732 | 1661 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); |
1da177e4 LT |
1662 | if (!action) |
1663 | return -ENOMEM; | |
1664 | ||
1665 | action->handler = handler; | |
3aa551c9 | 1666 | action->thread_fn = thread_fn; |
1da177e4 | 1667 | action->flags = irqflags; |
1da177e4 | 1668 | action->name = devname; |
1da177e4 LT |
1669 | action->dev_id = dev_id; |
1670 | ||
be45beb2 | 1671 | retval = irq_chip_pm_get(&desc->irq_data); |
4396f46c SL |
1672 | if (retval < 0) { |
1673 | kfree(action); | |
be45beb2 | 1674 | return retval; |
4396f46c | 1675 | } |
be45beb2 | 1676 | |
3876ec9e | 1677 | chip_bus_lock(desc); |
d3c60047 | 1678 | retval = __setup_irq(irq, desc, action); |
3876ec9e | 1679 | chip_bus_sync_unlock(desc); |
70aedd24 | 1680 | |
2a1d3ab8 | 1681 | if (retval) { |
be45beb2 | 1682 | irq_chip_pm_put(&desc->irq_data); |
2a1d3ab8 | 1683 | kfree(action->secondary); |
377bf1e4 | 1684 | kfree(action); |
2a1d3ab8 | 1685 | } |
377bf1e4 | 1686 | |
6d83f94d | 1687 | #ifdef CONFIG_DEBUG_SHIRQ_FIXME |
6ce51c43 | 1688 | if (!retval && (irqflags & IRQF_SHARED)) { |
a304e1b8 DW |
1689 | /* |
1690 | * It's a shared IRQ -- the driver ought to be prepared for it | |
1691 | * to happen immediately, so let's make sure.... | |
377bf1e4 AV |
1692 | * We disable the irq to make sure that a 'real' IRQ doesn't |
1693 | * run in parallel with our fake. | |
a304e1b8 | 1694 | */ |
59845b1f | 1695 | unsigned long flags; |
a304e1b8 | 1696 | |
377bf1e4 | 1697 | disable_irq(irq); |
59845b1f | 1698 | local_irq_save(flags); |
377bf1e4 | 1699 | |
59845b1f | 1700 | handler(irq, dev_id); |
377bf1e4 | 1701 | |
59845b1f | 1702 | local_irq_restore(flags); |
377bf1e4 | 1703 | enable_irq(irq); |
a304e1b8 DW |
1704 | } |
1705 | #endif | |
1da177e4 LT |
1706 | return retval; |
1707 | } | |
3aa551c9 | 1708 | EXPORT_SYMBOL(request_threaded_irq); |
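A sketch of the split primary/threaded design described in the kerneldoc above, reusing the hypothetical struct foo_dev from the earlier sketch (all foo_* device helpers are hypothetical):

static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
        struct foo_dev *foo = dev_id;

        /* hard interrupt context: check whether this shared line
         * fired for our device at all */
        if (!foo_irq_pending(foo))
                return IRQ_NONE;

        /* quiesce the device so it stops asserting the line, then
         * defer the real work to the thread */
        foo_mask_device_irq(foo);
        return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
        struct foo_dev *foo = dev_id;

        /* thread context: sleeping (e.g. for bus transactions) is fine */
        foo_process_events(foo);
        foo_unmask_device_irq(foo);
        return IRQ_HANDLED;
}

static int foo_request(struct foo_dev *foo)
{
        return request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
                                    IRQF_SHARED, "foo", foo);
}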
ae731f8d MZ |
1709 | |
1710 | /** | |
1711 | * request_any_context_irq - allocate an interrupt line | |
1712 | * @irq: Interrupt line to allocate | |
1713 | * @handler: Function to be called when the IRQ occurs. | |
1714 | * Threaded handler for threaded interrupts. | |
1715 | * @flags: Interrupt type flags | |
1716 | * @name: An ASCII name for the claiming device | |
1717 | * @dev_id: A cookie passed back to the handler function | |
1718 | * | |
1719 | * This call allocates interrupt resources and enables the | |
1720 | * interrupt line and IRQ handling. It selects either a | |
1721 | * hardirq or threaded handling method depending on the | |
1722 | * context. | |
1723 | * | |
1724 | * On failure, it returns a negative value. On success, | |
1725 | * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. | |
1726 | */ | |
1727 | int request_any_context_irq(unsigned int irq, irq_handler_t handler, | |
1728 | unsigned long flags, const char *name, void *dev_id) | |
1729 | { | |
e237a551 | 1730 | struct irq_desc *desc; |
ae731f8d MZ |
1731 | int ret; |
1732 | ||
e237a551 CF |
1733 | if (irq == IRQ_NOTCONNECTED) |
1734 | return -ENOTCONN; | |
1735 | ||
1736 | desc = irq_to_desc(irq); | |
ae731f8d MZ |
1737 | if (!desc) |
1738 | return -EINVAL; | |
1739 | ||
1ccb4e61 | 1740 | if (irq_settings_is_nested_thread(desc)) { |
ae731f8d MZ |
1741 | ret = request_threaded_irq(irq, NULL, handler, |
1742 | flags, name, dev_id); | |
1743 | return !ret ? IRQC_IS_NESTED : ret; | |
1744 | } | |
1745 | ||
1746 | ret = request_irq(irq, handler, flags, name, dev_id); | |
1747 | return !ret ? IRQC_IS_HARDIRQ : ret; | |
1748 | } | |
1749 | EXPORT_SYMBOL_GPL(request_any_context_irq); | |
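One caller-side subtlety worth a sketch (names hypothetical, foo_handler being some irq_handler_t): success is reported as IRQC_IS_HARDIRQ or IRQC_IS_NESTED rather than plain zero, so error checks must test for negative values:

static int foo_probe_irq(struct foo_dev *foo)
{
        int ret;

        ret = request_any_context_irq(foo->irq, foo_handler,
                                      IRQF_TRIGGER_LOW, "foo", foo);
        if (ret < 0)            /* success may be non-zero (IRQC_IS_NESTED) */
                return ret;

        /* ret now says which handling context was chosen */
        return 0;
}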
31d9d9b6 | 1750 | |
1e7c5fd2 | 1751 | void enable_percpu_irq(unsigned int irq, unsigned int type) |
31d9d9b6 MZ |
1752 | { |
1753 | unsigned int cpu = smp_processor_id(); | |
1754 | unsigned long flags; | |
1755 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); | |
1756 | ||
1757 | if (!desc) | |
1758 | return; | |
1759 | ||
f35ad083 MZ |
1760 | /* |
1761 | * If the trigger type is not specified by the caller, then | |
1762 | * use the default for this interrupt. | |
1763 | */ | |
1e7c5fd2 | 1764 | type &= IRQ_TYPE_SENSE_MASK; |
f35ad083 MZ |
1765 | if (type == IRQ_TYPE_NONE) |
1766 | type = irqd_get_trigger_type(&desc->irq_data); | |
1767 | ||
1e7c5fd2 MZ |
1768 | if (type != IRQ_TYPE_NONE) { |
1769 | int ret; | |
1770 | ||
a1ff541a | 1771 | ret = __irq_set_trigger(desc, type); |
1e7c5fd2 MZ |
1772 | |
1773 | if (ret) { | |
32cffdde | 1774 | WARN(1, "failed to set type for IRQ%d\n", irq); |
1e7c5fd2 MZ |
1775 | goto out; |
1776 | } | |
1777 | } | |
1778 | ||
31d9d9b6 | 1779 | irq_percpu_enable(desc, cpu); |
1e7c5fd2 | 1780 | out: |
31d9d9b6 MZ |
1781 | irq_put_desc_unlock(desc, flags); |
1782 | } | |
36a5df85 | 1783 | EXPORT_SYMBOL_GPL(enable_percpu_irq); |
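Since enable_percpu_irq() acts only on the calling CPU, each CPU must run it itself; drivers usually do this from a CPU-hotplug callback, but a simple cross-call sketch illustrates the idea (the IRQ number is hypothetical):

static void foo_enable_on_cpu(void *info)
{
        /* IRQ_TYPE_NONE: keep the trigger type already configured */
        enable_percpu_irq(FOO_PERCPU_IRQ, IRQ_TYPE_NONE);
}

static void foo_enable_everywhere(void)
{
        /* run the callback on every online CPU and wait for completion */
        on_each_cpu(foo_enable_on_cpu, NULL, 1);
}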
31d9d9b6 | 1784 | |
f0cb3220 TP |
1785 | /** |
1786 | * irq_percpu_is_enabled - Check whether the per cpu irq is enabled | |
1787 | * @irq: Linux irq number to check for | |
1788 | * | |
1790 | * Must be called from a non-migratable context. Returns the enable | |
1790 | * state of a per cpu interrupt on the current cpu. | |
1791 | */ | |
1792 | bool irq_percpu_is_enabled(unsigned int irq) | |
1793 | { | |
1794 | unsigned int cpu = smp_processor_id(); | |
1795 | struct irq_desc *desc; | |
1796 | unsigned long flags; | |
1797 | bool is_enabled; | |
1798 | ||
1799 | desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); | |
1800 | if (!desc) | |
1801 | return false; | |
1802 | ||
1803 | is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); | |
1804 | irq_put_desc_unlock(desc, flags); | |
1805 | ||
1806 | return is_enabled; | |
1807 | } | |
1808 | EXPORT_SYMBOL_GPL(irq_percpu_is_enabled); | |
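Because the answer is only meaningful for the CPU the caller is on, a sketch would pin itself with get_cpu()/put_cpu() around the query (IRQ number hypothetical):

static bool foo_enabled_here(void)
{
        bool enabled;

        get_cpu();              /* disable preemption: no migration */
        enabled = irq_percpu_is_enabled(FOO_PERCPU_IRQ);
        put_cpu();
        return enabled;
}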
1809 | ||
31d9d9b6 MZ |
1810 | void disable_percpu_irq(unsigned int irq) |
1811 | { | |
1812 | unsigned int cpu = smp_processor_id(); | |
1813 | unsigned long flags; | |
1814 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); | |
1815 | ||
1816 | if (!desc) | |
1817 | return; | |
1818 | ||
1819 | irq_percpu_disable(desc, cpu); | |
1820 | irq_put_desc_unlock(desc, flags); | |
1821 | } | |
36a5df85 | 1822 | EXPORT_SYMBOL_GPL(disable_percpu_irq); |
31d9d9b6 MZ |
1823 | |
1824 | /* | |
1825 | * Internal function to unregister a percpu irqaction. | |
1826 | */ | |
1827 | static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id) | |
1828 | { | |
1829 | struct irq_desc *desc = irq_to_desc(irq); | |
1830 | struct irqaction *action; | |
1831 | unsigned long flags; | |
1832 | ||
1833 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); | |
1834 | ||
1835 | if (!desc) | |
1836 | return NULL; | |
1837 | ||
1838 | raw_spin_lock_irqsave(&desc->lock, flags); | |
1839 | ||
1840 | action = desc->action; | |
1841 | if (!action || action->percpu_dev_id != dev_id) { | |
1842 | WARN(1, "Trying to free already-free IRQ %d\n", irq); | |
1843 | goto bad; | |
1844 | } | |
1845 | ||
1846 | if (!cpumask_empty(desc->percpu_enabled)) { | |
1847 | WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", | |
1848 | irq, cpumask_first(desc->percpu_enabled)); | |
1849 | goto bad; | |
1850 | } | |
1851 | ||
1852 | /* Found it - now remove it from the list of entries: */ | |
1853 | desc->action = NULL; | |
1854 | ||
1855 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
1856 | ||
1857 | unregister_handler_proc(irq, action); | |
1858 | ||
be45beb2 | 1859 | irq_chip_pm_put(&desc->irq_data); |
31d9d9b6 MZ |
1860 | module_put(desc->owner); |
1861 | return action; | |
1862 | ||
1863 | bad: | |
1864 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
1865 | return NULL; | |
1866 | } | |
1867 | ||
1868 | /** | |
1869 | * remove_percpu_irq - free a per-cpu interrupt | |
1870 | * @irq: Interrupt line to free | |
1871 | * @act: irqaction for the interrupt | |
1872 | * | |
1873 | * Used to remove interrupts statically setup by the early boot process. | |
1874 | */ | |
1875 | void remove_percpu_irq(unsigned int irq, struct irqaction *act) | |
1876 | { | |
1877 | struct irq_desc *desc = irq_to_desc(irq); | |
1878 | ||
1879 | if (desc && irq_settings_is_per_cpu_devid(desc)) | |
1880 | __free_percpu_irq(irq, act->percpu_dev_id); | |
1881 | } | |
1882 | ||
1883 | /** | |
1884 | * free_percpu_irq - free an interrupt allocated with request_percpu_irq | |
1885 | * @irq: Interrupt line to free | |
1886 | * @dev_id: Device identity to free | |
1887 | * | |
1888 | * Remove a percpu interrupt handler. The handler is removed, but | |
1889 | * the interrupt line is not disabled; disabling must be done on each | |
1890 | * CPU before calling this function. The function does not return | |
1891 | * until any executing interrupts for this IRQ have completed. | |
1892 | * | |
1893 | * This function must not be called from interrupt context. | |
1894 | */ | |
1895 | void free_percpu_irq(unsigned int irq, void __percpu *dev_id) | |
1896 | { | |
1897 | struct irq_desc *desc = irq_to_desc(irq); | |
1898 | ||
1899 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) | |
1900 | return; | |
1901 | ||
1902 | chip_bus_lock(desc); | |
1903 | kfree(__free_percpu_irq(irq, dev_id)); | |
1904 | chip_bus_sync_unlock(desc); | |
1905 | } | |
aec2e2ad | 1906 | EXPORT_SYMBOL_GPL(free_percpu_irq); |
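A teardown sketch matching the rule above that the line must already be disabled on every CPU (names hypothetical):

static void foo_disable_on_cpu(void *info)
{
        disable_percpu_irq(FOO_PERCPU_IRQ);     /* acts on this CPU only */
}

static void foo_percpu_teardown(void __percpu *cookie)
{
        /* free_percpu_irq() does not disable the line for us */
        on_each_cpu(foo_disable_on_cpu, NULL, 1);
        free_percpu_irq(FOO_PERCPU_IRQ, cookie);
}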
31d9d9b6 MZ |
1907 | |
1908 | /** | |
1909 | * setup_percpu_irq - setup a per-cpu interrupt | |
1910 | * @irq: Interrupt line to setup | |
1911 | * @act: irqaction for the interrupt | |
1912 | * | |
1913 | * Used to statically set up per-cpu interrupts in the early boot process. | |
1914 | */ | |
1915 | int setup_percpu_irq(unsigned int irq, struct irqaction *act) | |
1916 | { | |
1917 | struct irq_desc *desc = irq_to_desc(irq); | |
1918 | int retval; | |
1919 | ||
1920 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) | |
1921 | return -EINVAL; | |
be45beb2 JH |
1922 | |
1923 | retval = irq_chip_pm_get(&desc->irq_data); | |
1924 | if (retval < 0) | |
1925 | return retval; | |
1926 | ||
31d9d9b6 MZ |
1927 | chip_bus_lock(desc); |
1928 | retval = __setup_irq(irq, desc, act); | |
1929 | chip_bus_sync_unlock(desc); | |
1930 | ||
be45beb2 JH |
1931 | if (retval) |
1932 | irq_chip_pm_put(&desc->irq_data); | |
1933 | ||
31d9d9b6 MZ |
1934 | return retval; |
1935 | } | |
1936 | ||
1937 | /** | |
1938 | * request_percpu_irq - allocate a percpu interrupt line | |
1939 | * @irq: Interrupt line to allocate | |
1940 | * @handler: Function to be called when the IRQ occurs. | |
1941 | * @devname: An ASCII name for the claiming device | |
1942 | * @dev_id: A percpu cookie passed back to the handler function | |
1943 | * | |
a1b7febd MR |
1944 | * This call allocates interrupt resources and enables the |
1945 | * interrupt on the local CPU. If the interrupt is supposed to be | |
1946 | * enabled on other CPUs, it has to be done on each CPU using | |
1947 | * enable_percpu_irq(). | |
31d9d9b6 MZ |
1948 | * |
1949 | * Dev_id must be globally unique. It is a per-cpu variable, and | |
1950 | * the handler gets called with the interrupted CPU's instance of | |
1951 | * that variable. | |
1952 | */ | |
1953 | int request_percpu_irq(unsigned int irq, irq_handler_t handler, | |
1954 | const char *devname, void __percpu *dev_id) | |
1955 | { | |
1956 | struct irqaction *action; | |
1957 | struct irq_desc *desc; | |
1958 | int retval; | |
1959 | ||
1960 | if (!dev_id) | |
1961 | return -EINVAL; | |
1962 | ||
1963 | desc = irq_to_desc(irq); | |
1964 | if (!desc || !irq_settings_can_request(desc) || | |
1965 | !irq_settings_is_per_cpu_devid(desc)) | |
1966 | return -EINVAL; | |
1967 | ||
1968 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); | |
1969 | if (!action) | |
1970 | return -ENOMEM; | |
1971 | ||
1972 | action->handler = handler; | |
2ed0e645 | 1973 | action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND; |
31d9d9b6 MZ |
1974 | action->name = devname; |
1975 | action->percpu_dev_id = dev_id; | |
1976 | ||
be45beb2 | 1977 | retval = irq_chip_pm_get(&desc->irq_data); |
4396f46c SL |
1978 | if (retval < 0) { |
1979 | kfree(action); | |
be45beb2 | 1980 | return retval; |
4396f46c | 1981 | } |
be45beb2 | 1982 | |
31d9d9b6 MZ |
1983 | chip_bus_lock(desc); |
1984 | retval = __setup_irq(irq, desc, action); | |
1985 | chip_bus_sync_unlock(desc); | |
1986 | ||
be45beb2 JH |
1987 | if (retval) { |
1988 | irq_chip_pm_put(&desc->irq_data); | |
31d9d9b6 | 1989 | kfree(action); |
be45beb2 | 1990 | } |
31d9d9b6 MZ |
1991 | |
1992 | return retval; | |
1993 | } | |
aec2e2ad | 1994 | EXPORT_SYMBOL_GPL(request_percpu_irq); |
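A sketch of the percpu cookie contract described above (the type, IRQ number, and bookkeeping are hypothetical):

struct foo_pcpu_state { unsigned long count; };
static DEFINE_PER_CPU(struct foo_pcpu_state, foo_state);

static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
{
        /* dev_id is the interrupted CPU's instance of foo_state */
        struct foo_pcpu_state *st = dev_id;

        st->count++;
        return IRQ_HANDLED;
}

static int foo_percpu_init(void)
{
        int ret;

        ret = request_percpu_irq(FOO_PERCPU_IRQ, foo_percpu_handler,
                                 "foo-percpu", &foo_state);
        if (ret)
                return ret;

        /* other CPUs still need enable_percpu_irq(), as noted above */
        return 0;
}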
1b7047ed MZ |
1995 | |
1996 | /** | |
1997 | * irq_get_irqchip_state - returns the irqchip state of an interrupt. | |
1998 | * @irq: Interrupt line that is forwarded to a VM | |
1999 | * @which: One of IRQCHIP_STATE_* the caller wants to know about | |
2000 | * @state: a pointer to a boolean where the state is to be stored | |
2001 | * | |
2002 | * This call snapshots the internal irqchip state of an | |
2003 | * interrupt, returning into @state the bit corresponding to | |
2004 | * state @which | |
2005 | * | |
2006 | * This function should be called with preemption disabled if the | |
2007 | * interrupt controller has per-cpu registers. | |
2008 | */ | |
2009 | int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, | |
2010 | bool *state) | |
2011 | { | |
2012 | struct irq_desc *desc; | |
2013 | struct irq_data *data; | |
2014 | struct irq_chip *chip; | |
2015 | unsigned long flags; | |
2016 | int err = -EINVAL; | |
2017 | ||
2018 | desc = irq_get_desc_buslock(irq, &flags, 0); | |
2019 | if (!desc) | |
2020 | return err; | |
2021 | ||
2022 | data = irq_desc_get_irq_data(desc); | |
2023 | ||
2024 | do { | |
2025 | chip = irq_data_get_irq_chip(data); | |
2026 | if (chip->irq_get_irqchip_state) | |
2027 | break; | |
2028 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | |
2029 | data = data->parent_data; | |
2030 | #else | |
2031 | data = NULL; | |
2032 | #endif | |
2033 | } while (data); | |
2034 | ||
2035 | if (data) | |
2036 | err = chip->irq_get_irqchip_state(data, which, state); | |
2037 | ||
2038 | irq_put_desc_busunlock(desc, flags); | |
2039 | return err; | |
2040 | } | |
1ee4fb3e | 2041 | EXPORT_SYMBOL_GPL(irq_get_irqchip_state); |
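A snapshot sketch for the VM-forwarding case the kerneldoc mentions (the wrapper name is hypothetical; IRQCHIP_STATE_PENDING is one of the defined IRQCHIP_STATE_* values):

static int foo_save_pending(unsigned int virq, bool *pending)
{
        /* read back whether the line is pending at the irqchip,
         * e.g. so it can be re-injected after a VM restore */
        return irq_get_irqchip_state(virq, IRQCHIP_STATE_PENDING, pending);
}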
1b7047ed MZ |
2042 | |
2043 | /** | |
2044 | * irq_set_irqchip_state - set the state of a forwarded interrupt. | |
2045 | * @irq: Interrupt line that is forwarded to a VM | |
2046 | * @which: State to be restored (one of IRQCHIP_STATE_*) | |
2047 | * @val: Value corresponding to @which | |
2048 | * | |
2049 | * This call sets the internal irqchip state of an interrupt, | |
2050 | * depending on the value of @which. | |
2051 | * | |
2052 | * This function should be called with preemption disabled if the | |
2053 | * interrupt controller has per-cpu registers. | |
2054 | */ | |
2055 | int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, | |
2056 | bool val) | |
2057 | { | |
2058 | struct irq_desc *desc; | |
2059 | struct irq_data *data; | |
2060 | struct irq_chip *chip; | |
2061 | unsigned long flags; | |
2062 | int err = -EINVAL; | |
2063 | ||
2064 | desc = irq_get_desc_buslock(irq, &flags, 0); | |
2065 | if (!desc) | |
2066 | return err; | |
2067 | ||
2068 | data = irq_desc_get_irq_data(desc); | |
2069 | ||
2070 | do { | |
2071 | chip = irq_data_get_irq_chip(data); | |
2072 | if (chip->irq_set_irqchip_state) | |
2073 | break; | |
2074 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | |
2075 | data = data->parent_data; | |
2076 | #else | |
2077 | data = NULL; | |
2078 | #endif | |
2079 | } while (data); | |
2080 | ||
2081 | if (data) | |
2082 | err = chip->irq_set_irqchip_state(data, which, val); | |
2083 | ||
2084 | irq_put_desc_busunlock(desc, flags); | |
2085 | return err; | |
2086 | } | |
1ee4fb3e | 2087 | EXPORT_SYMBOL_GPL(irq_set_irqchip_state); |
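And the matching restore sketch (wrapper name hypothetical):

static int foo_restore_pending(unsigned int virq, bool pending)
{
        /* write the previously snapshotted pending bit back into
         * the irqchip when the forwarded interrupt is restored */
        return irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, pending);
}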