Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/kernel/irq/manage.c | |
3 | * | |
a34db9b2 IM |
4 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar |
5 | * Copyright (C) 2005-2006 Thomas Gleixner | |
1da177e4 LT |
6 | * |
7 | * This file contains driver APIs to the irq subsystem. | |
8 | */ | |
9 | ||
97fd75b7 AM |
10 | #define pr_fmt(fmt) "genirq: " fmt |
11 | ||
1da177e4 | 12 | #include <linux/irq.h> |
3aa551c9 | 13 | #include <linux/kthread.h> |
1da177e4 LT |
14 | #include <linux/module.h> |
15 | #include <linux/random.h> | |
16 | #include <linux/interrupt.h> | |
1aeb272c | 17 | #include <linux/slab.h> |
3aa551c9 | 18 | #include <linux/sched.h> |
8bd75c77 | 19 | #include <linux/sched/rt.h> |
4d1d61a6 | 20 | #include <linux/task_work.h> |
1da177e4 LT |
21 | |
22 | #include "internals.h" | |
23 | ||
8d32a307 TG |
24 | #ifdef CONFIG_IRQ_FORCED_THREADING |
25 | __read_mostly bool force_irqthreads; | |
26 | ||
27 | static int __init setup_forced_irqthreads(char *arg) | |
28 | { | |
29 | force_irqthreads = true; | |
30 | return 0; | |
31 | } | |
32 | early_param("threadirqs", setup_forced_irqthreads); | |
33 | #endif | |
34 | ||
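Usage note (illustrative, not taken from this file): with CONFIG_IRQ_FORCED_THREADING=y, appending the parameter below to the kernel command line sets force_irqthreads; irq_setup_forced_threading() further down then pushes handlers into threads unless they are flagged IRQF_NO_THREAD, IRQF_PERCPU or IRQF_ONESHOT.

    threadirqs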
18258f72 | 35 | static void __synchronize_hardirq(struct irq_desc *desc) |
1da177e4 | 36 | { |
32f4125e | 37 | bool inprogress; |
1da177e4 | 38 | |
a98ce5c6 HX |
39 | do { |
40 | unsigned long flags; | |
41 | ||
42 | /* | |
43 | * Wait until we're out of the critical section. This might | |
44 | * give the wrong answer due to the lack of memory barriers. | |
45 | */ | |
32f4125e | 46 | while (irqd_irq_inprogress(&desc->irq_data)) |
a98ce5c6 HX |
47 | cpu_relax(); |
48 | ||
49 | /* Ok, that indicated we're done: double-check carefully. */ | |
239007b8 | 50 | raw_spin_lock_irqsave(&desc->lock, flags); |
32f4125e | 51 | inprogress = irqd_irq_inprogress(&desc->irq_data); |
239007b8 | 52 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
a98ce5c6 HX |
53 | |
54 | /* Oops, that failed? */ | |
32f4125e | 55 | } while (inprogress); |
18258f72 TG |
56 | } |
57 | ||
58 | /** | |
59 | * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) | |
60 | * @irq: interrupt number to wait for | |
61 | * | |
62 | * This function waits for any pending hard IRQ handlers for this | |
63 | * interrupt to complete before returning. If you use this | |
64 | * function while holding a resource the IRQ handler may need you | |
65 | * will deadlock. It does not take associated threaded handlers | |
66 | * into account. | |
67 | * | |
68 | * Do not use this for shutdown scenarios where you must be sure | |
69 | * that all parts (hardirq and threaded handler) have completed. | |
70 | * | |
71 | * This function may be called - with care - from IRQ context. | |
72 | */ | |
73 | void synchronize_hardirq(unsigned int irq) | |
74 | { | |
75 | struct irq_desc *desc = irq_to_desc(irq); | |
3aa551c9 | 76 | |
18258f72 TG |
77 | if (desc) |
78 | __synchronize_hardirq(desc); | |
79 | } | |
80 | EXPORT_SYMBOL(synchronize_hardirq); | |
81 | ||
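A minimal usage sketch (illustrative only; my_dev, its irq field and the helper are hypothetical, not part of this file): a driver that only needs the hard handler quiesced, for instance because its threaded handler is synchronized by other means, might do:

    static void my_dev_quiesce(struct my_dev *md)
    {
            disable_irq_nosync(md->irq);    /* stop new invocations */
            synchronize_hardirq(md->irq);   /* wait for a running hard handler */
    }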
82 | /** | |
83 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | |
84 | * @irq: interrupt number to wait for | |
85 | * | |
86 | * This function waits for any pending IRQ handlers for this interrupt | |
87 | * to complete before returning. If you use this function while | |
88 | * holding a resource the IRQ handler may need you will deadlock. | |
89 | * | |
90 | * This function may be called - with care - from IRQ context. | |
91 | */ | |
92 | void synchronize_irq(unsigned int irq) | |
93 | { | |
94 | struct irq_desc *desc = irq_to_desc(irq); | |
95 | ||
96 | if (desc) { | |
97 | __synchronize_hardirq(desc); | |
98 | /* | |
99 | * We made sure that no hardirq handler is | |
100 | * running. Now verify that no threaded handlers are | |
101 | * active. | |
102 | */ | |
103 | wait_event(desc->wait_for_threads, | |
104 | !atomic_read(&desc->threads_active)); | |
105 | } | |
1da177e4 | 106 | } |
1da177e4 LT |
107 | EXPORT_SYMBOL(synchronize_irq); |
108 | ||
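Illustrative sketch (hypothetical register and structure names): a stop path typically masks the interrupt at the device and then waits for both the hard handler and any threaded handler before tearing down state:

    static void my_dev_stop(struct my_dev *md)
    {
            writel(0, md->regs + MY_IRQ_ENABLE);    /* mask at the device */
            synchronize_irq(md->irq);               /* hard + threaded handlers done */
    }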
3aa551c9 TG |
109 | #ifdef CONFIG_SMP |
110 | cpumask_var_t irq_default_affinity; | |
111 | ||
771ee3b0 TG |
112 | /** |
113 | * irq_can_set_affinity - Check if the affinity of a given irq can be set | |
114 | * @irq: Interrupt to check | |
115 | * | |
116 | */ | |
117 | int irq_can_set_affinity(unsigned int irq) | |
118 | { | |
08678b08 | 119 | struct irq_desc *desc = irq_to_desc(irq); |
771ee3b0 | 120 | |
bce43032 TG |
121 | if (!desc || !irqd_can_balance(&desc->irq_data) || |
122 | !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) | |
771ee3b0 TG |
123 | return 0; |
124 | ||
125 | return 1; | |
126 | } | |
127 | ||
591d2fb0 TG |
128 | /** |
129 | * irq_set_thread_affinity - Notify irq threads to adjust affinity | |
130 | * @desc: irq descriptor which has affinity changed | |
131 | * | |
132 | * We just set IRQTF_AFFINITY and delegate the affinity setting | |
133 | * to the interrupt thread itself. We can not call | |
134 | * set_cpus_allowed_ptr() here as we hold desc->lock and this | |
135 | * code can be called from hard interrupt context. | |
136 | */ | |
137 | void irq_set_thread_affinity(struct irq_desc *desc) | |
3aa551c9 TG |
138 | { |
139 | struct irqaction *action = desc->action; | |
140 | ||
141 | while (action) { | |
142 | if (action->thread) | |
591d2fb0 | 143 | set_bit(IRQTF_AFFINITY, &action->thread_flags); |
3aa551c9 TG |
144 | action = action->next; |
145 | } | |
146 | } | |
147 | ||
1fa46f1f | 148 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
0ef5ca1e | 149 | static inline bool irq_can_move_pcntxt(struct irq_data *data) |
1fa46f1f | 150 | { |
0ef5ca1e | 151 | return irqd_can_move_in_process_context(data); |
1fa46f1f | 152 | } |
0ef5ca1e | 153 | static inline bool irq_move_pending(struct irq_data *data) |
1fa46f1f | 154 | { |
0ef5ca1e | 155 | return irqd_is_setaffinity_pending(data); |
1fa46f1f TG |
156 | } |
157 | static inline void | |
158 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) | |
159 | { | |
160 | cpumask_copy(desc->pending_mask, mask); | |
161 | } | |
162 | static inline void | |
163 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) | |
164 | { | |
165 | cpumask_copy(mask, desc->pending_mask); | |
166 | } | |
167 | #else | |
0ef5ca1e | 168 | static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; } |
cd22c0e4 | 169 | static inline bool irq_move_pending(struct irq_data *data) { return false; } |
1fa46f1f TG |
170 | static inline void |
171 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } | |
172 | static inline void | |
173 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } | |
174 | #endif | |
175 | ||
818b0f3b JL |
176 | int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, |
177 | bool force) | |
178 | { | |
179 | struct irq_desc *desc = irq_data_to_desc(data); | |
180 | struct irq_chip *chip = irq_data_get_irq_chip(data); | |
181 | int ret; | |
182 | ||
01f8fa4f | 183 | ret = chip->irq_set_affinity(data, mask, force); |
818b0f3b JL |
184 | switch (ret) { |
185 | case IRQ_SET_MASK_OK: | |
186 | cpumask_copy(data->affinity, mask); | |
187 | case IRQ_SET_MASK_OK_NOCOPY: | |
188 | irq_set_thread_affinity(desc); | |
189 | ret = 0; | |
190 | } | |
191 | ||
192 | return ret; | |
193 | } | |
194 | ||
01f8fa4f TG |
195 | int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, |
196 | bool force) | |
771ee3b0 | 197 | { |
c2d0c555 DD |
198 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
199 | struct irq_desc *desc = irq_data_to_desc(data); | |
1fa46f1f | 200 | int ret = 0; |
771ee3b0 | 201 | |
c2d0c555 | 202 | if (!chip || !chip->irq_set_affinity) |
771ee3b0 TG |
203 | return -EINVAL; |
204 | ||
0ef5ca1e | 205 | if (irq_can_move_pcntxt(data)) { |
01f8fa4f | 206 | ret = irq_do_set_affinity(data, mask, force); |
1fa46f1f | 207 | } else { |
c2d0c555 | 208 | irqd_set_move_pending(data); |
1fa46f1f | 209 | irq_copy_pending(desc, mask); |
57b150cc | 210 | } |
1fa46f1f | 211 | |
cd7eab44 BH |
212 | if (desc->affinity_notify) { |
213 | kref_get(&desc->affinity_notify->kref); | |
214 | schedule_work(&desc->affinity_notify->work); | |
215 | } | |
c2d0c555 DD |
216 | irqd_set(data, IRQD_AFFINITY_SET); |
217 | ||
218 | return ret; | |
219 | } | |
220 | ||
01f8fa4f | 221 | int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force) |
c2d0c555 DD |
222 | { |
223 | struct irq_desc *desc = irq_to_desc(irq); | |
224 | unsigned long flags; | |
225 | int ret; | |
226 | ||
227 | if (!desc) | |
228 | return -EINVAL; | |
229 | ||
230 | raw_spin_lock_irqsave(&desc->lock, flags); | |
01f8fa4f | 231 | ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); |
239007b8 | 232 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1fa46f1f | 233 | return ret; |
771ee3b0 TG |
234 | } |
235 | ||
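Most callers use the irq_set_affinity() inline from <linux/interrupt.h>, which is __irq_set_affinity(irq, mask, false). An illustrative sketch (hypothetical per-queue fields) spreading one vector per queue across online CPUs:

    static void my_dev_spread_irqs(struct my_dev *md)
    {
            unsigned int i;

            for (i = 0; i < md->nr_queues; i++)
                    irq_set_affinity(md->queue_irq[i],
                                     cpumask_of(i % num_online_cpus()));
    }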
e7a297b0 PWJ |
236 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) |
237 | { | |
e7a297b0 | 238 | unsigned long flags; |
31d9d9b6 | 239 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
e7a297b0 PWJ |
240 | |
241 | if (!desc) | |
242 | return -EINVAL; | |
e7a297b0 | 243 | desc->affinity_hint = m; |
02725e74 | 244 | irq_put_desc_unlock(desc, flags); |
e7a297b0 PWJ |
245 | return 0; |
246 | } | |
247 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); | |
248 | ||
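Illustrative sketch (hypothetical names): multiqueue drivers usually publish a hint per vector so user-space irqbalance can honour it, and clear it again before the interrupt is freed:

    irq_set_affinity_hint(md->queue_irq[i], cpumask_of(cpu));   /* after request_irq() */
    ...
    irq_set_affinity_hint(md->queue_irq[i], NULL);              /* before free_irq() */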
cd7eab44 BH |
249 | static void irq_affinity_notify(struct work_struct *work) |
250 | { | |
251 | struct irq_affinity_notify *notify = | |
252 | container_of(work, struct irq_affinity_notify, work); | |
253 | struct irq_desc *desc = irq_to_desc(notify->irq); | |
254 | cpumask_var_t cpumask; | |
255 | unsigned long flags; | |
256 | ||
1fa46f1f | 257 | if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) |
cd7eab44 BH |
258 | goto out; |
259 | ||
260 | raw_spin_lock_irqsave(&desc->lock, flags); | |
0ef5ca1e | 261 | if (irq_move_pending(&desc->irq_data)) |
1fa46f1f | 262 | irq_get_pending(cpumask, desc); |
cd7eab44 | 263 | else |
1fb0ef31 | 264 | cpumask_copy(cpumask, desc->irq_data.affinity); |
cd7eab44 BH |
265 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
266 | ||
267 | notify->notify(notify, cpumask); | |
268 | ||
269 | free_cpumask_var(cpumask); | |
270 | out: | |
271 | kref_put(¬ify->kref, notify->release); | |
272 | } | |
273 | ||
274 | /** | |
275 | * irq_set_affinity_notifier - control notification of IRQ affinity changes | |
276 | * @irq: Interrupt for which to enable/disable notification | |
277 | * @notify: Context for notification, or %NULL to disable | |
278 | * notification. Function pointers must be initialised; | |
279 | * the other fields will be initialised by this function. | |
280 | * | |
281 | * Must be called in process context. Notification may only be enabled | |
282 | * after the IRQ is allocated and must be disabled before the IRQ is | |
283 | * freed using free_irq(). | |
284 | */ | |
285 | int | |
286 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) | |
287 | { | |
288 | struct irq_desc *desc = irq_to_desc(irq); | |
289 | struct irq_affinity_notify *old_notify; | |
290 | unsigned long flags; | |
291 | ||
292 | /* The release function is promised process context */ | |
293 | might_sleep(); | |
294 | ||
295 | if (!desc) | |
296 | return -EINVAL; | |
297 | ||
298 | /* Complete initialisation of *notify */ | |
299 | if (notify) { | |
300 | notify->irq = irq; | |
301 | kref_init(¬ify->kref); | |
302 | INIT_WORK(¬ify->work, irq_affinity_notify); | |
303 | } | |
304 | ||
305 | raw_spin_lock_irqsave(&desc->lock, flags); | |
306 | old_notify = desc->affinity_notify; | |
307 | desc->affinity_notify = notify; | |
308 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
309 | ||
310 | if (old_notify) | |
311 | kref_put(&old_notify->kref, old_notify->release); | |
312 | ||
313 | return 0; | |
314 | } | |
315 | EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); | |
316 | ||
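Illustrative sketch (hypothetical callbacks and embedding structure): the notifier is embedded in driver data, both function pointers are filled in, and it is unregistered with NULL before the interrupt is freed:

    static void my_affinity_notify(struct irq_affinity_notify *notify,
                                   const cpumask_t *mask)
    {
            /* re-steer per-CPU resources to the CPUs in @mask */
    }

    static void my_affinity_release(struct kref *ref)
    {
            /* nothing to free: the notifier is embedded in struct my_dev */
    }

    md->notify.notify  = my_affinity_notify;
    md->notify.release = my_affinity_release;
    irq_set_affinity_notifier(md->irq, &md->notify);
    ...
    irq_set_affinity_notifier(md->irq, NULL);   /* before free_irq() */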
18404756 MK |
317 | #ifndef CONFIG_AUTO_IRQ_AFFINITY |
318 | /* | |
319 | * Generic version of the affinity autoselector. | |
320 | */ | |
3b8249e7 TG |
321 | static int |
322 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | |
18404756 | 323 | { |
569bda8d | 324 | struct cpumask *set = irq_default_affinity; |
818b0f3b | 325 | int node = desc->irq_data.node; |
569bda8d | 326 | |
b008207c | 327 | /* Excludes PER_CPU and NO_BALANCE interrupts */ |
18404756 MK |
328 | if (!irq_can_set_affinity(irq)) |
329 | return 0; | |
330 | ||
f6d87f4b TG |
331 | /* |
332 | * Preserve a userspace affinity setup, but make sure that | |
333 | * one of the targets is online. | |
334 | */ | |
2bdd1055 | 335 | if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { |
569bda8d TG |
336 | if (cpumask_intersects(desc->irq_data.affinity, |
337 | cpu_online_mask)) | |
338 | set = desc->irq_data.affinity; | |
0c6f8a8b | 339 | else |
2bdd1055 | 340 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); |
f6d87f4b | 341 | } |
18404756 | 342 | |
3b8249e7 | 343 | cpumask_and(mask, cpu_online_mask, set); |
241fc640 PB |
344 | if (node != NUMA_NO_NODE) { |
345 | const struct cpumask *nodemask = cpumask_of_node(node); | |
346 | ||
347 | /* make sure at least one of the cpus in nodemask is online */ | |
348 | if (cpumask_intersects(mask, nodemask)) | |
349 | cpumask_and(mask, mask, nodemask); | |
350 | } | |
818b0f3b | 351 | irq_do_set_affinity(&desc->irq_data, mask, false); |
18404756 MK |
352 | return 0; |
353 | } | |
f6d87f4b | 354 | #else |
3b8249e7 TG |
355 | static inline int |
356 | setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask) | |
f6d87f4b TG |
357 | { |
358 | return irq_select_affinity(irq); | |
359 | } | |
18404756 MK |
360 | #endif |
361 | ||
f6d87f4b TG |
362 | /* |
363 | * Called when affinity is set via /proc/irq | |
364 | */ | |
3b8249e7 | 365 | int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask) |
f6d87f4b TG |
366 | { |
367 | struct irq_desc *desc = irq_to_desc(irq); | |
368 | unsigned long flags; | |
369 | int ret; | |
370 | ||
239007b8 | 371 | raw_spin_lock_irqsave(&desc->lock, flags); |
3b8249e7 | 372 | ret = setup_affinity(irq, desc, mask); |
239007b8 | 373 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
f6d87f4b TG |
374 | return ret; |
375 | } | |
376 | ||
377 | #else | |
3b8249e7 TG |
378 | static inline int |
379 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | |
f6d87f4b TG |
380 | { |
381 | return 0; | |
382 | } | |
1da177e4 LT |
383 | #endif |
384 | ||
0a0c5168 RW |
385 | void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) |
386 | { | |
387 | if (suspend) { | |
685fd0b4 | 388 | if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) |
0a0c5168 | 389 | return; |
c531e836 | 390 | desc->istate |= IRQS_SUSPENDED; |
0a0c5168 RW |
391 | } |
392 | ||
3aae994f | 393 | if (!desc->depth++) |
87923470 | 394 | irq_disable(desc); |
0a0c5168 RW |
395 | } |
396 | ||
02725e74 TG |
397 | static int __disable_irq_nosync(unsigned int irq) |
398 | { | |
399 | unsigned long flags; | |
31d9d9b6 | 400 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
02725e74 TG |
401 | |
402 | if (!desc) | |
403 | return -EINVAL; | |
404 | __disable_irq(desc, irq, false); | |
405 | irq_put_desc_busunlock(desc, flags); | |
406 | return 0; | |
407 | } | |
408 | ||
1da177e4 LT |
409 | /** |
410 | * disable_irq_nosync - disable an irq without waiting | |
411 | * @irq: Interrupt to disable | |
412 | * | |
413 | * Disable the selected interrupt line. Disables and Enables are | |
414 | * nested. | |
415 | * Unlike disable_irq(), this function does not ensure existing | |
416 | * instances of the IRQ handler have completed before returning. | |
417 | * | |
418 | * This function may be called from IRQ context. | |
419 | */ | |
420 | void disable_irq_nosync(unsigned int irq) | |
421 | { | |
02725e74 | 422 | __disable_irq_nosync(irq); |
1da177e4 | 423 | } |
1da177e4 LT |
424 | EXPORT_SYMBOL(disable_irq_nosync); |
425 | ||
426 | /** | |
427 | * disable_irq - disable an irq and wait for completion | |
428 | * @irq: Interrupt to disable | |
429 | * | |
430 | * Disable the selected interrupt line. Enables and Disables are | |
431 | * nested. | |
432 | * This function waits for any pending IRQ handlers for this interrupt | |
433 | * to complete before returning. If you use this function while | |
434 | * holding a resource the IRQ handler may need you will deadlock. | |
435 | * | |
436 | * This function may be called - with care - from IRQ context. | |
437 | */ | |
438 | void disable_irq(unsigned int irq) | |
439 | { | |
02725e74 | 440 | if (!__disable_irq_nosync(irq)) |
1da177e4 LT |
441 | synchronize_irq(irq); |
442 | } | |
1da177e4 LT |
443 | EXPORT_SYMBOL(disable_irq); |
444 | ||
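Illustrative sketch (hypothetical helper): because disable_irq() waits for running handlers, the window after it returns is free of handler activity, hard and threaded, provided nothing the handler needs is held across the call:

    disable_irq(md->irq);           /* waits for handlers on other CPUs */
    my_dev_reprogram(md);           /* handler is guaranteed not to run here */
    enable_irq(md->irq);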
0a0c5168 | 445 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) |
1adb0850 | 446 | { |
dc5f219e | 447 | if (resume) { |
c531e836 | 448 | if (!(desc->istate & IRQS_SUSPENDED)) { |
dc5f219e TG |
449 | if (!desc->action) |
450 | return; | |
451 | if (!(desc->action->flags & IRQF_FORCE_RESUME)) | |
452 | return; | |
453 | /* Pretend that it got disabled ! */ | |
454 | desc->depth++; | |
455 | } | |
c531e836 | 456 | desc->istate &= ~IRQS_SUSPENDED; |
dc5f219e | 457 | } |
0a0c5168 | 458 | |
1adb0850 TG |
459 | switch (desc->depth) { |
460 | case 0: | |
0a0c5168 | 461 | err_out: |
b8c512f6 | 462 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); |
1adb0850 TG |
463 | break; |
464 | case 1: { | |
c531e836 | 465 | if (desc->istate & IRQS_SUSPENDED) |
0a0c5168 | 466 | goto err_out; |
1adb0850 | 467 | /* Prevent probing on this irq: */ |
1ccb4e61 | 468 | irq_settings_set_noprobe(desc); |
3aae994f | 469 | irq_enable(desc); |
1adb0850 TG |
470 | check_irq_resend(desc, irq); |
471 | /* fall-through */ | |
472 | } | |
473 | default: | |
474 | desc->depth--; | |
475 | } | |
476 | } | |
477 | ||
1da177e4 LT |
478 | /** |
479 | * enable_irq - enable handling of an irq | |
480 | * @irq: Interrupt to enable | |
481 | * | |
482 | * Undoes the effect of one call to disable_irq(). If this | |
483 | * matches the last disable, processing of interrupts on this | |
484 | * IRQ line is re-enabled. | |
485 | * | |
70aedd24 | 486 | * This function may be called from IRQ context only when |
6b8ff312 | 487 | * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! |
1da177e4 LT |
488 | */ |
489 | void enable_irq(unsigned int irq) | |
490 | { | |
1da177e4 | 491 | unsigned long flags; |
31d9d9b6 | 492 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
1da177e4 | 493 | |
7d94f7ca | 494 | if (!desc) |
c2b5a251 | 495 | return; |
50f7c032 TG |
496 | if (WARN(!desc->irq_data.chip, |
497 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) | |
02725e74 | 498 | goto out; |
2656c366 | 499 | |
0a0c5168 | 500 | __enable_irq(desc, irq, false); |
02725e74 TG |
501 | out: |
502 | irq_put_desc_busunlock(desc, flags); | |
1da177e4 | 503 | } |
1da177e4 LT |
504 | EXPORT_SYMBOL(enable_irq); |
505 | ||
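Illustrative sketch of the nesting rule mentioned above (hypothetical irq field): every disable needs a matching enable before the line fires again:

    disable_irq(md->irq);           /* depth 0 -> 1, line masked */
    disable_irq_nosync(md->irq);    /* depth 1 -> 2 */
    enable_irq(md->irq);            /* depth 2 -> 1, still masked */
    enable_irq(md->irq);            /* depth 1 -> 0, line enabled again */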
0c5d1eb7 | 506 | static int set_irq_wake_real(unsigned int irq, unsigned int on) |
2db87321 | 507 | { |
08678b08 | 508 | struct irq_desc *desc = irq_to_desc(irq); |
2db87321 UKK |
509 | int ret = -ENXIO; |
510 | ||
60f96b41 SS |
511 | if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) |
512 | return 0; | |
513 | ||
2f7e99bb TG |
514 | if (desc->irq_data.chip->irq_set_wake) |
515 | ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); | |
2db87321 UKK |
516 | |
517 | return ret; | |
518 | } | |
519 | ||
ba9a2331 | 520 | /** |
a0cd9ca2 | 521 | * irq_set_irq_wake - control irq power management wakeup |
ba9a2331 TG |
522 | * @irq: interrupt to control |
523 | * @on: enable/disable power management wakeup | |
524 | * | |
15a647eb DB |
525 | * Enable/disable power management wakeup mode, which is |
526 | * disabled by default. Enables and disables must match, | |
527 | * just as they match for non-wakeup mode support. | |
528 | * | |
529 | * Wakeup mode lets this IRQ wake the system from sleep | |
530 | * states like "suspend to RAM". | |
ba9a2331 | 531 | */ |
a0cd9ca2 | 532 | int irq_set_irq_wake(unsigned int irq, unsigned int on) |
ba9a2331 | 533 | { |
ba9a2331 | 534 | unsigned long flags; |
31d9d9b6 | 535 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
2db87321 | 536 | int ret = 0; |
ba9a2331 | 537 | |
13863a66 JJ |
538 | if (!desc) |
539 | return -EINVAL; | |
540 | ||
15a647eb DB |
541 | /* wakeup-capable irqs can be shared between drivers that |
542 | * don't need to have the same sleep mode behaviors. | |
543 | */ | |
15a647eb | 544 | if (on) { |
2db87321 UKK |
545 | if (desc->wake_depth++ == 0) { |
546 | ret = set_irq_wake_real(irq, on); | |
547 | if (ret) | |
548 | desc->wake_depth = 0; | |
549 | else | |
7f94226f | 550 | irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); |
2db87321 | 551 | } |
15a647eb DB |
552 | } else { |
553 | if (desc->wake_depth == 0) { | |
7a2c4770 | 554 | WARN(1, "Unbalanced IRQ %d wake disable\n", irq); |
2db87321 UKK |
555 | } else if (--desc->wake_depth == 0) { |
556 | ret = set_irq_wake_real(irq, on); | |
557 | if (ret) | |
558 | desc->wake_depth = 1; | |
559 | else | |
7f94226f | 560 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); |
2db87321 | 561 | } |
15a647eb | 562 | } |
02725e74 | 563 | irq_put_desc_busunlock(desc, flags); |
ba9a2331 TG |
564 | return ret; |
565 | } | |
a0cd9ca2 | 566 | EXPORT_SYMBOL(irq_set_irq_wake); |
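Illustrative sketch (hypothetical driver data): most drivers reach this through the enable_irq_wake()/disable_irq_wake() inlines from <linux/interrupt.h>, typically in their suspend/resume callbacks:

    static int my_dev_suspend(struct device *dev)
    {
            struct my_dev *md = dev_get_drvdata(dev);

            if (device_may_wakeup(dev))
                    enable_irq_wake(md->irq);   /* irq_set_irq_wake(md->irq, 1) */
            return 0;
    }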
ba9a2331 | 567 | |
1da177e4 LT |
568 | /* |
569 | * Internal function that tells the architecture code whether a | |
570 | * particular irq has been exclusively allocated or is available | |
571 | * for driver use. | |
572 | */ | |
573 | int can_request_irq(unsigned int irq, unsigned long irqflags) | |
574 | { | |
cc8c3b78 | 575 | unsigned long flags; |
31d9d9b6 | 576 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
02725e74 | 577 | int canrequest = 0; |
1da177e4 | 578 | |
7d94f7ca YL |
579 | if (!desc) |
580 | return 0; | |
581 | ||
02725e74 | 582 | if (irq_settings_can_request(desc)) { |
2779db8d BH |
583 | if (!desc->action || |
584 | irqflags & desc->action->flags & IRQF_SHARED) | |
585 | canrequest = 1; | |
02725e74 TG |
586 | } |
587 | irq_put_desc_unlock(desc, flags); | |
588 | return canrequest; | |
1da177e4 LT |
589 | } |
590 | ||
0c5d1eb7 | 591 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, |
b2ba2c30 | 592 | unsigned long flags) |
82736f4d | 593 | { |
6b8ff312 | 594 | struct irq_chip *chip = desc->irq_data.chip; |
d4d5e089 | 595 | int ret, unmask = 0; |
82736f4d | 596 | |
b2ba2c30 | 597 | if (!chip || !chip->irq_set_type) { |
82736f4d UKK |
598 | /* |
599 | * IRQF_TRIGGER_* but the PIC does not support multiple | |
600 | * flow-types? | |
601 | */ | |
97fd75b7 | 602 | pr_debug("No set_type function for IRQ %d (%s)\n", irq, |
f5d89470 | 603 | chip ? (chip->name ? : "unknown") : "unknown"); |
82736f4d UKK |
604 | return 0; |
605 | } | |
606 | ||
876dbd4c | 607 | flags &= IRQ_TYPE_SENSE_MASK; |
d4d5e089 TG |
608 | |
609 | if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { | |
32f4125e | 610 | if (!irqd_irq_masked(&desc->irq_data)) |
d4d5e089 | 611 | mask_irq(desc); |
32f4125e | 612 | if (!irqd_irq_disabled(&desc->irq_data)) |
d4d5e089 TG |
613 | unmask = 1; |
614 | } | |
615 | ||
f2b662da | 616 | /* caller masked out all except trigger mode flags */ |
b2ba2c30 | 617 | ret = chip->irq_set_type(&desc->irq_data, flags); |
82736f4d | 618 | |
876dbd4c TG |
619 | switch (ret) { |
620 | case IRQ_SET_MASK_OK: | |
621 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); | |
622 | irqd_set(&desc->irq_data, flags); | |
623 | ||
624 | case IRQ_SET_MASK_OK_NOCOPY: | |
625 | flags = irqd_get_trigger_type(&desc->irq_data); | |
626 | irq_settings_set_trigger_mask(desc, flags); | |
627 | irqd_clear(&desc->irq_data, IRQD_LEVEL); | |
628 | irq_settings_clr_level(desc); | |
629 | if (flags & IRQ_TYPE_LEVEL_MASK) { | |
630 | irq_settings_set_level(desc); | |
631 | irqd_set(&desc->irq_data, IRQD_LEVEL); | |
632 | } | |
46732475 | 633 | |
d4d5e089 | 634 | ret = 0; |
8fff39e0 | 635 | break; |
876dbd4c | 636 | default: |
97fd75b7 | 637 | pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n", |
876dbd4c | 638 | flags, irq, chip->irq_set_type); |
0c5d1eb7 | 639 | } |
d4d5e089 TG |
640 | if (unmask) |
641 | unmask_irq(desc); | |
82736f4d UKK |
642 | return ret; |
643 | } | |
644 | ||
293a7a0a TG |
645 | #ifdef CONFIG_HARDIRQS_SW_RESEND |
646 | int irq_set_parent(int irq, int parent_irq) | |
647 | { | |
648 | unsigned long flags; | |
649 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); | |
650 | ||
651 | if (!desc) | |
652 | return -EINVAL; | |
653 | ||
654 | desc->parent_irq = parent_irq; | |
655 | ||
656 | irq_put_desc_unlock(desc, flags); | |
657 | return 0; | |
658 | } | |
659 | #endif | |
660 | ||
b25c340c TG |
661 | /* |
662 | * Default primary interrupt handler for threaded interrupts. Is | |
663 | * assigned as primary handler when request_threaded_irq is called | |
664 | * with handler == NULL. Useful for oneshot interrupts. | |
665 | */ | |
666 | static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) | |
667 | { | |
668 | return IRQ_WAKE_THREAD; | |
669 | } | |
670 | ||
399b5da2 TG |
671 | /* |
672 | * Primary handler for nested threaded interrupts. Should never be | |
673 | * called. | |
674 | */ | |
675 | static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) | |
676 | { | |
677 | WARN(1, "Primary handler called for nested irq %d\n", irq); | |
678 | return IRQ_NONE; | |
679 | } | |
680 | ||
3aa551c9 TG |
681 | static int irq_wait_for_interrupt(struct irqaction *action) |
682 | { | |
550acb19 IY |
683 | set_current_state(TASK_INTERRUPTIBLE); |
684 | ||
3aa551c9 | 685 | while (!kthread_should_stop()) { |
f48fe81e TG |
686 | |
687 | if (test_and_clear_bit(IRQTF_RUNTHREAD, | |
688 | &action->thread_flags)) { | |
3aa551c9 TG |
689 | __set_current_state(TASK_RUNNING); |
690 | return 0; | |
f48fe81e TG |
691 | } |
692 | schedule(); | |
550acb19 | 693 | set_current_state(TASK_INTERRUPTIBLE); |
3aa551c9 | 694 | } |
550acb19 | 695 | __set_current_state(TASK_RUNNING); |
3aa551c9 TG |
696 | return -1; |
697 | } | |
698 | ||
b25c340c TG |
699 | /* |
700 | * Oneshot interrupts keep the irq line masked until the threaded | |
701 | * handler has finished. Unmask if the interrupt has not been disabled and | |
702 | * is marked MASKED. | |
703 | */ | |
b5faba21 | 704 | static void irq_finalize_oneshot(struct irq_desc *desc, |
f3f79e38 | 705 | struct irqaction *action) |
b25c340c | 706 | { |
b5faba21 TG |
707 | if (!(desc->istate & IRQS_ONESHOT)) |
708 | return; | |
0b1adaa0 | 709 | again: |
3876ec9e | 710 | chip_bus_lock(desc); |
239007b8 | 711 | raw_spin_lock_irq(&desc->lock); |
0b1adaa0 TG |
712 | |
713 | /* | |
714 | * Implausible though it may be we need to protect us against | |
715 | * the following scenario: | |
716 | * | |
717 | * The thread is faster done than the hard interrupt handler | |
718 | * on the other CPU. If we unmask the irq line then the | |
719 | * interrupt can come in again and masks the line, leaves due | |
009b4c3b | 720 | * to IRQS_INPROGRESS and the irq line is masked forever. |
b5faba21 TG |
721 | * |
722 | * This also serializes the state of shared oneshot handlers | |
723 | * versus "desc->threads_oneshot |= action->thread_mask;" in | |
724 | * irq_wake_thread(). See the comment there which explains the | |
725 | * serialization. | |
0b1adaa0 | 726 | */ |
32f4125e | 727 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { |
0b1adaa0 | 728 | raw_spin_unlock_irq(&desc->lock); |
3876ec9e | 729 | chip_bus_sync_unlock(desc); |
0b1adaa0 TG |
730 | cpu_relax(); |
731 | goto again; | |
732 | } | |
733 | ||
b5faba21 TG |
734 | /* |
735 | * Now check again, whether the thread should run. Otherwise | |
736 | * we would clear the threads_oneshot bit of this thread which | |
737 | * was just set. | |
738 | */ | |
f3f79e38 | 739 | if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) |
b5faba21 TG |
740 | goto out_unlock; |
741 | ||
742 | desc->threads_oneshot &= ~action->thread_mask; | |
743 | ||
32f4125e TG |
744 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && |
745 | irqd_irq_masked(&desc->irq_data)) | |
328a4978 | 746 | unmask_threaded_irq(desc); |
32f4125e | 747 | |
b5faba21 | 748 | out_unlock: |
239007b8 | 749 | raw_spin_unlock_irq(&desc->lock); |
3876ec9e | 750 | chip_bus_sync_unlock(desc); |
b25c340c TG |
751 | } |
752 | ||
61f38261 | 753 | #ifdef CONFIG_SMP |
591d2fb0 | 754 | /* |
b04c644e | 755 | * Check whether we need to change the affinity of the interrupt thread. |
591d2fb0 TG |
756 | */ |
757 | static void | |
758 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) | |
759 | { | |
760 | cpumask_var_t mask; | |
04aa530e | 761 | bool valid = true; |
591d2fb0 TG |
762 | |
763 | if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) | |
764 | return; | |
765 | ||
766 | /* | |
767 | * In case we are out of memory we set IRQTF_AFFINITY again and | |
768 | * try again next time | |
769 | */ | |
770 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { | |
771 | set_bit(IRQTF_AFFINITY, &action->thread_flags); | |
772 | return; | |
773 | } | |
774 | ||
239007b8 | 775 | raw_spin_lock_irq(&desc->lock); |
04aa530e TG |
776 | /* |
777 | * This code is triggered unconditionally. Check the affinity | |
778 | * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. | |
779 | */ | |
780 | if (desc->irq_data.affinity) | |
781 | cpumask_copy(mask, desc->irq_data.affinity); | |
782 | else | |
783 | valid = false; | |
239007b8 | 784 | raw_spin_unlock_irq(&desc->lock); |
591d2fb0 | 785 | |
04aa530e TG |
786 | if (valid) |
787 | set_cpus_allowed_ptr(current, mask); | |
591d2fb0 TG |
788 | free_cpumask_var(mask); |
789 | } | |
61f38261 BP |
790 | #else |
791 | static inline void | |
792 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } | |
793 | #endif | |
591d2fb0 | 794 | |
8d32a307 TG |
795 | /* |
796 | * Interrupts which are not explicitly requested as threaded | |
797 | * interrupts rely on the implicit bh/preempt disable of the hard irq | |
798 | * context. So we need to disable bh here to avoid deadlocks and other | |
799 | * side effects. | |
800 | */ | |
3a43e05f | 801 | static irqreturn_t |
8d32a307 TG |
802 | irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) |
803 | { | |
3a43e05f SAS |
804 | irqreturn_t ret; |
805 | ||
8d32a307 | 806 | local_bh_disable(); |
3a43e05f | 807 | ret = action->thread_fn(action->irq, action->dev_id); |
f3f79e38 | 808 | irq_finalize_oneshot(desc, action); |
8d32a307 | 809 | local_bh_enable(); |
3a43e05f | 810 | return ret; |
8d32a307 TG |
811 | } |
812 | ||
813 | /* | |
f788e7bf | 814 | * Interrupts explicitly requested as threaded interrupts want to be |
8d32a307 TG |
815 | * preemptible - many of them need to sleep and wait for slow buses to | |
816 | * complete. | |
817 | */ | |
3a43e05f SAS |
818 | static irqreturn_t irq_thread_fn(struct irq_desc *desc, |
819 | struct irqaction *action) | |
8d32a307 | 820 | { |
3a43e05f SAS |
821 | irqreturn_t ret; |
822 | ||
823 | ret = action->thread_fn(action->irq, action->dev_id); | |
f3f79e38 | 824 | irq_finalize_oneshot(desc, action); |
3a43e05f | 825 | return ret; |
8d32a307 TG |
826 | } |
827 | ||
7140ea19 IY |
828 | static void wake_threads_waitq(struct irq_desc *desc) |
829 | { | |
c685689f | 830 | if (atomic_dec_and_test(&desc->threads_active)) |
7140ea19 IY |
831 | wake_up(&desc->wait_for_threads); |
832 | } | |
833 | ||
67d12145 | 834 | static void irq_thread_dtor(struct callback_head *unused) |
4d1d61a6 ON |
835 | { |
836 | struct task_struct *tsk = current; | |
837 | struct irq_desc *desc; | |
838 | struct irqaction *action; | |
839 | ||
840 | if (WARN_ON_ONCE(!(current->flags & PF_EXITING))) | |
841 | return; | |
842 | ||
843 | action = kthread_data(tsk); | |
844 | ||
fb21affa | 845 | pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", |
19af395d | 846 | tsk->comm, tsk->pid, action->irq); |
4d1d61a6 ON |
847 | |
848 | ||
849 | desc = irq_to_desc(action->irq); | |
850 | /* | |
851 | * If IRQTF_RUNTHREAD is set, we need to decrement | |
852 | * desc->threads_active and wake possible waiters. | |
853 | */ | |
854 | if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) | |
855 | wake_threads_waitq(desc); | |
856 | ||
857 | /* Prevent a stale desc->threads_oneshot */ | |
858 | irq_finalize_oneshot(desc, action); | |
859 | } | |
860 | ||
3aa551c9 TG |
861 | /* |
862 | * Interrupt handler thread | |
863 | */ | |
864 | static int irq_thread(void *data) | |
865 | { | |
67d12145 | 866 | struct callback_head on_exit_work; |
3aa551c9 TG |
867 | struct irqaction *action = data; |
868 | struct irq_desc *desc = irq_to_desc(action->irq); | |
3a43e05f SAS |
869 | irqreturn_t (*handler_fn)(struct irq_desc *desc, |
870 | struct irqaction *action); | |
3aa551c9 | 871 | |
540b60e2 | 872 | if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, |
8d32a307 TG |
873 | &action->thread_flags)) |
874 | handler_fn = irq_forced_thread_fn; | |
875 | else | |
876 | handler_fn = irq_thread_fn; | |
877 | ||
41f9d29f | 878 | init_task_work(&on_exit_work, irq_thread_dtor); |
4d1d61a6 | 879 | task_work_add(current, &on_exit_work, false); |
3aa551c9 | 880 | |
f3de44ed SM |
881 | irq_thread_check_affinity(desc, action); |
882 | ||
3aa551c9 | 883 | while (!irq_wait_for_interrupt(action)) { |
7140ea19 | 884 | irqreturn_t action_ret; |
3aa551c9 | 885 | |
591d2fb0 TG |
886 | irq_thread_check_affinity(desc, action); |
887 | ||
7140ea19 IY |
888 | action_ret = handler_fn(desc, action); |
889 | if (!noirqdebug) | |
890 | note_interrupt(action->irq, desc, action_ret); | |
3aa551c9 | 891 | |
7140ea19 | 892 | wake_threads_waitq(desc); |
3aa551c9 TG |
893 | } |
894 | ||
7140ea19 IY |
895 | /* |
896 | * This is the regular exit path. __free_irq() is stopping the | |
897 | * thread via kthread_stop() after calling | |
898 | * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the | |
e04268b0 TG |
899 | * oneshot mask bit can be set. We cannot verify that as we |
900 | * cannot touch the oneshot mask at this point anymore as | |
901 | * __setup_irq() might have given out currents thread_mask | |
902 | * again. | |
3aa551c9 | 903 | */ |
4d1d61a6 | 904 | task_work_cancel(current, irq_thread_dtor); |
3aa551c9 TG |
905 | return 0; |
906 | } | |
907 | ||
a92444c6 TG |
908 | /** |
909 | * irq_wake_thread - wake the irq thread for the action identified by dev_id | |
910 | * @irq: Interrupt line | |
911 | * @dev_id: Device identity for which the thread should be woken | |
912 | * | |
913 | */ | |
914 | void irq_wake_thread(unsigned int irq, void *dev_id) | |
915 | { | |
916 | struct irq_desc *desc = irq_to_desc(irq); | |
917 | struct irqaction *action; | |
918 | unsigned long flags; | |
919 | ||
920 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) | |
921 | return; | |
922 | ||
923 | raw_spin_lock_irqsave(&desc->lock, flags); | |
924 | for (action = desc->action; action; action = action->next) { | |
925 | if (action->dev_id == dev_id) { | |
926 | if (action->thread) | |
927 | __irq_wake_thread(desc, action); | |
928 | break; | |
929 | } | |
930 | } | |
931 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
932 | } | |
933 | EXPORT_SYMBOL_GPL(irq_wake_thread); | |
934 | ||
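Illustrative sketch (hypothetical names): a driver that notices pending work outside its hard handler, for example from a poll timer, can kick the threaded handler registered for the same dev_id:

    irq_wake_thread(md->irq, md);   /* as if the hard handler returned IRQ_WAKE_THREAD */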
8d32a307 TG |
935 | static void irq_setup_forced_threading(struct irqaction *new) |
936 | { | |
937 | if (!force_irqthreads) | |
938 | return; | |
939 | if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) | |
940 | return; | |
941 | ||
942 | new->flags |= IRQF_ONESHOT; | |
943 | ||
944 | if (!new->thread_fn) { | |
945 | set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); | |
946 | new->thread_fn = new->handler; | |
947 | new->handler = irq_default_primary_handler; | |
948 | } | |
949 | } | |
950 | ||
c1bacbae TG |
951 | static int irq_request_resources(struct irq_desc *desc) |
952 | { | |
953 | struct irq_data *d = &desc->irq_data; | |
954 | struct irq_chip *c = d->chip; | |
955 | ||
956 | return c->irq_request_resources ? c->irq_request_resources(d) : 0; | |
957 | } | |
958 | ||
959 | static void irq_release_resources(struct irq_desc *desc) | |
960 | { | |
961 | struct irq_data *d = &desc->irq_data; | |
962 | struct irq_chip *c = d->chip; | |
963 | ||
964 | if (c->irq_release_resources) | |
965 | c->irq_release_resources(d); | |
966 | } | |
967 | ||
1da177e4 LT |
968 | /* |
969 | * Internal function to register an irqaction - typically used to | |
970 | * allocate special interrupts that are part of the architecture. | |
971 | */ | |
d3c60047 | 972 | static int |
327ec569 | 973 | __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) |
1da177e4 | 974 | { |
f17c7545 | 975 | struct irqaction *old, **old_ptr; |
b5faba21 | 976 | unsigned long flags, thread_mask = 0; |
3b8249e7 TG |
977 | int ret, nested, shared = 0; |
978 | cpumask_var_t mask; | |
1da177e4 | 979 | |
7d94f7ca | 980 | if (!desc) |
c2b5a251 MW |
981 | return -EINVAL; |
982 | ||
6b8ff312 | 983 | if (desc->irq_data.chip == &no_irq_chip) |
1da177e4 | 984 | return -ENOSYS; |
b6873807 SAS |
985 | if (!try_module_get(desc->owner)) |
986 | return -ENODEV; | |
1da177e4 | 987 | |
3aa551c9 | 988 | /* |
399b5da2 TG |
989 | * Check whether the interrupt nests into another interrupt |
990 | * thread. | |
991 | */ | |
1ccb4e61 | 992 | nested = irq_settings_is_nested_thread(desc); |
399b5da2 | 993 | if (nested) { |
b6873807 SAS |
994 | if (!new->thread_fn) { |
995 | ret = -EINVAL; | |
996 | goto out_mput; | |
997 | } | |
399b5da2 TG |
998 | /* |
999 | * Replace the primary handler which was provided from | |
1000 | * the driver for non nested interrupt handling by the | |
1001 | * dummy function which warns when called. | |
1002 | */ | |
1003 | new->handler = irq_nested_primary_handler; | |
8d32a307 | 1004 | } else { |
7f1b1244 PM |
1005 | if (irq_settings_can_thread(desc)) |
1006 | irq_setup_forced_threading(new); | |
399b5da2 TG |
1007 | } |
1008 | ||
3aa551c9 | 1009 | /* |
399b5da2 TG |
1010 | * Create a handler thread when a thread function is supplied |
1011 | * and the interrupt does not nest into another interrupt | |
1012 | * thread. | |
3aa551c9 | 1013 | */ |
399b5da2 | 1014 | if (new->thread_fn && !nested) { |
3aa551c9 | 1015 | struct task_struct *t; |
ee238713 IS |
1016 | static const struct sched_param param = { |
1017 | .sched_priority = MAX_USER_RT_PRIO/2, | |
1018 | }; | |
3aa551c9 TG |
1019 | |
1020 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, | |
1021 | new->name); | |
b6873807 SAS |
1022 | if (IS_ERR(t)) { |
1023 | ret = PTR_ERR(t); | |
1024 | goto out_mput; | |
1025 | } | |
ee238713 | 1026 | |
bbfe65c2 | 1027 | sched_setscheduler_nocheck(t, SCHED_FIFO, ¶m); |
ee238713 | 1028 | |
3aa551c9 TG |
1029 | /* |
1030 | * We keep the reference to the task struct even if | |
1031 | * the thread dies to avoid that the interrupt code | |
1032 | * references an already freed task_struct. | |
1033 | */ | |
1034 | get_task_struct(t); | |
1035 | new->thread = t; | |
04aa530e TG |
1036 | /* |
1037 | * Tell the thread to set its affinity. This is | |
1038 | * important for shared interrupt handlers as we do | |
1039 | * not invoke setup_affinity() for the secondary | |
1040 | * handlers as everything is already set up. Even for | |
1041 | * interrupts marked with IRQF_NOBALANCING this is | |
1042 | * correct as we want the thread to move to the cpu(s) | |
1043 | * on which the requesting code placed the interrupt. | |
1044 | */ | |
1045 | set_bit(IRQTF_AFFINITY, &new->thread_flags); | |
3aa551c9 TG |
1046 | } |
1047 | ||
3b8249e7 TG |
1048 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { |
1049 | ret = -ENOMEM; | |
1050 | goto out_thread; | |
1051 | } | |
1052 | ||
dc9b229a TG |
1053 | /* |
1054 | * Drivers are often written to work w/o knowledge about the | |
1055 | * underlying irq chip implementation, so a request for a | |
1056 | * threaded irq without a primary hard irq context handler | |
1057 | * requires the ONESHOT flag to be set. Some irq chips like | |
1058 | * MSI based interrupts are per se one shot safe. Check the | |
1059 | * chip flags, so we can avoid the unmask dance at the end of | |
1060 | * the threaded handler for those. | |
1061 | */ | |
1062 | if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) | |
1063 | new->flags &= ~IRQF_ONESHOT; | |
1064 | ||
1da177e4 LT |
1065 | /* |
1066 | * The following block of code has to be executed atomically | |
1067 | */ | |
239007b8 | 1068 | raw_spin_lock_irqsave(&desc->lock, flags); |
f17c7545 IM |
1069 | old_ptr = &desc->action; |
1070 | old = *old_ptr; | |
06fcb0c6 | 1071 | if (old) { |
e76de9f8 TG |
1072 | /* |
1073 | * Can't share interrupts unless both agree to and are | |
1074 | * the same type (level, edge, polarity). So both flag | |
3cca53b0 | 1075 | * fields must have IRQF_SHARED set and the bits which |
9d591edd TG |
1076 | * set the trigger type must match. Also all must |
1077 | * agree on ONESHOT. | |
e76de9f8 | 1078 | */ |
3cca53b0 | 1079 | if (!((old->flags & new->flags) & IRQF_SHARED) || |
9d591edd | 1080 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) || |
f5d89470 | 1081 | ((old->flags ^ new->flags) & IRQF_ONESHOT)) |
f5163427 DS |
1082 | goto mismatch; |
1083 | ||
f5163427 | 1084 | /* All handlers must agree on per-cpuness */ |
3cca53b0 TG |
1085 | if ((old->flags & IRQF_PERCPU) != |
1086 | (new->flags & IRQF_PERCPU)) | |
f5163427 | 1087 | goto mismatch; |
1da177e4 LT |
1088 | |
1089 | /* add new interrupt at end of irq queue */ | |
1090 | do { | |
52abb700 TG |
1091 | /* |
1092 | * Or all existing action->thread_mask bits, | |
1093 | * so we can find the next zero bit for this | |
1094 | * new action. | |
1095 | */ | |
b5faba21 | 1096 | thread_mask |= old->thread_mask; |
f17c7545 IM |
1097 | old_ptr = &old->next; |
1098 | old = *old_ptr; | |
1da177e4 LT |
1099 | } while (old); |
1100 | shared = 1; | |
1101 | } | |
1102 | ||
b5faba21 | 1103 | /* |
52abb700 TG |
1104 | * Setup the thread mask for this irqaction for ONESHOT. For |
1105 | * !ONESHOT irqs the thread mask is 0 so we can avoid a | |
1106 | * conditional in irq_wake_thread(). | |
b5faba21 | 1107 | */ |
52abb700 TG |
1108 | if (new->flags & IRQF_ONESHOT) { |
1109 | /* | |
1110 | * Unlikely to have 32 resp 64 irqs sharing one line, | |
1111 | * but who knows. | |
1112 | */ | |
1113 | if (thread_mask == ~0UL) { | |
1114 | ret = -EBUSY; | |
1115 | goto out_mask; | |
1116 | } | |
1117 | /* | |
1118 | * The thread_mask for the action is or'ed to | |
1119 | * desc->threads_active to indicate that the | |
1120 | * IRQF_ONESHOT thread handler has been woken, but not | |
1121 | * yet finished. The bit is cleared when a thread | |
1122 | * completes. When all threads of a shared interrupt | |
1123 | * line have completed desc->threads_active becomes | |
1124 | * zero and the interrupt line is unmasked. See | |
1125 | * handle.c:irq_wake_thread() for further information. | |
1126 | * | |
1127 | * If no thread is woken by primary (hard irq context) | |
1128 | * interrupt handlers, then desc->threads_active is | |
1129 | * also checked for zero to unmask the irq line in the | |
1130 | * affected hard irq flow handlers | |
1131 | * (handle_[fasteoi|level]_irq). | |
1132 | * | |
1133 | * The new action gets the first zero bit of | |
1134 | * thread_mask assigned. See the loop above which or's | |
1135 | * all existing action->thread_mask bits. | |
1136 | */ | |
1137 | new->thread_mask = 1 << ffz(thread_mask); | |
1c6c6952 | 1138 | |
dc9b229a TG |
1139 | } else if (new->handler == irq_default_primary_handler && |
1140 | !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { | |
1c6c6952 TG |
1141 | /* |
1142 | * The interrupt was requested with handler = NULL, so | |
1143 | * we use the default primary handler for it. But it | |
1144 | * does not have the oneshot flag set. In combination | |
1145 | * with level interrupts this is deadly, because the | |
1146 | * default primary handler just wakes the thread, then | |
1147 | * the irq line is reenabled, but the device still | |
1148 | * has the level irq asserted. Rinse and repeat.... | |
1149 | * | |
1150 | * While this works for edge type interrupts, we play | |
1151 | * it safe and reject unconditionally because we can't | |
1152 | * say for sure which type this interrupt really | |
1153 | * has. The type flags are unreliable as the | |
1154 | * underlying chip implementation can override them. | |
1155 | */ | |
97fd75b7 | 1156 | pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n", |
1c6c6952 TG |
1157 | irq); |
1158 | ret = -EINVAL; | |
1159 | goto out_mask; | |
b5faba21 | 1160 | } |
b5faba21 | 1161 | |
1da177e4 | 1162 | if (!shared) { |
c1bacbae TG |
1163 | ret = irq_request_resources(desc); |
1164 | if (ret) { | |
1165 | pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", | |
1166 | new->name, irq, desc->irq_data.chip->name); | |
1167 | goto out_mask; | |
1168 | } | |
1169 | ||
3aa551c9 TG |
1170 | init_waitqueue_head(&desc->wait_for_threads); |
1171 | ||
e76de9f8 | 1172 | /* Setup the type (level, edge polarity) if configured: */ |
3cca53b0 | 1173 | if (new->flags & IRQF_TRIGGER_MASK) { |
f2b662da DB |
1174 | ret = __irq_set_trigger(desc, irq, |
1175 | new->flags & IRQF_TRIGGER_MASK); | |
82736f4d | 1176 | |
3aa551c9 | 1177 | if (ret) |
3b8249e7 | 1178 | goto out_mask; |
091738a2 | 1179 | } |
6a6de9ef | 1180 | |
009b4c3b | 1181 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ |
32f4125e TG |
1182 | IRQS_ONESHOT | IRQS_WAITING); |
1183 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); | |
94d39e1f | 1184 | |
a005677b TG |
1185 | if (new->flags & IRQF_PERCPU) { |
1186 | irqd_set(&desc->irq_data, IRQD_PER_CPU); | |
1187 | irq_settings_set_per_cpu(desc); | |
1188 | } | |
6a58fb3b | 1189 | |
b25c340c | 1190 | if (new->flags & IRQF_ONESHOT) |
3d67baec | 1191 | desc->istate |= IRQS_ONESHOT; |
b25c340c | 1192 | |
1ccb4e61 | 1193 | if (irq_settings_can_autoenable(desc)) |
b4bc724e | 1194 | irq_startup(desc, true); |
46999238 | 1195 | else |
e76de9f8 TG |
1196 | /* Undo nested disables: */ |
1197 | desc->depth = 1; | |
18404756 | 1198 | |
612e3684 | 1199 | /* Exclude IRQ from balancing if requested */ |
a005677b TG |
1200 | if (new->flags & IRQF_NOBALANCING) { |
1201 | irq_settings_set_no_balancing(desc); | |
1202 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); | |
1203 | } | |
612e3684 | 1204 | |
18404756 | 1205 | /* Set default affinity mask once everything is setup */ |
3b8249e7 | 1206 | setup_affinity(irq, desc, mask); |
0c5d1eb7 | 1207 | |
876dbd4c TG |
1208 | } else if (new->flags & IRQF_TRIGGER_MASK) { |
1209 | unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; | |
1210 | unsigned int omsk = irq_settings_get_trigger_mask(desc); | |
1211 | ||
1212 | if (nmsk != omsk) | |
1213 | /* hope the handler works with current trigger mode */ | |
97fd75b7 | 1214 | pr_warning("irq %d uses trigger mode %u; requested %u\n", |
876dbd4c | 1215 | irq, nmsk, omsk); |
1da177e4 | 1216 | } |
82736f4d | 1217 | |
69ab8494 | 1218 | new->irq = irq; |
f17c7545 | 1219 | *old_ptr = new; |
82736f4d | 1220 | |
8528b0f1 LT |
1221 | /* Reset broken irq detection when installing new handler */ |
1222 | desc->irq_count = 0; | |
1223 | desc->irqs_unhandled = 0; | |
1adb0850 TG |
1224 | |
1225 | /* | |
1226 | * Check whether we disabled the irq via the spurious handler | |
1227 | * before. Reenable it and give it another chance. | |
1228 | */ | |
7acdd53e TG |
1229 | if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { |
1230 | desc->istate &= ~IRQS_SPURIOUS_DISABLED; | |
0a0c5168 | 1231 | __enable_irq(desc, irq, false); |
1adb0850 TG |
1232 | } |
1233 | ||
239007b8 | 1234 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1da177e4 | 1235 | |
69ab8494 TG |
1236 | /* |
1237 | * Strictly no need to wake it up, but hung_task complains | |
1238 | * when no hard interrupt wakes the thread up. | |
1239 | */ | |
1240 | if (new->thread) | |
1241 | wake_up_process(new->thread); | |
1242 | ||
2c6927a3 | 1243 | register_irq_proc(irq, desc); |
1da177e4 LT |
1244 | new->dir = NULL; |
1245 | register_handler_proc(irq, new); | |
4f5058c3 | 1246 | free_cpumask_var(mask); |
1da177e4 LT |
1247 | |
1248 | return 0; | |
f5163427 DS |
1249 | |
1250 | mismatch: | |
3cca53b0 | 1251 | if (!(new->flags & IRQF_PROBE_SHARED)) { |
97fd75b7 | 1252 | pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n", |
f5d89470 TG |
1253 | irq, new->flags, new->name, old->flags, old->name); |
1254 | #ifdef CONFIG_DEBUG_SHIRQ | |
13e87ec6 | 1255 | dump_stack(); |
3f050447 | 1256 | #endif |
f5d89470 | 1257 | } |
3aa551c9 TG |
1258 | ret = -EBUSY; |
1259 | ||
3b8249e7 | 1260 | out_mask: |
1c389795 | 1261 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
3b8249e7 TG |
1262 | free_cpumask_var(mask); |
1263 | ||
3aa551c9 | 1264 | out_thread: |
3aa551c9 TG |
1265 | if (new->thread) { |
1266 | struct task_struct *t = new->thread; | |
1267 | ||
1268 | new->thread = NULL; | |
05d74efa | 1269 | kthread_stop(t); |
3aa551c9 TG |
1270 | put_task_struct(t); |
1271 | } | |
b6873807 SAS |
1272 | out_mput: |
1273 | module_put(desc->owner); | |
3aa551c9 | 1274 | return ret; |
1da177e4 LT |
1275 | } |
1276 | ||
d3c60047 TG |
1277 | /** |
1278 | * setup_irq - setup an interrupt | |
1279 | * @irq: Interrupt line to setup | |
1280 | * @act: irqaction for the interrupt | |
1281 | * | |
1282 | * Used to statically setup interrupts in the early boot process. | |
1283 | */ | |
1284 | int setup_irq(unsigned int irq, struct irqaction *act) | |
1285 | { | |
986c011d | 1286 | int retval; |
d3c60047 TG |
1287 | struct irq_desc *desc = irq_to_desc(irq); |
1288 | ||
31d9d9b6 MZ |
1289 | if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
1290 | return -EINVAL; | |
986c011d DD |
1291 | chip_bus_lock(desc); |
1292 | retval = __setup_irq(irq, desc, act); | |
1293 | chip_bus_sync_unlock(desc); | |
1294 | ||
1295 | return retval; | |
d3c60047 | 1296 | } |
eb53b4e8 | 1297 | EXPORT_SYMBOL_GPL(setup_irq); |
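Illustrative sketch of the early-boot pattern this serves (hypothetical handler and IRQ number): architecture timer code registers a static irqaction before the regular request_irq() path is usable:

    static struct irqaction my_timer_irqaction = {
            .handler = my_timer_interrupt,
            .flags   = IRQF_TIMER,
            .name    = "my-timer",
    };

    setup_irq(MY_TIMER_IRQ, &my_timer_irqaction);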
d3c60047 | 1298 | |
31d9d9b6 | 1299 | /* |
cbf94f06 MD |
1300 | * Internal function to unregister an irqaction - used to free |
1301 | * regular and special interrupts that are part of the architecture. | |
1da177e4 | 1302 | */ |
cbf94f06 | 1303 | static struct irqaction *__free_irq(unsigned int irq, void *dev_id) |
1da177e4 | 1304 | { |
d3c60047 | 1305 | struct irq_desc *desc = irq_to_desc(irq); |
f17c7545 | 1306 | struct irqaction *action, **action_ptr; |
1da177e4 LT |
1307 | unsigned long flags; |
1308 | ||
ae88a23b | 1309 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); |
7d94f7ca | 1310 | |
7d94f7ca | 1311 | if (!desc) |
f21cfb25 | 1312 | return NULL; |
1da177e4 | 1313 | |
239007b8 | 1314 | raw_spin_lock_irqsave(&desc->lock, flags); |
ae88a23b IM |
1315 | |
1316 | /* | |
1317 | * There can be multiple actions per IRQ descriptor, find the right | |
1318 | * one based on the dev_id: | |
1319 | */ | |
f17c7545 | 1320 | action_ptr = &desc->action; |
1da177e4 | 1321 | for (;;) { |
f17c7545 | 1322 | action = *action_ptr; |
1da177e4 | 1323 | |
ae88a23b IM |
1324 | if (!action) { |
1325 | WARN(1, "Trying to free already-free IRQ %d\n", irq); | |
239007b8 | 1326 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1da177e4 | 1327 | |
f21cfb25 | 1328 | return NULL; |
ae88a23b | 1329 | } |
1da177e4 | 1330 | |
8316e381 IM |
1331 | if (action->dev_id == dev_id) |
1332 | break; | |
f17c7545 | 1333 | action_ptr = &action->next; |
ae88a23b | 1334 | } |
dbce706e | 1335 | |
ae88a23b | 1336 | /* Found it - now remove it from the list of entries: */ |
f17c7545 | 1337 | *action_ptr = action->next; |
ae88a23b | 1338 | |
ae88a23b | 1339 | /* If this was the last handler, shut down the IRQ line: */ |
c1bacbae | 1340 | if (!desc->action) { |
46999238 | 1341 | irq_shutdown(desc); |
c1bacbae TG |
1342 | irq_release_resources(desc); |
1343 | } | |
3aa551c9 | 1344 | |
e7a297b0 PWJ |
1345 | #ifdef CONFIG_SMP |
1346 | /* make sure affinity_hint is cleaned up */ | |
1347 | if (WARN_ON_ONCE(desc->affinity_hint)) | |
1348 | desc->affinity_hint = NULL; | |
1349 | #endif | |
1350 | ||
239007b8 | 1351 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
ae88a23b IM |
1352 | |
1353 | unregister_handler_proc(irq, action); | |
1354 | ||
1355 | /* Make sure it's not being used on another CPU: */ | |
1356 | synchronize_irq(irq); | |
1da177e4 | 1357 | |
70edcd77 | 1358 | #ifdef CONFIG_DEBUG_SHIRQ |
ae88a23b IM |
1359 | /* |
1360 | * It's a shared IRQ -- the driver ought to be prepared for an IRQ | |
1361 | * event to happen even now it's being freed, so let's make sure that | |
1362 | * is so by doing an extra call to the handler .... | |
1363 | * | |
1364 | * ( We do this after actually deregistering it, to make sure that a | |
1365 | * 'real' IRQ doesn't run in parallel with our fake. ) | |
1366 | */ | |
1367 | if (action->flags & IRQF_SHARED) { | |
1368 | local_irq_save(flags); | |
1369 | action->handler(irq, dev_id); | |
1370 | local_irq_restore(flags); | |
1da177e4 | 1371 | } |
ae88a23b | 1372 | #endif |
2d860ad7 LT |
1373 | |
1374 | if (action->thread) { | |
05d74efa | 1375 | kthread_stop(action->thread); |
2d860ad7 LT |
1376 | put_task_struct(action->thread); |
1377 | } | |
1378 | ||
b6873807 | 1379 | module_put(desc->owner); |
f21cfb25 MD |
1380 | return action; |
1381 | } | |
1382 | ||
cbf94f06 MD |
1383 | /** |
1384 | * remove_irq - free an interrupt | |
1385 | * @irq: Interrupt line to free | |
1386 | * @act: irqaction for the interrupt | |
1387 | * | |
1388 | * Used to remove interrupts statically setup by the early boot process. | |
1389 | */ | |
1390 | void remove_irq(unsigned int irq, struct irqaction *act) | |
1391 | { | |
31d9d9b6 MZ |
1392 | struct irq_desc *desc = irq_to_desc(irq); |
1393 | ||
1394 | if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) | |
1395 | __free_irq(irq, act->dev_id); | |
cbf94f06 | 1396 | } |
eb53b4e8 | 1397 | EXPORT_SYMBOL_GPL(remove_irq); |
cbf94f06 | 1398 | |
f21cfb25 MD |
1399 | /** |
1400 | * free_irq - free an interrupt allocated with request_irq | |
1401 | * @irq: Interrupt line to free | |
1402 | * @dev_id: Device identity to free | |
1403 | * | |
1404 | * Remove an interrupt handler. The handler is removed and if the | |
1405 | * interrupt line is no longer in use by any driver it is disabled. | |
1406 | * On a shared IRQ the caller must ensure the interrupt is disabled | |
1407 | * on the card it drives before calling this function. The function | |
1408 | * does not return until any executing interrupts for this IRQ | |
1409 | * have completed. | |
1410 | * | |
1411 | * This function must not be called from interrupt context. | |
1412 | */ | |
1413 | void free_irq(unsigned int irq, void *dev_id) | |
1414 | { | |
70aedd24 TG |
1415 | struct irq_desc *desc = irq_to_desc(irq); |
1416 | ||
31d9d9b6 | 1417 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
70aedd24 TG |
1418 | return; |
1419 | ||
cd7eab44 BH |
1420 | #ifdef CONFIG_SMP |
1421 | if (WARN_ON(desc->affinity_notify)) | |
1422 | desc->affinity_notify = NULL; | |
1423 | #endif | |
1424 | ||
3876ec9e | 1425 | chip_bus_lock(desc); |
cbf94f06 | 1426 | kfree(__free_irq(irq, dev_id)); |
3876ec9e | 1427 | chip_bus_sync_unlock(desc); |
1da177e4 | 1428 | } |
1da177e4 LT |
1429 | EXPORT_SYMBOL(free_irq); |
1430 | ||
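Illustrative pairing (hypothetical handlers and names): a line obtained with request_threaded_irq() is released with free_irq() using the same dev_id cookie, from process context, after any affinity hint or notifier has been cleared:

    ret = request_threaded_irq(md->irq, my_hard_handler, my_thread_fn,
                               IRQF_SHARED, "my-dev", md);
    if (ret)
            return ret;
    ...
    free_irq(md->irq, md);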
1431 | /** | |
3aa551c9 | 1432 | * request_threaded_irq - allocate an interrupt line |
1da177e4 | 1433 | * @irq: Interrupt line to allocate |
3aa551c9 TG |
1434 | * @handler: Function to be called when the IRQ occurs. |
1435 | * Primary handler for threaded interrupts | |
b25c340c TG |
1436 | * If NULL and thread_fn != NULL the default |
1437 | * primary handler is installed | |
f48fe81e TG |
1438 | * @thread_fn: Function called from the irq handler thread |
1439 | * If NULL, no irq thread is created | |
1da177e4 LT |
1440 | * @irqflags: Interrupt type flags |
1441 | * @devname: An ascii name for the claiming device | |
1442 | * @dev_id: A cookie passed back to the handler function | |
1443 | * | |
1444 | * This call allocates interrupt resources and enables the | |
1445 | * interrupt line and IRQ handling. From the point this | |
1446 | * call is made your handler function may be invoked. Since | |
1447 | * your handler function must clear any interrupt the board | |
1448 | * raises, you must take care both to initialise your hardware | |
1449 | * and to set up the interrupt handler in the right order. | |
1450 | * | |
3aa551c9 | 1451 | * If you want to set up a threaded irq handler for your device |
6d21af4f | 1452 | * then you need to supply @handler and @thread_fn. @handler is |
3aa551c9 TG |
1453 | * still called in hard interrupt context and has to check |
1454 | * whether the interrupt originates from the device. If yes it | |
1455 | * needs to disable the interrupt on the device and return | |
39a2eddb | 1456 | * IRQ_WAKE_THREAD which will wake up the handler thread and run |
3aa551c9 TG |
1457 | * @thread_fn. This split handler design is necessary to support |
1458 | * shared interrupts. | |
1459 | * | |
1da177e4 LT |
1460 | * Dev_id must be globally unique. Normally the address of the |
1461 | * device data structure is used as the cookie. Since the handler | |
1462 | * receives this value it makes sense to use it. | |
1463 | * | |
1464 | * If your interrupt is shared you must pass a non NULL dev_id | |
1465 | * as this is required when freeing the interrupt. | |
1466 | * | |
1467 | * Flags: | |
1468 | * | |
3cca53b0 | 1469 | * IRQF_SHARED Interrupt is shared |
0c5d1eb7 | 1470 | * IRQF_TRIGGER_* Specify active edge(s) or level |
1da177e4 LT |
1471 | * |
1472 | */ | |
3aa551c9 TG |
1473 | int request_threaded_irq(unsigned int irq, irq_handler_t handler, |
1474 | irq_handler_t thread_fn, unsigned long irqflags, | |
1475 | const char *devname, void *dev_id) | |
1da177e4 | 1476 | { |
06fcb0c6 | 1477 | struct irqaction *action; |
08678b08 | 1478 | struct irq_desc *desc; |
d3c60047 | 1479 | int retval; |
1da177e4 LT |
1480 | |
1481 | /* | |
1482 | * Sanity-check: shared interrupts must pass in a real dev-ID, | |
1483 | * otherwise we'll have trouble later trying to figure out | |
1484 | * which interrupt is which (messes up the interrupt freeing | |
1485 | * logic etc). | |
1486 | */ | |
3cca53b0 | 1487 | if ((irqflags & IRQF_SHARED) && !dev_id) |
1da177e4 | 1488 | return -EINVAL; |
7d94f7ca | 1489 | |
cb5bc832 | 1490 | desc = irq_to_desc(irq); |
7d94f7ca | 1491 | if (!desc) |
1da177e4 | 1492 | return -EINVAL; |
7d94f7ca | 1493 | |
31d9d9b6 MZ |
1494 | if (!irq_settings_can_request(desc) || |
1495 | WARN_ON(irq_settings_is_per_cpu_devid(desc))) | |
6550c775 | 1496 | return -EINVAL; |
b25c340c TG |
1497 | |
1498 | if (!handler) { | |
1499 | if (!thread_fn) | |
1500 | return -EINVAL; | |
1501 | handler = irq_default_primary_handler; | |
1502 | } | |
1da177e4 | 1503 | |
45535732 | 1504 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); |
1da177e4 LT |
1505 | if (!action) |
1506 | return -ENOMEM; | |
1507 | ||
1508 | action->handler = handler; | |
3aa551c9 | 1509 | action->thread_fn = thread_fn; |
1da177e4 | 1510 | action->flags = irqflags; |
1da177e4 | 1511 | action->name = devname; |
1da177e4 LT |
1512 | action->dev_id = dev_id; |
1513 | ||
3876ec9e | 1514 | chip_bus_lock(desc); |
d3c60047 | 1515 | retval = __setup_irq(irq, desc, action); |
3876ec9e | 1516 | chip_bus_sync_unlock(desc); |
70aedd24 | 1517 | |
377bf1e4 AV |
1518 | if (retval) |
1519 | kfree(action); | |
1520 | ||
6d83f94d | 1521 | #ifdef CONFIG_DEBUG_SHIRQ_FIXME |
6ce51c43 | 1522 | if (!retval && (irqflags & IRQF_SHARED)) { |
a304e1b8 DW |
1523 | /* |
1524 | * It's a shared IRQ -- the driver ought to be prepared for it | |
1525 | * to happen immediately, so let's make sure.... | |
377bf1e4 AV |
1526 | * We disable the irq to make sure that a 'real' IRQ doesn't |
1527 | * run in parallel with our fake. | |
a304e1b8 | 1528 | */ |
59845b1f | 1529 | unsigned long flags; |
a304e1b8 | 1530 | |
377bf1e4 | 1531 | disable_irq(irq); |
59845b1f | 1532 | local_irq_save(flags); |
377bf1e4 | 1533 | |
59845b1f | 1534 | handler(irq, dev_id); |
377bf1e4 | 1535 | |
59845b1f | 1536 | local_irq_restore(flags); |
377bf1e4 | 1537 | enable_irq(irq); |
a304e1b8 DW |
1538 | } |
1539 | #endif | |
1da177e4 LT |
1540 | return retval; |
1541 | } | |
3aa551c9 | 1542 | EXPORT_SYMBOL(request_threaded_irq); |
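/*
 * Illustrative sketch, not part of manage.c: the split handler design
 * described above. The primary handler runs in hard interrupt context,
 * checks whether its device actually raised the interrupt, masks the
 * source and returns IRQ_WAKE_THREAD; the heavy, possibly sleeping work
 * is done in the threaded handler. "my_dev" and its helpers are
 * hypothetical.
 */
static irqreturn_t my_primary_handler(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	if (!my_dev_irq_pending(dev))
		return IRQ_NONE;		/* not ours (shared line) */

	my_dev_mask_irq(dev);			/* silence the source */
	return IRQ_WAKE_THREAD;			/* run my_thread_fn() */
}

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	my_dev_process_events(dev);		/* may sleep: irq thread context */
	my_dev_unmask_irq(dev);
	return IRQ_HANDLED;
}

static int my_dev_request_irq(struct my_dev *dev)
{
	return request_threaded_irq(dev->irq, my_primary_handler,
				    my_thread_fn, IRQF_SHARED,
				    "my_dev", dev);
}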
ae731f8d MZ |
1543 | |
1544 | /** | |
1545 | * request_any_context_irq - allocate an interrupt line | |
1546 | * @irq: Interrupt line to allocate | |
1547 | * @handler: Function to be called when the IRQ occurs. | |
1548 | * Threaded handler for threaded interrupts. | |
1549 | * @flags: Interrupt type flags | |
1550 | * @name: An ascii name for the claiming device | |
1551 | * @dev_id: A cookie passed back to the handler function | |
1552 | * | |
1553 | * This call allocates interrupt resources and enables the | |
1554 | * interrupt line and IRQ handling. It selects either a | |
1555 | * hardirq or threaded handling method depending on the | |
1556 | * context. | |
1557 | * | |
1558 | * On failure, it returns a negative value. On success, | |
1559 | * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. | |
1560 | */ | |
1561 | int request_any_context_irq(unsigned int irq, irq_handler_t handler, | |
1562 | unsigned long flags, const char *name, void *dev_id) | |
1563 | { | |
1564 | struct irq_desc *desc = irq_to_desc(irq); | |
1565 | int ret; | |
1566 | ||
1567 | if (!desc) | |
1568 | return -EINVAL; | |
1569 | ||
1ccb4e61 | 1570 | if (irq_settings_is_nested_thread(desc)) { |
ae731f8d MZ |
1571 | ret = request_threaded_irq(irq, NULL, handler, |
1572 | flags, name, dev_id); | |
1573 | return !ret ? IRQC_IS_NESTED : ret; | |
1574 | } | |
1575 | ||
1576 | ret = request_irq(irq, handler, flags, name, dev_id); | |
1577 | return !ret ? IRQC_IS_HARDIRQ : ret; | |
1578 | } | |
1579 | EXPORT_SYMBOL_GPL(request_any_context_irq); | |
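/*
 * Illustrative sketch, not part of manage.c: request_any_context_irq() is
 * useful when the same driver may sit behind either a regular irq chip or
 * a nested one (e.g. an interrupt demultiplexed by a slow-bus device).
 * On success the return value tells the caller which method was chosen.
 * "my_dev" and "my_handler" are hypothetical.
 */
static int my_dev_request_irq(struct my_dev *dev)
{
	int ret;

	ret = request_any_context_irq(dev->irq, my_handler,
				      IRQF_TRIGGER_LOW, "my_dev", dev);
	if (ret < 0)
		return ret;			/* request failed */

	dev->irq_is_nested = (ret == IRQC_IS_NESTED);
	return 0;
}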
31d9d9b6 | 1580 | |
1e7c5fd2 | 1581 | void enable_percpu_irq(unsigned int irq, unsigned int type) |
31d9d9b6 MZ |
1582 | { |
1583 | unsigned int cpu = smp_processor_id(); | |
1584 | unsigned long flags; | |
1585 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); | |
1586 | ||
1587 | if (!desc) | |
1588 | return; | |
1589 | ||
1e7c5fd2 MZ |
1590 | type &= IRQ_TYPE_SENSE_MASK; |
1591 | if (type != IRQ_TYPE_NONE) { | |
1592 | int ret; | |
1593 | ||
1594 | ret = __irq_set_trigger(desc, irq, type); | |
1595 | ||
1596 | if (ret) { | |
32cffdde | 1597 | WARN(1, "failed to set type for IRQ%d\n", irq); |
1e7c5fd2 MZ |
1598 | goto out; |
1599 | } | |
1600 | } | |
1601 | ||
31d9d9b6 | 1602 | irq_percpu_enable(desc, cpu); |
1e7c5fd2 | 1603 | out: |
31d9d9b6 MZ |
1604 | irq_put_desc_unlock(desc, flags); |
1605 | } | |
36a5df85 | 1606 | EXPORT_SYMBOL_GPL(enable_percpu_irq); |
31d9d9b6 MZ |
1607 | |
1608 | void disable_percpu_irq(unsigned int irq) | |
1609 | { | |
1610 | unsigned int cpu = smp_processor_id(); | |
1611 | unsigned long flags; | |
1612 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); | |
1613 | ||
1614 | if (!desc) | |
1615 | return; | |
1616 | ||
1617 | irq_percpu_disable(desc, cpu); | |
1618 | irq_put_desc_unlock(desc, flags); | |
1619 | } | |
36a5df85 | 1620 | EXPORT_SYMBOL_GPL(disable_percpu_irq); |
31d9d9b6 MZ |
1621 | |
1622 | /* | |
1623 | * Internal function to unregister a percpu irqaction. | |
1624 | */ | |
1625 | static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id) | |
1626 | { | |
1627 | struct irq_desc *desc = irq_to_desc(irq); | |
1628 | struct irqaction *action; | |
1629 | unsigned long flags; | |
1630 | ||
1631 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); | |
1632 | ||
1633 | if (!desc) | |
1634 | return NULL; | |
1635 | ||
1636 | raw_spin_lock_irqsave(&desc->lock, flags); | |
1637 | ||
1638 | action = desc->action; | |
1639 | if (!action || action->percpu_dev_id != dev_id) { | |
1640 | WARN(1, "Trying to free already-free IRQ %d\n", irq); | |
1641 | goto bad; | |
1642 | } | |
1643 | ||
1644 | if (!cpumask_empty(desc->percpu_enabled)) { | |
1645 | WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", | |
1646 | irq, cpumask_first(desc->percpu_enabled)); | |
1647 | goto bad; | |
1648 | } | |
1649 | ||
1650 | /* Found it - now remove it from the list of entries: */ | |
1651 | desc->action = NULL; | |
1652 | ||
1653 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
1654 | ||
1655 | unregister_handler_proc(irq, action); | |
1656 | ||
1657 | module_put(desc->owner); | |
1658 | return action; | |
1659 | ||
1660 | bad: | |
1661 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
1662 | return NULL; | |
1663 | } | |
1664 | ||
1665 | /** | |
1666 | * remove_percpu_irq - free a per-cpu interrupt | |
1667 | * @irq: Interrupt line to free | |
1668 | * @act: irqaction for the interrupt | |
1669 | * | |
1670 | * Used to remove interrupts statically set up by the early boot process. | 
1671 | */ | |
1672 | void remove_percpu_irq(unsigned int irq, struct irqaction *act) | |
1673 | { | |
1674 | struct irq_desc *desc = irq_to_desc(irq); | |
1675 | ||
1676 | if (desc && irq_settings_is_per_cpu_devid(desc)) | |
1677 | __free_percpu_irq(irq, act->percpu_dev_id); | |
1678 | } | |
1679 | ||
1680 | /** | |
1681 | * free_percpu_irq - free an interrupt allocated with request_percpu_irq | |
1682 | * @irq: Interrupt line to free | |
1683 | * @dev_id: Device identity to free | |
1684 | * | |
1685 | * Remove a percpu interrupt handler. The handler is removed, but | |
1686 | * the interrupt line is not disabled. This must be done on each | |
1687 | * CPU before calling this function. The function does not return | |
1688 | * until any executing interrupts for this IRQ have completed. | |
1689 | * | |
1690 | * This function must not be called from interrupt context. | |
1691 | */ | |
1692 | void free_percpu_irq(unsigned int irq, void __percpu *dev_id) | |
1693 | { | |
1694 | struct irq_desc *desc = irq_to_desc(irq); | |
1695 | ||
1696 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) | |
1697 | return; | |
1698 | ||
1699 | chip_bus_lock(desc); | |
1700 | kfree(__free_percpu_irq(irq, dev_id)); | |
1701 | chip_bus_sync_unlock(desc); | |
1702 | } | |
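/*
 * Illustrative sketch, not part of manage.c: as noted above, a percpu
 * interrupt must be disabled on every CPU before it is freed. Here
 * on_each_cpu() is used so that disable_percpu_irq() runs on each CPU in
 * turn. "my_ppi_irq" and "my_ppi_state" (the percpu cookie passed to
 * request_percpu_irq()) are hypothetical.
 */
static void my_disable_ppi(void *info)
{
	disable_percpu_irq(my_ppi_irq);		/* acts on the local CPU */
}

static void my_teardown_ppi(void)
{
	on_each_cpu(my_disable_ppi, NULL, 1);
	free_percpu_irq(my_ppi_irq, my_ppi_state);
}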
1703 | ||
1704 | /** | |
1705 | * setup_percpu_irq - setup a per-cpu interrupt | |
1706 | * @irq: Interrupt line to setup | |
1707 | * @act: irqaction for the interrupt | |
1708 | * | |
1709 | * Used to statically set up per-cpu interrupts in the early boot process. | 
1710 | */ | |
1711 | int setup_percpu_irq(unsigned int irq, struct irqaction *act) | |
1712 | { | |
1713 | struct irq_desc *desc = irq_to_desc(irq); | |
1714 | int retval; | |
1715 | ||
1716 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) | |
1717 | return -EINVAL; | |
1718 | chip_bus_lock(desc); | |
1719 | retval = __setup_irq(irq, desc, act); | |
1720 | chip_bus_sync_unlock(desc); | |
1721 | ||
1722 | return retval; | |
1723 | } | |
1724 | ||
1725 | /** | |
1726 | * request_percpu_irq - allocate a percpu interrupt line | |
1727 | * @irq: Interrupt line to allocate | |
1728 | * @handler: Function to be called when the IRQ occurs. | |
1729 | * @devname: An ascii name for the claiming device | |
1730 | * @dev_id: A percpu cookie passed back to the handler function | |
1731 | * | |
1732 | * This call allocates interrupt resources, but doesn't | |
1733 | * automatically enable the interrupt. It has to be done on each | |
1734 | * CPU using enable_percpu_irq(). | |
1735 | * | |
1736 | * Dev_id must be globally unique. It is a per-cpu variable, and | |
1737 | * the handler gets called with the interrupted CPU's instance of | |
1738 | * that variable. | |
1739 | */ | |
1740 | int request_percpu_irq(unsigned int irq, irq_handler_t handler, | |
1741 | const char *devname, void __percpu *dev_id) | |
1742 | { | |
1743 | struct irqaction *action; | |
1744 | struct irq_desc *desc; | |
1745 | int retval; | |
1746 | ||
1747 | if (!dev_id) | |
1748 | return -EINVAL; | |
1749 | ||
1750 | desc = irq_to_desc(irq); | |
1751 | if (!desc || !irq_settings_can_request(desc) || | |
1752 | !irq_settings_is_per_cpu_devid(desc)) | |
1753 | return -EINVAL; | |
1754 | ||
1755 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); | |
1756 | if (!action) | |
1757 | return -ENOMEM; | |
1758 | ||
1759 | action->handler = handler; | |
2ed0e645 | 1760 | action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND; |
31d9d9b6 MZ |
1761 | action->name = devname; |
1762 | action->percpu_dev_id = dev_id; | |
1763 | ||
1764 | chip_bus_lock(desc); | |
1765 | retval = __setup_irq(irq, desc, action); | |
1766 | chip_bus_sync_unlock(desc); | |
1767 | ||
1768 | if (retval) | |
1769 | kfree(action); | |
1770 | ||
1771 | return retval; | |
1772 | } |
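/*
 * Illustrative sketch, not part of manage.c: typical percpu interrupt
 * setup as described above - the line is requested once with a per-cpu
 * dev_id and then enabled separately on each CPU (usually from a CPU
 * online/hotplug callback). "my_ppi_irq", "my_ppi_handler" and
 * "struct my_ppi_state" are hypothetical.
 */
static struct my_ppi_state __percpu *my_ppi_state;

static int my_setup_ppi(void)
{
	int ret;

	my_ppi_state = alloc_percpu(struct my_ppi_state);
	if (!my_ppi_state)
		return -ENOMEM;

	ret = request_percpu_irq(my_ppi_irq, my_ppi_handler,
				 "my_ppi", my_ppi_state);
	if (ret) {
		free_percpu(my_ppi_state);
		return ret;
	}

	/* Enables on the local CPU only; repeat on every CPU that should
	 * receive this interrupt. */
	enable_percpu_irq(my_ppi_irq, IRQ_TYPE_NONE);
	return 0;
}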