#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

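/*
 * irq_move_masked_irq - perform a pending affinity change for @idata
 *
 * Called with desc->lock held and the interrupt masked by the caller.
 * Programs the target mask recorded in desc->pending_mask into the irq
 * chip; non-balanceable (e.g. per-cpu) interrupts are rejected with a
 * warning.
 */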
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = idata->chip;

	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (!irqd_can_balance(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}

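	/*
	 * The move-pending flag is cleared before the checks below, so
	 * an early return (empty mask, no irq_set_affinity callback)
	 * drops the request rather than leaving it pending.
	 */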
	irqd_clr_move_pending(&desc->irq_data);

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there is a valid mask to work with, do the disable,
	 * re-program, enable sequence. This is *not* particularly
	 * important for level-triggered interrupts, but in the
	 * edge-triggered case we might write the RTE while an active
	 * trigger is coming in, which could make some ioapics
	 * malfunction. Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller masking
	 * the irqs.
	 */
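	/*
	 * Only reprogram the chip if at least one pending target CPU is
	 * online. A zero return from chip->irq_set_affinity() means the
	 * new mask was accepted: record it in irq_data.affinity and let
	 * any threaded handlers follow the interrupt.
	 */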
	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
		   < nr_cpu_ids))
		if (!chip->irq_set_affinity(&desc->irq_data,
					    desc->pending_mask, false)) {
			cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
			irq_set_thread_affinity(desc);
		}

	cpumask_clear(desc->pending_mask);
}

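/*
 * Irq-number based wrapper around irq_move_masked_irq(); same calling
 * requirements apply.
 */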
void move_masked_irq(int irq)
{
	irq_move_masked_irq(irq_get_irq_data(irq));
}

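/*
 * irq_move_irq - like irq_move_masked_irq(), but for callers that have
 * not masked the interrupt
 *
 * Masks the interrupt around the move unless it is already masked, to
 * avoid an interrupt storm with ONESHOT threaded handlers, and bails
 * out while the interrupt is disabled.
 */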
void irq_move_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	bool masked;

	if (likely(!irqd_is_setaffinity_pending(idata)))
		return;

	if (unlikely(desc->istate & IRQS_DISABLED))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = desc->istate & IRQS_MASKED;
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}

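/*
 * Irq-number based wrapper around irq_move_irq().
 */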
void move_native_irq(int irq)
{
	irq_move_irq(irq_get_irq_data(irq));
}