]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef __irq_h |
2 | #define __irq_h | |
3 | ||
4 | /* | |
5 | * Please do not include this file in generic code. There is currently | |
6 | * no requirement for any architecture to implement anything held | |
7 | * within this file. | |
8 | * | |
9 | * Thanks. --rmk | |
10 | */ | |
11 | ||
12 | #include <linux/config.h> | |
13 | ||
14 | #if !defined(CONFIG_ARCH_S390) | |
15 | ||
16 | #include <linux/linkage.h> | |
17 | #include <linux/cache.h> | |
18 | #include <linux/spinlock.h> | |
19 | #include <linux/cpumask.h> | |
20 | ||
21 | #include <asm/irq.h> | |
22 | #include <asm/ptrace.h> | |
23 | ||
24 | /* | |
25 | * IRQ line status. | |
26 | */ | |
27 | #define IRQ_INPROGRESS 1 /* IRQ handler active - do not enter! */ | |
28 | #define IRQ_DISABLED 2 /* IRQ disabled - do not enter! */ | |
29 | #define IRQ_PENDING 4 /* IRQ pending - replay on enable */ | |
30 | #define IRQ_REPLAY 8 /* IRQ has been replayed but not acked yet */ | |
31 | #define IRQ_AUTODETECT 16 /* IRQ is being autodetected */ | |
32 | #define IRQ_WAITING 32 /* IRQ not yet seen - for autodetection */ | |
33 | #define IRQ_LEVEL 64 /* IRQ level triggered */ | |
34 | #define IRQ_MASKED 128 /* IRQ masked - shouldn't be seen again */ | |
35 | #define IRQ_PER_CPU 256 /* IRQ is per CPU */ | |
36 | ||
/*
 * Interrupt controller descriptor. This is all we need
 * to describe about the low-level hardware.
 *
 * One of these exists per interrupt controller type; irq_desc[irq].handler
 * points at the instance driving a given line.  All hooks take the global
 * irq number.
 */
struct hw_interrupt_type {
	const char * typename;				/* human-readable controller name */
	unsigned int (*startup)(unsigned int irq);	/* enable line for the first time
							 * (return value semantics not visible
							 * here -- see arch callers) */
	void (*shutdown)(unsigned int irq);		/* fully disable the line */
	void (*enable)(unsigned int irq);		/* unmask/re-enable */
	void (*disable)(unsigned int irq);		/* mask/disable */
	void (*ack)(unsigned int irq);			/* acknowledge at the controller */
	void (*end)(unsigned int irq);			/* end-of-handler processing */
	void (*set_affinity)(unsigned int irq, cpumask_t dest);	/* re-target to CPUs in dest;
								 * may be NULL (checked by callers) */
	/* Currently used only by UML, might disappear one day. */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	void (*release)(unsigned int irq, void *dev_id);
#endif
};
55 | ||
56 | typedef struct hw_interrupt_type hw_irq_controller; | |
57 | ||
58 | /* | |
59 | * This is the "IRQ descriptor", which contains various information | |
60 | * about the irq, including what kind of hardware handling it has, | |
61 | * whether it is disabled etc etc. | |
62 | * | |
63 | * Pad this out to 32 bytes for cache and indexing reasons. | |
64 | */ | |
/*
 * This is the "IRQ descriptor", which contains various information
 * about the irq, including what kind of hardware handling it has,
 * whether it is disabled etc etc.
 *
 * Pad this out to 32 bytes for cache and indexing reasons.
 */
typedef struct irq_desc {
	hw_irq_controller *handler;	/* low-level controller ops for this line */
	void *handler_data;		/* controller-private cookie */
	struct irqaction *action;	/* IRQ action list */
	unsigned int status;		/* IRQ status (IRQ_* flags above) */
	unsigned int depth;		/* nested irq disables */
	unsigned int irq_count;		/* For detecting broken interrupts */
	unsigned int irqs_unhandled;	/* presumably counts unhandled occurrences;
					 * updated via note_interrupt() -- confirm */
	spinlock_t lock;		/* protects this descriptor (see set_pending_irq) */
#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
	unsigned int move_irq;		/* Flag need to re-target intr dest*/
#endif
} ____cacheline_aligned irq_desc_t;
78 | ||
79 | extern irq_desc_t irq_desc [NR_IRQS]; | |
80 | ||
54d5d424 AR |
81 | /* Return a pointer to the irq descriptor for IRQ. */ |
82 | static inline irq_desc_t * | |
83 | irq_descp (int irq) | |
84 | { | |
85 | return irq_desc + irq; | |
86 | } | |
87 | ||
1da177e4 LT |
88 | #include <asm/hw_irq.h> /* the arch dependent stuff */ |
89 | ||
90 | extern int setup_irq(unsigned int irq, struct irqaction * new); | |
91 | ||
92 | #ifdef CONFIG_GENERIC_HARDIRQS | |
93 | extern cpumask_t irq_affinity[NR_IRQS]; | |
54d5d424 AR |
94 | |
95 | #ifdef CONFIG_SMP | |
96 | static inline void set_native_irq_info(int irq, cpumask_t mask) | |
97 | { | |
98 | irq_affinity[irq] = mask; | |
99 | } | |
100 | #else | |
/* UP stub: no affinity table to maintain without CONFIG_SMP. */
static inline void set_native_irq_info(int irq, cpumask_t mask)
{
}
104 | #endif | |
105 | ||
106 | #ifdef CONFIG_SMP | |
107 | ||
108 | #if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE) | |
109 | extern cpumask_t pending_irq_cpumask[NR_IRQS]; | |
110 | ||
111 | static inline void set_pending_irq(unsigned int irq, cpumask_t mask) | |
112 | { | |
113 | irq_desc_t *desc = irq_desc + irq; | |
114 | unsigned long flags; | |
115 | ||
116 | spin_lock_irqsave(&desc->lock, flags); | |
117 | desc->move_irq = 1; | |
118 | pending_irq_cpumask[irq] = mask; | |
119 | spin_unlock_irqrestore(&desc->lock, flags); | |
120 | } | |
121 | ||
/*
 * If set_pending_irq() flagged this IRQ for re-targeting, reprogram its
 * affinity to the pending mask now.  Caller must already hold desc->lock
 * (see the note at the cpus_and() below).  No-op when nothing is pending,
 * the pending mask is empty, or the controller cannot re-target.
 */
static inline void
move_native_irq(int irq)
{
	cpumask_t tmp;
	irq_desc_t *desc = irq_descp(irq);

	/* Fast path: no migration requested. */
	if (likely (!desc->move_irq))
		return;

	desc->move_irq = 0;

	/* Nothing recorded to migrate to. */
	if (likely(cpus_empty(pending_irq_cpumask[irq])))
		return;

	/* Controller has no re-target hook. */
	if (!desc->handler->set_affinity)
		return;

	/* note - we hold the desc->lock */
	cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in a edge trigger case, we might be setting rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to mal-function.
	 * Being paranoid i guess!
	 */
	if (unlikely(!cpus_empty(tmp))) {
		desc->handler->disable(irq);
		desc->handler->set_affinity(irq,tmp);
		desc->handler->enable(irq);
	}
	/* Pending request consumed (even if the online intersection was empty). */
	cpus_clear(pending_irq_cpumask[irq]);
}
158 | ||
159 | #ifdef CONFIG_PCI_MSI | |
/*
 * Wonder why these are dummies?
 * For e.g. the set_ioapic_affinity_vector() calls the set_ioapic_affinity_irq()
 * counterpart after translating the vector to irq info. We need to perform
 * this operation on the real irq, when we don't use vector, i.e. when
 * pci_use_vector() is false.
 */
/* No-op under CONFIG_PCI_MSI -- see the explanation above. */
static inline void move_irq(int irq)
{
}
170 | ||
/* No-op under CONFIG_PCI_MSI -- see the "Wonder why these are dummies?" comment. */
static inline void set_irq_info(int irq, cpumask_t mask)
{
}
174 | ||
175 | #else // CONFIG_PCI_MSI | |
176 | ||
/* Without CONFIG_PCI_MSI the real irq is migrated directly. */
static inline void move_irq(int irq)
{
	move_native_irq(irq);
}
181 | ||
/* Without CONFIG_PCI_MSI the affinity is recorded on the real irq. */
static inline void set_irq_info(int irq, cpumask_t mask)
{
	set_native_irq_info(irq, mask);
}
186 | #endif // CONFIG_PCI_MSI | |
187 | ||
188 | #else // CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE | |
189 | ||
190 | #define move_irq(x) | |
191 | #define move_native_irq(x) | |
192 | #define set_pending_irq(x,y) | |
/* No pending-irq machinery configured: just record the native affinity. */
static inline void set_irq_info(int irq, cpumask_t mask)
{
	set_native_irq_info(irq, mask);
}
197 | ||
198 | #endif // CONFIG_GENERIC_PENDING_IRQ | |
199 | ||
200 | #else // CONFIG_SMP | |
201 | ||
202 | #define move_irq(x) | |
203 | #define move_native_irq(x) | |
204 | ||
205 | #endif // CONFIG_SMP | |
206 | ||
1da177e4 LT |
207 | extern int no_irq_affinity; |
208 | extern int noirqdebug_setup(char *str); | |
209 | ||
210 | extern fastcall int handle_IRQ_event(unsigned int irq, struct pt_regs *regs, | |
200803df | 211 | struct irqaction *action); |
1da177e4 | 212 | extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs); |
200803df AC |
213 | extern void note_interrupt(unsigned int irq, irq_desc_t *desc, |
214 | int action_ret, struct pt_regs *regs); | |
1da177e4 LT |
215 | extern int can_request_irq(unsigned int irq, unsigned long irqflags); |
216 | ||
217 | extern void init_irq_proc(void); | |
218 | #endif | |
219 | ||
220 | extern hw_irq_controller no_irq_type; /* needed in every arch ? */ | |
221 | ||
222 | #endif | |
223 | ||
224 | #endif /* __irq_h */ |