// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
 * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, [email protected])
 * Copyright (C) 1999-2000 Grant Grundler
 * Copyright (c) 2005 Matthew Wilcox
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <linux/sched/task_stack.h>
#include <asm/io.h>

#include <asm/softirq_stack.h>
#include <asm/smp.h>
#include <asm/ldcw.h>

#undef PARISC_IRQ_CR16_COUNTS

extern irqreturn_t timer_interrupt(int, void *);
extern irqreturn_t ipi_interrupt(int, void *);

#define EIEM_MASK(irq)	(1UL<<(CPU_IRQ_MAX - irq))

/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (ie bit 0 is MSB)
*/
static volatile unsigned long cpu_eiem = 0;
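
/*
 * Example of the numbering: EIEM_MASK(CPU_IRQ_MAX) is 1UL, the
 * least-significant bit; each lower IRQ number selects the next
 * higher-order bit, so the IRQ numbered CPU_IRQ_MAX - (BITS_PER_LONG - 1)
 * owns the MSB (matching eirr_to_irq() below).
 */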

/*
** local ACK bitmap ... habitually set to 1, but reset to zero
** between ->ack() and ->end() of the interrupt to prevent
** re-interruption of a processing interrupt.
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;

static void cpu_mask_irq(struct irq_data *d)
{
	unsigned long eirr_bit = EIEM_MASK(d->irq);

	cpu_eiem &= ~eirr_bit;
	/* Do nothing on the other CPUs. If they get this interrupt,
	 * the & cpu_eiem in do_cpu_irq_mask() ensures they won't
	 * handle it, and the set_eiem() at the bottom will ensure it
	 * then gets disabled */
}

static void __cpu_unmask_irq(unsigned int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem |= eirr_bit;

	/* This is just a simple NOP IPI.  But what it does is cause
	 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
	 * of the interrupt handler */
	smp_send_all_nop();
}

static void cpu_unmask_irq(struct irq_data *d)
{
	__cpu_unmask_irq(d->irq);
}

void cpu_ack_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* Clear in EIEM so we can no longer process */
	per_cpu(local_ack_eiem, cpu) &= ~mask;

	/* disable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));

	/* and now ack it */
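	/* (%cr23 is the EIRR; writing a mask clears those pending bits) */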
	mtctl(mask, 23);
}

void cpu_eoi_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* set it in the eiems---it's no longer in process */
	per_cpu(local_ack_eiem, cpu) |= mask;

	/* enable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
}

#ifdef CONFIG_SMP
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
	int cpu_dest;

	/* timer and ipi have to always be received on all CPUs */
	if (irqd_is_per_cpu(d))
		return -EINVAL;

	/* whatever mask they set, we just allow one CPU */
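	/* (the IRQ number masked by the online-CPU count merely picks a
	 * starting point so IRQs spread across CPUs; the cpumask walk
	 * below finds the first CPU that is both requested and online) */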
	cpu_dest = cpumask_next_and(d->irq & (num_online_cpus()-1),
					dest, cpu_online_mask);
	if (cpu_dest >= nr_cpu_ids)
		cpu_dest = cpumask_first_and(dest, cpu_online_mask);

	return cpu_dest;
}

static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
				bool force)
{
	int cpu_dest;

	cpu_dest = cpu_check_affinity(d, dest);
	if (cpu_dest < 0)
		return -1;

	cpumask_copy(irq_data_get_affinity_mask(d), dest);

	return 0;
}
#endif

static struct irq_chip cpu_interrupt_type = {
	.name			= "CPU",
	.irq_mask		= cpu_mask_irq,
	.irq_unmask		= cpu_unmask_irq,
	.irq_ack		= cpu_ack_irq,
	.irq_eoi		= cpu_eoi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= cpu_set_affinity_irq,
#endif
	/* XXX: Needs to be written.  We managed without it so far, but
	 * we really ought to write it.
	 */
	.irq_retrigger		= NULL,
};
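
/*
 * All CPU IRQs are driven through handle_percpu_irq(), which invokes
 * ->irq_ack() before the handlers run and ->irq_eoi() afterwards, so
 * local_ack_eiem above keeps an EIRR bit from re-triggering while its
 * handler is still running.
 */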

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define irq_stats(x)	(&per_cpu(irq_stat, x))

/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	seq_printf(p, "%*s: ", prec, "STK");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
	seq_puts(p, "  Kernel stack usage\n");
# ifdef CONFIG_IRQSTACKS
	seq_printf(p, "%*s: ", prec, "IST");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
	seq_puts(p, "  Interrupt stack usage\n");
# endif
#endif
#ifdef CONFIG_SMP
	if (num_online_cpus() > 1) {
		seq_printf(p, "%*s: ", prec, "RES");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
		seq_puts(p, "  Rescheduling interrupts\n");
		seq_printf(p, "%*s: ", prec, "CAL");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
		seq_puts(p, "  Function call interrupts\n");
	}
#endif
	seq_printf(p, "%*s: ", prec, "UAH");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
	seq_puts(p, "  Unaligned access handler traps\n");
	seq_printf(p, "%*s: ", prec, "FPA");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
	seq_puts(p, "  Floating point assist traps\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");
	return 0;
}

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "    ");
		for_each_online_cpu(j)
			seq_printf(p, "       CPU%d", j);

#ifdef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		struct irq_desc *desc = irq_to_desc(i);
		struct irqaction *action;

		raw_spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);

		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_desc_kstat_cpu(desc, j));

		seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
#ifndef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, "  %s", action->name);

		while ((action = action->next))
			seq_printf(p, ", %s", action->name);
#else
		for ( ; action; action = action->next) {
			unsigned int k, avg, min, max;

			min = max = action->cr16_hist[0];

			for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
				int hist = action->cr16_hist[k];

				if (hist) {
					avg += hist;
				} else
					break;

				if (hist > max)
					max = hist;
				if (hist < min)
					min = hist;
			}

			avg /= k;
			seq_printf(p, " %s[%d/%d/%d]", action->name,
					min, avg, max);
		}
#endif

		seq_putc(p, '\n');
 skip:
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	if (i == NR_IRQS)
		arch_show_interrupts(p, 3);

	return 0;
}


/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/

int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
	if (irq_has_action(irq))
		return -EBUSY;
	if (irq_get_chip(irq) != &cpu_interrupt_type)
		return -EBUSY;

	/* for iosapic interrupts */
	if (type) {
		irq_set_chip_and_handler(irq, type, handle_percpu_irq);
		irq_set_chip_data(irq, data);
		__cpu_unmask_irq(irq);
	}
	return 0;
}

int txn_claim_irq(int irq)
{
	return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
}

/*
 * The bits_wide parameter accommodates the limitations of the HW/SW which
 * use these bits:
 * Legacy PA I/O (GSC/NIO):  5 bits (architected EIM register)
 * V-class (EPIC):           6 bits
 * N/L/A-class (iosapic):    8 bits
 * PCI 2.2 MSI:             16 bits
 * Some PCI devices:        32 bits (Symbios SCSI/ATM/HyperFabric)
 *
 * On the service provider side:
 * o PA 1.1 (and PA2.0 narrow mode)  5 bits (width of EIR register)
 * o PA 2.0 wide mode                6 bits (per processor)
 * o IA64                            8 bits (0-256 total)
 *
 * So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
 * by the processor... and the N/L-class I/O subsystem supports more bits than
 * PA2.0 has. The first case is the problem.
 */
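/*
 * For example, a legacy GSC device advertising bits_wide == 5 can only
 * encode EIRR offsets 0..31, so the width check below skips any claimed
 * IRQ whose offset from CPU_IRQ_BASE is 32 or larger.
 */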
int txn_alloc_irq(unsigned int bits_wide)
{
	int irq;

	/* never return irq 0 because that's the interval timer */
	for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
		if (cpu_claim_irq(irq, NULL, NULL) < 0)
			continue;
		if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
			continue;
		return irq;
	}

	/* unlikely, but be prepared */
	return -1;
}

unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
	struct irq_data *d = irq_get_irq_data(irq);
	cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu));
#endif

	return per_cpu(cpu_data, cpu).txn_addr;
}

unsigned long txn_alloc_addr(unsigned int virt_irq)
{
	static int next_cpu = -1;

	next_cpu++; /* assign to "next" CPU we want this bugger on */

	/* validate entry */
	while ((next_cpu < nr_cpu_ids) &&
		(!per_cpu(cpu_data, next_cpu).txn_addr ||
		 !cpu_online(next_cpu)))
		next_cpu++;

	if (next_cpu >= nr_cpu_ids)
		next_cpu = 0;	/* nothing else, assign monarch */

	return txn_affinity_addr(virt_irq, next_cpu);
}

unsigned int txn_alloc_data(unsigned int virt_irq)
{
	return virt_irq - CPU_IRQ_BASE;
}

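/*
 * Map the highest pending EIRR bit to its IRQ number: with the MSB set,
 * fls_long() returns BITS_PER_LONG and the result is TIMER_IRQ; each
 * lower bit yields the next higher IRQ number.
 */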
static inline int eirr_to_irq(unsigned long eirr)
{
	int bit = fls_long(eirr);
	return (BITS_PER_LONG - bit) + TIMER_IRQ;
}

#ifdef CONFIG_IRQSTACKS
/*
 * IRQ STACK - used for irq handler
 */
#ifdef CONFIG_64BIT
#define IRQ_STACK_SIZE      (4096 << 4) /* 64k irq stack size */
#else
#define IRQ_STACK_SIZE      (4096 << 3) /* 32k irq stack size */
#endif

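/*
 * The stack shares space with a small lock area: the ldcw instruction
 * behind __ldcw() historically requires a 16-byte-aligned word, so
 * slock[] provides four words and __ldcw_align() selects the aligned
 * one. A value of 1 means the IRQ stack is free; 0 means in use.
 */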
union irq_stack_union {
	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
	volatile unsigned int slock[4];
	volatile unsigned int lock[1];
};

DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
		.slock = { 1,1,1,1 },
	};
#endif


int sysctl_panic_on_stackoverflow = 1;

static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
#define STACK_MARGIN	(256*6)

	unsigned long stack_start = (unsigned long) task_stack_page(current);
	unsigned long sp = regs->gr[30];
	unsigned long stack_usage;
	unsigned int *last_usage;
	int cpu = smp_processor_id();

	/* if sr7 != 0, we interrupted a userspace process which we do not want
	 * to check for stack overflow. We will only check the kernel stack. */
	if (regs->sr[7])
		return;

	/* exit if already in panic */
	if (sysctl_panic_on_stackoverflow < 0)
		return;

	/* calculate kernel stack usage */
	stack_usage = sp - stack_start;
#ifdef CONFIG_IRQSTACKS
	if (likely(stack_usage <= THREAD_SIZE))
		goto check_kernel_stack; /* found kernel stack */

	/* check irq stack usage */
	stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
	stack_usage = sp - stack_start;

	last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
	if (unlikely(stack_usage > *last_usage))
		*last_usage = stack_usage;

	if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
		return;

	pr_emerg("stackcheck: %s will most likely overflow irq stack "
		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
		current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
	goto panic_check;

check_kernel_stack:
#endif

	/* check kernel stack usage */
	last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);

	if (unlikely(stack_usage > *last_usage))
		*last_usage = stack_usage;

	if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
		return;

	pr_emerg("stackcheck: %s will most likely overflow kernel stack "
		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
		current->comm, sp, stack_start, stack_start + THREAD_SIZE);

#ifdef CONFIG_IRQSTACKS
panic_check:
#endif
	if (sysctl_panic_on_stackoverflow) {
		sysctl_panic_on_stackoverflow = -1; /* disable further checks */
		panic("low stack detected by irq handler - check messages\n");
	}
#endif
}

#ifdef CONFIG_IRQSTACKS
/* in entry.S: */
void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);

static void execute_on_irq_stack(void *func, unsigned long param1)
{
	union irq_stack_union *union_ptr;
	unsigned long irq_stack;
	volatile unsigned int *irq_stack_in_use;

	union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
	irq_stack = (unsigned long) &union_ptr->stack;
	irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
			 FRAME_ALIGN); /* align for stack frame usage */

	/* We may be called recursively. If we are already using the irq
	 * stack, just continue to use it. Use spinlocks to serialize
	 * the irq stack usage.
	 */
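	/* __ldcw() atomically loads the lock word and stores zero into
	 * it, returning the old value: nonzero means this CPU just
	 * claimed the IRQ stack, zero means it already holds it. */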
	irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
	if (!__ldcw(irq_stack_in_use)) {
		void (*direct_call)(unsigned long p1) = func;

		/* We are using the IRQ stack already.
		 * Do direct call on current stack. */
		direct_call(param1);
		return;
	}

	/* This is where we switch to the IRQ stack. */
	call_on_stack(param1, func, irq_stack);

	/* free up irq stack usage. */
	*irq_stack_in_use = 1;
}

void do_softirq_own_stack(void)
{
	execute_on_irq_stack(__do_softirq, 0);
}
#endif /* CONFIG_IRQSTACKS */

/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned long eirr_val;
	int irq, cpu = smp_processor_id();
	struct irq_data *irq_data;
#ifdef CONFIG_SMP
	cpumask_t dest;
#endif

	old_regs = set_irq_regs(regs);
	local_irq_disable();
	irq_enter();

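	/* mfctl(23) reads the pending EIRR bits; ANDing with cpu_eiem
	 * and this CPU's local_ack_eiem discards anything globally
	 * masked or still being serviced locally. */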
	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
	if (!eirr_val)
		goto set_out;
	irq = eirr_to_irq(eirr_val);

	irq_data = irq_get_irq_data(irq);

	/* Filter out spurious interrupts, mostly from serial port at bootup */
	if (unlikely(!irq_desc_has_action(irq_data_to_desc(irq_data))))
		goto set_out;

#ifdef CONFIG_SMP
	cpumask_copy(&dest, irq_data_get_affinity_mask(irq_data));
	if (irqd_is_per_cpu(irq_data) &&
	    !cpumask_test_cpu(smp_processor_id(), &dest)) {
		int cpu = cpumask_first(&dest);

		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
		       irq, smp_processor_id(), cpu);
		gsc_writel(irq + CPU_IRQ_BASE,
			per_cpu(cpu_data, cpu).hpa);
		goto set_out;
	}
#endif
	stack_overflow_check(regs);

#ifdef CONFIG_IRQSTACKS
	execute_on_irq_stack(&generic_handle_irq, irq);
#else
	generic_handle_irq(irq);
#endif /* CONFIG_IRQSTACKS */

 out:
	irq_exit();
	set_irq_regs(old_regs);
	return;

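	/*
	 * Nothing was handled on this CPU (or the IRQ was redirected
	 * elsewhere): re-load EIEM from the global mask and the local
	 * ack bits so external interrupts are enabled again on exit.
	 */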
 set_out:
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
	goto out;
}

static void claim_cpu_irqs(void)
{
	unsigned long flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL;
	int i;

	for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
		irq_set_chip_and_handler(i, &cpu_interrupt_type,
					 handle_percpu_irq);
	}

	irq_set_handler(TIMER_IRQ, handle_percpu_irq);
	if (request_irq(TIMER_IRQ, timer_interrupt, flags, "timer", NULL))
		pr_err("Failed to register timer interrupt\n");
#ifdef CONFIG_SMP
	irq_set_handler(IPI_IRQ, handle_percpu_irq);
	if (request_irq(IPI_IRQ, ipi_interrupt, IRQF_PERCPU, "IPI", NULL))
		pr_err("Failed to register IPI interrupt\n");
#endif
}

void __init init_IRQ(void)
{
	local_irq_disable();	/* PARANOID - should already be disabled */
	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
#ifdef CONFIG_SMP
	if (!cpu_eiem) {
		claim_cpu_irqs();
		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
	}
#else
	claim_cpu_irqs();
	cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
}
1da177e4 | 599 | } |