arch/powerpc/kernel/irq.c
1 /*
2  *  Derived from arch/i386/kernel/irq.c
3  *    Copyright (C) 1992 Linus Torvalds
4  *  Adapted from arch/i386 by Gary Thomas
5  *    Copyright (C) 1995-1996 Gary Thomas ([email protected])
6  *  Updated and modified by Cort Dougan <[email protected]>
7  *    Copyright (C) 1996-2001 Cort Dougan
8  *  Adapted for Power Macintosh by Paul Mackerras
9  *    Copyright (C) 1996 Paul Mackerras ([email protected])
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License
13  * as published by the Free Software Foundation; either version
14  * 2 of the License, or (at your option) any later version.
15  *
16  * This file contains the code used by various IRQ handling routines:
17  * asking for different IRQ's should be done through these routines
18  * instead of just grabbing them. Thus setups with different IRQ numbers
19  * shouldn't result in any weird surprises, and installing new handlers
20  * should be easier.
21  *
22  * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
23  * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
24  * mask register (of which only 16 are defined), hence the weird shifting
25  * and complement of the cached_irq_mask.  I want to be able to stuff
26  * this right into the SIU SMASK register.
27  * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
28  * to reduce code space and undefined function references.
29  */
30
31 #undef DEBUG
32
33 #include <linux/module.h>
34 #include <linux/threads.h>
35 #include <linux/kernel_stat.h>
36 #include <linux/signal.h>
37 #include <linux/sched.h>
38 #include <linux/ptrace.h>
39 #include <linux/ioport.h>
40 #include <linux/interrupt.h>
41 #include <linux/timex.h>
42 #include <linux/init.h>
43 #include <linux/slab.h>
44 #include <linux/delay.h>
45 #include <linux/irq.h>
46 #include <linux/seq_file.h>
47 #include <linux/cpumask.h>
48 #include <linux/profile.h>
49 #include <linux/bitops.h>
50 #include <linux/list.h>
51 #include <linux/radix-tree.h>
52 #include <linux/mutex.h>
53 #include <linux/bootmem.h>
54 #include <linux/pci.h>
55 #include <linux/debugfs.h>
56 #include <linux/perf_event.h>
57
58 #include <asm/uaccess.h>
59 #include <asm/system.h>
60 #include <asm/io.h>
61 #include <asm/pgtable.h>
62 #include <asm/irq.h>
63 #include <asm/cache.h>
64 #include <asm/prom.h>
65 #include <asm/ptrace.h>
66 #include <asm/machdep.h>
67 #include <asm/udbg.h>
68 #ifdef CONFIG_PPC64
69 #include <asm/paca.h>
70 #include <asm/firmware.h>
71 #include <asm/lv1call.h>
72 #endif
73 #define CREATE_TRACE_POINTS
74 #include <asm/trace.h>
75
76 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
77 EXPORT_PER_CPU_SYMBOL(irq_stat);
78
79 int __irq_offset_value;
80
81 #ifdef CONFIG_PPC32
82 EXPORT_SYMBOL(__irq_offset_value);
83 atomic_t ppc_n_lost_interrupts;
84
85 #ifdef CONFIG_TAU_INT
86 extern int tau_initialized;
87 extern int tau_interrupts(int);
88 #endif
89 #endif /* CONFIG_PPC32 */
90
91 #ifdef CONFIG_PPC64
92
93 #ifndef CONFIG_SPARSE_IRQ
94 EXPORT_SYMBOL(irq_desc);
95 #endif
96
97 int distribute_irqs = 1;
98
99 static inline notrace unsigned long get_hard_enabled(void)
100 {
101         unsigned long enabled;
102
103         __asm__ __volatile__("lbz %0,%1(13)"
104         : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));
105
106         return enabled;
107 }
108
109 static inline notrace void set_soft_enabled(unsigned long enable)
110 {
111         __asm__ __volatile__("stb %0,%1(13)"
112         : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
113 }
114
115 notrace void raw_local_irq_restore(unsigned long en)
116 {
117         /*
118          * get_paca()->soft_enabled = en;
119          * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
120          * That was allowed before, and in such a case we do need to take care
121          * that gcc will set soft_enabled directly via r13, not choose to use
122          * an intermediate register, lest we're preempted to a different cpu.
123          */
124         set_soft_enabled(en);
125         if (!en)
126                 return;
127
128 #ifdef CONFIG_PPC_STD_MMU_64
129         if (firmware_has_feature(FW_FEATURE_ISERIES)) {
130                 /*
131                  * Do we need to disable preemption here?  Not really: in the
132                  * unlikely event that we're preempted to a different cpu in
133                  * between getting r13, loading its lppaca_ptr, and loading
134                  * its any_int, we might call iseries_handle_interrupts without
135                  * an interrupt pending on the new cpu, but that's no disaster,
136                  * is it?  And the business of preempting us off the old cpu
137                  * would itself involve a local_irq_restore which handles the
138                  * interrupt to that cpu.
139                  *
140                  * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
141                  * to avoid any preemption checking added into get_paca().
142                  */
143                 if (local_paca->lppaca_ptr->int_dword.any_int)
144                         iseries_handle_interrupts();
145         }
146 #endif /* CONFIG_PPC_STD_MMU_64 */
147
148         if (test_perf_event_pending()) {
149                 clear_perf_event_pending();
150                 perf_event_do_pending();
151         }
152
153         /*
154          * if (get_paca()->hard_enabled) return;
155          * But again we need to take care that gcc gets hard_enabled directly
156          * via r13, not choose to use an intermediate register, lest we're
157          * preempted to a different cpu in between the two instructions.
158          */
159         if (get_hard_enabled())
160                 return;
161
162         /*
163          * Need to hard-enable interrupts here.  Since currently disabled,
164          * no need to take further asm precautions against preemption; but
165          * use local_paca instead of get_paca() to avoid preemption checking.
166          */
167         local_paca->hard_enabled = en;
168         if ((int)mfspr(SPRN_DEC) < 0)
169                 mtspr(SPRN_DEC, 1);
170
171         /*
172          * Force the delivery of pending soft-disabled interrupts on PS3.
173          * Any HV call will have this side effect.
174          */
175         if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
176                 u64 tmp;
177                 lv1_get_version_info(&tmp);
178         }
179
180         __hard_irq_enable();
181 }
182 EXPORT_SYMBOL(raw_local_irq_restore);
183 #endif /* CONFIG_PPC64 */
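
/*
 * Editorial illustration (not part of this file): the caller-side pattern
 * that the lazy-disable scheme above serves.  example_critical_section()
 * is a hypothetical function; local_irq_save()/local_irq_restore() are the
 * standard interfaces that end up in raw_local_irq_restore().
 */
static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* only clears paca->soft_enabled */

	/*
	 * If an external interrupt fires here, the low-level handler sees
	 * soft_enabled == 0, clears paca->hard_enabled and returns without
	 * running the handler, leaving the interrupt pending in hardware.
	 */

	local_irq_restore(flags);	/* raw_local_irq_restore() hard-enables
					 * again so the deferred interrupt can
					 * be delivered */
}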
184
185 static int show_other_interrupts(struct seq_file *p, int prec)
186 {
187         int j;
188
189 #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
190         if (tau_initialized) {
191                 seq_printf(p, "%*s: ", prec, "TAU");
192                 for_each_online_cpu(j)
193                         seq_printf(p, "%10u ", tau_interrupts(j));
194                 seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
195         }
196 #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
197
198         seq_printf(p, "%*s: ", prec, "LOC");
199         for_each_online_cpu(j)
200                 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
201         seq_printf(p, "  Local timer interrupts\n");
202
203         seq_printf(p, "%*s: ", prec, "SPU");
204         for_each_online_cpu(j)
205                 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
206         seq_printf(p, "  Spurious interrupts\n");
207
208         seq_printf(p, "%*s: ", prec, "CNT");
209         for_each_online_cpu(j)
210                 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
211         seq_printf(p, "  Performance monitoring interrupts\n");
212
213         seq_printf(p, "%*s: ", prec, "MCE");
214         for_each_online_cpu(j)
215                 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
216         seq_printf(p, "  Machine check exceptions\n");
217
218         return 0;
219 }
220
221 int show_interrupts(struct seq_file *p, void *v)
222 {
223         unsigned long flags, any_count = 0;
224         int i = *(loff_t *) v, j, prec;
225         struct irqaction *action;
226         struct irq_desc *desc;
227
228         if (i > nr_irqs)
229                 return 0;
230
231         for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
232                 j *= 10;
233
234         if (i == nr_irqs)
235                 return show_other_interrupts(p, prec);
236
237         /* print header */
238         if (i == 0) {
239                 seq_printf(p, "%*s", prec + 8, "");
240                 for_each_online_cpu(j)
241                         seq_printf(p, "CPU%-8d", j);
242                 seq_putc(p, '\n');
243         }
244
245         desc = irq_to_desc(i);
246         if (!desc)
247                 return 0;
248
249         raw_spin_lock_irqsave(&desc->lock, flags);
250         for_each_online_cpu(j)
251                 any_count |= kstat_irqs_cpu(i, j);
252         action = desc->action;
253         if (!action && !any_count)
254                 goto out;
255
256         seq_printf(p, "%*d: ", prec, i);
257         for_each_online_cpu(j)
258                 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
259
260         if (desc->chip)
261                 seq_printf(p, "  %-16s", desc->chip->name);
262         else
263                 seq_printf(p, "  %-16s", "None");
264         seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");
265
266         if (action) {
267                 seq_printf(p, "     %s", action->name);
268                 while ((action = action->next) != NULL)
269                         seq_printf(p, ", %s", action->name);
270         }
271
272         seq_putc(p, '\n');
273 out:
274         raw_spin_unlock_irqrestore(&desc->lock, flags);
275         return 0;
276 }
277
278 /*
279  * /proc/stat helpers
280  */
281 u64 arch_irq_stat_cpu(unsigned int cpu)
282 {
283         u64 sum = per_cpu(irq_stat, cpu).timer_irqs;
284
285         sum += per_cpu(irq_stat, cpu).pmu_irqs;
286         sum += per_cpu(irq_stat, cpu).mce_exceptions;
287         sum += per_cpu(irq_stat, cpu).spurious_irqs;
288
289         return sum;
290 }
291
292 #ifdef CONFIG_HOTPLUG_CPU
293 void fixup_irqs(const struct cpumask *map)
294 {
295         struct irq_desc *desc;
296         unsigned int irq;
297         static int warned;
298         cpumask_var_t mask;
299
300         alloc_cpumask_var(&mask, GFP_KERNEL);
301
302         for_each_irq(irq) {
303                 desc = irq_to_desc(irq);
304                 if (!desc || (desc->status & IRQ_PER_CPU))
305                         continue;
306
307                 cpumask_and(mask, desc->affinity, map);
308                 if (cpumask_any(mask) >= nr_cpu_ids) {
309                         printk("Breaking affinity for irq %i\n", irq);
310                         cpumask_copy(mask, map);
311                 }
312                 if (desc->chip->set_affinity)
313                         desc->chip->set_affinity(irq, mask);
314                 else if (desc->action && !(warned++))
315                         printk("Cannot set affinity for irq %i\n", irq);
316         }
317
318         free_cpumask_var(mask);
319
320         local_irq_enable();
321         mdelay(1);
322         local_irq_disable();
323 }
324 #endif
325
326 #ifdef CONFIG_IRQSTACKS
327 static inline void handle_one_irq(unsigned int irq)
328 {
329         struct thread_info *curtp, *irqtp;
330         unsigned long saved_sp_limit;
331         struct irq_desc *desc;
332
333         /* Switch to the irq stack to handle this */
334         curtp = current_thread_info();
335         irqtp = hardirq_ctx[smp_processor_id()];
336
337         if (curtp == irqtp) {
338                 /* We're already on the irq stack, just handle it */
339                 generic_handle_irq(irq);
340                 return;
341         }
342
343         desc = irq_to_desc(irq);
344         saved_sp_limit = current->thread.ksp_limit;
345
346         irqtp->task = curtp->task;
347         irqtp->flags = 0;
348
349         /* Copy the softirq bits in preempt_count so that the
350          * softirq checks work in the hardirq context. */
351         irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
352                                (curtp->preempt_count & SOFTIRQ_MASK);
353
354         current->thread.ksp_limit = (unsigned long)irqtp +
355                 _ALIGN_UP(sizeof(struct thread_info), 16);
356
357         call_handle_irq(irq, desc, irqtp, desc->handle_irq);
358         current->thread.ksp_limit = saved_sp_limit;
359         irqtp->task = NULL;
360
361         /* Set any flag that may have been set on the
362          * alternate stack
363          */
364         if (irqtp->flags)
365                 set_bits(irqtp->flags, &curtp->flags);
366 }
367 #else
368 static inline void handle_one_irq(unsigned int irq)
369 {
370         generic_handle_irq(irq);
371 }
372 #endif
373
374 static inline void check_stack_overflow(void)
375 {
376 #ifdef CONFIG_DEBUG_STACKOVERFLOW
377         long sp;
378
379         sp = __get_SP() & (THREAD_SIZE-1);
380
381         /* check for stack overflow: is there less than 2KB free? */
382         if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
383                 printk("do_IRQ: stack overflow: %ld\n",
384                         sp - sizeof(struct thread_info));
385                 dump_stack();
386         }
387 #endif
388 }
389
390 void do_IRQ(struct pt_regs *regs)
391 {
392         struct pt_regs *old_regs = set_irq_regs(regs);
393         unsigned int irq;
394
395         trace_irq_entry(regs);
396
397         irq_enter();
398
399         check_stack_overflow();
400
401         irq = ppc_md.get_irq();
402
403         if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
404                 handle_one_irq(irq);
405         else if (irq != NO_IRQ_IGNORE)
406                 __get_cpu_var(irq_stat).spurious_irqs++;
407
408         irq_exit();
409         set_irq_regs(old_regs);
410
411 #ifdef CONFIG_PPC_ISERIES
412         if (firmware_has_feature(FW_FEATURE_ISERIES) &&
413                         get_lppaca()->int_dword.fields.decr_int) {
414                 get_lppaca()->int_dword.fields.decr_int = 0;
415                 /* Signal a fake decrementer interrupt */
416                 timer_interrupt(regs);
417         }
418 #endif
419
420         trace_irq_exit(regs);
421 }
422
423 void __init init_IRQ(void)
424 {
425         if (ppc_md.init_IRQ)
426                 ppc_md.init_IRQ();
427
428         exc_lvl_ctx_init();
429
430         irq_ctx_init();
431 }
432
433 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
434 struct thread_info   *critirq_ctx[NR_CPUS] __read_mostly;
435 struct thread_info    *dbgirq_ctx[NR_CPUS] __read_mostly;
436 struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
437
438 void exc_lvl_ctx_init(void)
439 {
440         struct thread_info *tp;
441         int i;
442
443         for_each_possible_cpu(i) {
444                 memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
445                 tp = critirq_ctx[i];
446                 tp->cpu = i;
447                 tp->preempt_count = 0;
448
449 #ifdef CONFIG_BOOKE
450                 memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
451                 tp = dbgirq_ctx[i];
452                 tp->cpu = i;
453                 tp->preempt_count = 0;
454
455                 memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
456                 tp = mcheckirq_ctx[i];
457                 tp->cpu = i;
458                 tp->preempt_count = HARDIRQ_OFFSET;
459 #endif
460         }
461 }
462 #endif
463
464 #ifdef CONFIG_IRQSTACKS
465 struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
466 struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
467
468 void irq_ctx_init(void)
469 {
470         struct thread_info *tp;
471         int i;
472
473         for_each_possible_cpu(i) {
474                 memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
475                 tp = softirq_ctx[i];
476                 tp->cpu = i;
477                 tp->preempt_count = 0;
478
479                 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
480                 tp = hardirq_ctx[i];
481                 tp->cpu = i;
482                 tp->preempt_count = HARDIRQ_OFFSET;
483         }
484 }
485
486 static inline void do_softirq_onstack(void)
487 {
488         struct thread_info *curtp, *irqtp;
489         unsigned long saved_sp_limit = current->thread.ksp_limit;
490
491         curtp = current_thread_info();
492         irqtp = softirq_ctx[smp_processor_id()];
493         irqtp->task = curtp->task;
494         current->thread.ksp_limit = (unsigned long)irqtp +
495                                     _ALIGN_UP(sizeof(struct thread_info), 16);
496         call_do_softirq(irqtp);
497         current->thread.ksp_limit = saved_sp_limit;
498         irqtp->task = NULL;
499 }
500
501 #else
502 #define do_softirq_onstack()    __do_softirq()
503 #endif /* CONFIG_IRQSTACKS */
504
505 void do_softirq(void)
506 {
507         unsigned long flags;
508
509         if (in_interrupt())
510                 return;
511
512         local_irq_save(flags);
513
514         if (local_softirq_pending())
515                 do_softirq_onstack();
516
517         local_irq_restore(flags);
518 }
519
520
521 /*
522  * IRQ controller and virtual interrupts
523  */
524
525 static LIST_HEAD(irq_hosts);
526 static DEFINE_RAW_SPINLOCK(irq_big_lock);
527 static unsigned int revmap_trees_allocated;
528 static DEFINE_MUTEX(revmap_trees_mutex);
529 struct irq_map_entry irq_map[NR_IRQS];
530 static unsigned int irq_virq_count = NR_IRQS;
531 static struct irq_host *irq_default_host;
532
533 irq_hw_number_t virq_to_hw(unsigned int virq)
534 {
535         return irq_map[virq].hwirq;
536 }
537 EXPORT_SYMBOL_GPL(virq_to_hw);
538
539 static int default_irq_host_match(struct irq_host *h, struct device_node *np)
540 {
541         return h->of_node != NULL && h->of_node == np;
542 }
543
544 struct irq_host *irq_alloc_host(struct device_node *of_node,
545                                 unsigned int revmap_type,
546                                 unsigned int revmap_arg,
547                                 struct irq_host_ops *ops,
548                                 irq_hw_number_t inval_irq)
549 {
550         struct irq_host *host;
551         unsigned int size = sizeof(struct irq_host);
552         unsigned int i;
553         unsigned int *rmap;
554         unsigned long flags;
555
556         /* Allocate structure and revmap table if using linear mapping */
557         if (revmap_type == IRQ_HOST_MAP_LINEAR)
558                 size += revmap_arg * sizeof(unsigned int);
559         host = zalloc_maybe_bootmem(size, GFP_KERNEL);
560         if (host == NULL)
561                 return NULL;
562
563         /* Fill structure */
564         host->revmap_type = revmap_type;
565         host->inval_irq = inval_irq;
566         host->ops = ops;
567         host->of_node = of_node_get(of_node);
568
569         if (host->ops->match == NULL)
570                 host->ops->match = default_irq_host_match;
571
572         raw_spin_lock_irqsave(&irq_big_lock, flags);
573
574         /* If it's a legacy controller, check for duplicates and
575          * mark it as allocated (we use the irq 0 host pointer for that)
576          */
577         if (revmap_type == IRQ_HOST_MAP_LEGACY) {
578                 if (irq_map[0].host != NULL) {
579                         raw_spin_unlock_irqrestore(&irq_big_lock, flags);
580                         /* If we are early boot, we can't free the structure,
581                          * too bad...
582                          * this will be fixed once slab is made available early
583                          * instead of the current cruft
584                          */
585                         if (mem_init_done)
586                                 kfree(host);
587                         return NULL;
588                 }
589                 irq_map[0].host = host;
590         }
591
592         list_add(&host->link, &irq_hosts);
593         raw_spin_unlock_irqrestore(&irq_big_lock, flags);
594
595         /* Additional setups per revmap type */
596         switch(revmap_type) {
597         case IRQ_HOST_MAP_LEGACY:
598                 /* 0 is always the invalid number for legacy */
599                 host->inval_irq = 0;
600                 /* setup us as the host for all legacy interrupts */
601                 for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
602                         irq_map[i].hwirq = i;
603                         smp_wmb();
604                         irq_map[i].host = host;
605                         smp_wmb();
606
607                         /* Clear norequest flags */
608                         irq_to_desc(i)->status &= ~IRQ_NOREQUEST;
609
610                         /* Legacy flags are left to default at this point,
611                          * one can then use irq_create_mapping() to
612                          * explicitly change them
613                          */
614                         ops->map(host, i, i);
615                 }
616                 break;
617         case IRQ_HOST_MAP_LINEAR:
618                 rmap = (unsigned int *)(host + 1);
619                 for (i = 0; i < revmap_arg; i++)
620                         rmap[i] = NO_IRQ;
621                 host->revmap_data.linear.size = revmap_arg;
622                 smp_wmb();
623                 host->revmap_data.linear.revmap = rmap;
624                 break;
625         default:
626                 break;
627         }
628
629         pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
630
631         return host;
632 }
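
/*
 * Editorial illustration (not part of this file): how a secondary
 * interrupt-controller driver would typically register a linear host.
 * The acme_* names and acme_pic_chip are purely hypothetical.
 */
static struct irq_chip acme_pic_chip;	/* mask/unmask/eoi callbacks omitted */

static int acme_pic_host_map(struct irq_host *h, unsigned int virq,
			     irq_hw_number_t hw)
{
	/* Attach a chip and flow handler to every source we map */
	set_irq_chip_and_handler(virq, &acme_pic_chip, handle_level_irq);
	return 0;
}

static struct irq_host_ops acme_pic_host_ops = {
	.map = acme_pic_host_map,
};

static void __init acme_pic_init(struct device_node *np)
{
	struct irq_host *host;

	/* 32 hardware sources, linear reverse map, hwirq 0 means "invalid" */
	host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 32,
			      &acme_pic_host_ops, 0);
	BUG_ON(host == NULL);
}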
633
634 struct irq_host *irq_find_host(struct device_node *node)
635 {
636         struct irq_host *h, *found = NULL;
637         unsigned long flags;
638
639         /* We might want to match the legacy controller last since
640          * it might potentially be set to match all interrupts in
641          * the absence of a device node. This isn't a problem
642          * so far though...
643          */
644         raw_spin_lock_irqsave(&irq_big_lock, flags);
645         list_for_each_entry(h, &irq_hosts, link)
646                 if (h->ops->match(h, node)) {
647                         found = h;
648                         break;
649                 }
650         raw_spin_unlock_irqrestore(&irq_big_lock, flags);
651         return found;
652 }
653 EXPORT_SYMBOL_GPL(irq_find_host);
654
655 void irq_set_default_host(struct irq_host *host)
656 {
657         pr_debug("irq: Default host set to @0x%p\n", host);
658
659         irq_default_host = host;
660 }
661
662 void irq_set_virq_count(unsigned int count)
663 {
664         pr_debug("irq: Trying to set virq count to %d\n", count);
665
666         BUG_ON(count < NUM_ISA_INTERRUPTS);
667         if (count < NR_IRQS)
668                 irq_virq_count = count;
669 }
670
671 static int irq_setup_virq(struct irq_host *host, unsigned int virq,
672                             irq_hw_number_t hwirq)
673 {
674         struct irq_desc *desc;
675
676         desc = irq_to_desc_alloc_node(virq, 0);
677         if (!desc) {
678                 pr_debug("irq: -> allocating desc failed\n");
679                 goto error;
680         }
681
682         /* Clear IRQ_NOREQUEST flag */
683         desc->status &= ~IRQ_NOREQUEST;
684
685         /* map it */
686         smp_wmb();
687         irq_map[virq].hwirq = hwirq;
688         smp_mb();
689
690         if (host->ops->map(host, virq, hwirq)) {
691                 pr_debug("irq: -> mapping failed, freeing\n");
692                 goto error;
693         }
694
695         return 0;
696
697 error:
698         irq_free_virt(virq, 1);
699         return -1;
700 }
701
702 unsigned int irq_create_direct_mapping(struct irq_host *host)
703 {
704         unsigned int virq;
705
706         if (host == NULL)
707                 host = irq_default_host;
708
709         BUG_ON(host == NULL);
710         WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
711
712         virq = irq_alloc_virt(host, 1, 0);
713         if (virq == NO_IRQ) {
714                 pr_debug("irq: create_direct virq allocation failed\n");
715                 return NO_IRQ;
716         }
717
718         pr_debug("irq: create_direct obtained virq %d\n", virq);
719
720         if (irq_setup_virq(host, virq, virq))
721                 return NO_IRQ;
722
723         return virq;
724 }
725
726 unsigned int irq_create_mapping(struct irq_host *host,
727                                 irq_hw_number_t hwirq)
728 {
729         unsigned int virq, hint;
730
731         pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
732
733         /* Look for default host if necessary */
734         if (host == NULL)
735                 host = irq_default_host;
736         if (host == NULL) {
737                 printk(KERN_WARNING "irq_create_mapping called for"
738                        " NULL host, hwirq=%lx\n", hwirq);
739                 WARN_ON(1);
740                 return NO_IRQ;
741         }
742         pr_debug("irq: -> using host @%p\n", host);
743
744         /* Check if mapping already exists; if it does, call
745          * host->ops->map() to update the flags
746          */
747         virq = irq_find_mapping(host, hwirq);
748         if (virq != NO_IRQ) {
749                 if (host->ops->remap)
750                         host->ops->remap(host, virq, hwirq);
751                 pr_debug("irq: -> existing mapping on virq %d\n", virq);
752                 return virq;
753         }
754
755         /* Get a virtual interrupt number */
756         if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
757                 /* Handle legacy */
758                 virq = (unsigned int)hwirq;
759                 if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
760                         return NO_IRQ;
761                 return virq;
762         } else {
763                 /* Allocate a virtual interrupt number */
764                 hint = hwirq % irq_virq_count;
765                 virq = irq_alloc_virt(host, 1, hint);
766                 if (virq == NO_IRQ) {
767                         pr_debug("irq: -> virq allocation failed\n");
768                         return NO_IRQ;
769                 }
770         }
771
772         if (irq_setup_virq(host, virq, hwirq))
773                 return NO_IRQ;
774
775         printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
776                 hwirq, host->of_node ? host->of_node->full_name : "null", virq);
777
778         return virq;
779 }
780 EXPORT_SYMBOL_GPL(irq_create_mapping);
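
/*
 * Editorial illustration (not part of this file): run-time use of
 * irq_create_mapping() from a hypothetical cascaded-PIC flow handler.
 * acme_sub_host and acme_sub_read_pending() are assumptions.
 */
static void acme_sub_cascade(unsigned int irq, struct irq_desc *desc)
{
	int hw = acme_sub_read_pending();	/* pending child source, or -1 */

	if (hw >= 0) {
		/* Reuses an existing virq or creates a new mapping */
		unsigned int child = irq_create_mapping(acme_sub_host, hw);

		if (child != NO_IRQ)
			generic_handle_irq(child);
	}
	/* acknowledgement of the parent interrupt omitted for brevity */
}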
781
782 unsigned int irq_create_of_mapping(struct device_node *controller,
783                                    const u32 *intspec, unsigned int intsize)
784 {
785         struct irq_host *host;
786         irq_hw_number_t hwirq;
787         unsigned int type = IRQ_TYPE_NONE;
788         unsigned int virq;
789
790         if (controller == NULL)
791                 host = irq_default_host;
792         else
793                 host = irq_find_host(controller);
794         if (host == NULL) {
795                 printk(KERN_WARNING "irq: no irq host found for %s !\n",
796                        controller->full_name);
797                 return NO_IRQ;
798         }
799
800         /* If host has no translation, then we assume interrupt line */
801         if (host->ops->xlate == NULL)
802                 hwirq = intspec[0];
803         else {
804                 if (host->ops->xlate(host, controller, intspec, intsize,
805                                      &hwirq, &type))
806                         return NO_IRQ;
807         }
808
809         /* Create mapping */
810         virq = irq_create_mapping(host, hwirq);
811         if (virq == NO_IRQ)
812                 return virq;
813
814         /* Set type if specified and different than the current one */
815         if (type != IRQ_TYPE_NONE &&
816             type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
817                 set_irq_type(virq, type);
818         return virq;
819 }
820 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
821
822 unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
823 {
824         struct of_irq oirq;
825
826         if (of_irq_map_one(dev, index, &oirq))
827                 return NO_IRQ;
828
829         return irq_create_of_mapping(oirq.controller, oirq.specifier,
830                                      oirq.size);
831 }
832 EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
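
/*
 * Editorial illustration (not part of this file): the usual consumer of
 * irq_of_parse_and_map() in a device driver.  The acme_uart_* names are
 * hypothetical.
 */
static irqreturn_t acme_uart_interrupt(int irq, void *dev_id)
{
	/* acknowledge and service the device here */
	return IRQ_HANDLED;
}

static int acme_uart_setup_irq(struct device_node *np)
{
	unsigned int virq;

	/* Translate interrupt specifier 0 of this node into a virq */
	virq = irq_of_parse_and_map(np, 0);
	if (virq == NO_IRQ)
		return -ENODEV;

	return request_irq(virq, acme_uart_interrupt, 0, "acme-uart", NULL);
}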
833
834 void irq_dispose_mapping(unsigned int virq)
835 {
836         struct irq_host *host;
837         irq_hw_number_t hwirq;
838
839         if (virq == NO_IRQ)
840                 return;
841
842         host = irq_map[virq].host;
843         WARN_ON (host == NULL);
844         if (host == NULL)
845                 return;
846
847         /* Never unmap legacy interrupts */
848         if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
849                 return;
850
851         /* remove chip and handler */
852         set_irq_chip_and_handler(virq, NULL, NULL);
853
854         /* Make sure it's completed */
855         synchronize_irq(virq);
856
857         /* Tell the PIC about it */
858         if (host->ops->unmap)
859                 host->ops->unmap(host, virq);
860         smp_mb();
861
862         /* Clear reverse map */
863         hwirq = irq_map[virq].hwirq;
864         switch(host->revmap_type) {
865         case IRQ_HOST_MAP_LINEAR:
866                 if (hwirq < host->revmap_data.linear.size)
867                         host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
868                 break;
869         case IRQ_HOST_MAP_TREE:
870                 /*
871                  * Check if the radix tree has been allocated yet; if not,
872                  * there is nothing to remove.
873                  */
874                 smp_rmb();
875                 if (revmap_trees_allocated < 1)
876                         break;
877                 mutex_lock(&revmap_trees_mutex);
878                 radix_tree_delete(&host->revmap_data.tree, hwirq);
879                 mutex_unlock(&revmap_trees_mutex);
880                 break;
881         }
882
883         /* Destroy map */
884         smp_mb();
885         irq_map[virq].hwirq = host->inval_irq;
886
887         /* Set some flags */
888         irq_to_desc(virq)->status |= IRQ_NOREQUEST;
889
890         /* Free it */
891         irq_free_virt(virq, 1);
892 }
893 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
894
895 unsigned int irq_find_mapping(struct irq_host *host,
896                               irq_hw_number_t hwirq)
897 {
898         unsigned int i;
899         unsigned int hint = hwirq % irq_virq_count;
900
901         /* Look for default host if necessary */
902         if (host == NULL)
903                 host = irq_default_host;
904         if (host == NULL)
905                 return NO_IRQ;
906
907         /* legacy -> bail early */
908         if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
909                 return hwirq;
910
911         /* Slow path does a linear search of the map */
912         if (hint < NUM_ISA_INTERRUPTS)
913                 hint = NUM_ISA_INTERRUPTS;
914         i = hint;
915         do  {
916                 if (irq_map[i].host == host &&
917                     irq_map[i].hwirq == hwirq)
918                         return i;
919                 i++;
920                 if (i >= irq_virq_count)
921                         i = NUM_ISA_INTERRUPTS;
922         } while(i != hint);
923         return NO_IRQ;
924 }
925 EXPORT_SYMBOL_GPL(irq_find_mapping);
926
927
928 unsigned int irq_radix_revmap_lookup(struct irq_host *host,
929                                      irq_hw_number_t hwirq)
930 {
931         struct irq_map_entry *ptr;
932         unsigned int virq;
933
934         WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
935
936         /*
937          * Check if the radix tree exists and has been initialized.
938          * If not, we fall back to the slow path.
939          */
940         if (revmap_trees_allocated < 2)
941                 return irq_find_mapping(host, hwirq);
942
943         /* Now try to resolve */
944         /*
945          * No rcu_read_lock(ing) needed, the ptr returned can't go under us
946          * as it's referencing an entry in the static irq_map table.
947          */
948         ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
949
950         /*
951          * If found in radix tree, then fine.
952          * Else fallback to linear lookup - this should not happen in practice
953          * as it means that we failed to insert the node in the radix tree.
954          */
955         if (ptr)
956                 virq = ptr - irq_map;
957         else
958                 virq = irq_find_mapping(host, hwirq);
959
960         return virq;
961 }
962
963 void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
964                              irq_hw_number_t hwirq)
965 {
966
967         WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
968
969         /*
970          * Check if the radix tree exists yet.
971          * If not, then the irq will be inserted into the tree when it gets
972          * initialized.
973          */
974         smp_rmb();
975         if (revmap_trees_allocated < 1)
976                 return;
977
978         if (virq != NO_IRQ) {
979                 mutex_lock(&revmap_trees_mutex);
980                 radix_tree_insert(&host->revmap_data.tree, hwirq,
981                                   &irq_map[virq]);
982                 mutex_unlock(&revmap_trees_mutex);
983         }
984 }
985
986 unsigned int irq_linear_revmap(struct irq_host *host,
987                                irq_hw_number_t hwirq)
988 {
989         unsigned int *revmap;
990
991         WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);
992
993         /* Check revmap bounds */
994         if (unlikely(hwirq >= host->revmap_data.linear.size))
995                 return irq_find_mapping(host, hwirq);
996
997         /* Check if revmap was allocated */
998         revmap = host->revmap_data.linear.revmap;
999         if (unlikely(revmap == NULL))
1000                 return irq_find_mapping(host, hwirq);
1001
1002         /* Fill up revmap with slow path if no mapping found */
1003         if (unlikely(revmap[hwirq] == NO_IRQ))
1004                 revmap[hwirq] = irq_find_mapping(host, hwirq);
1005
1006         return revmap[hwirq];
1007 }
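
/*
 * Editorial illustration (not part of this file): the typical fast-path
 * consumer of irq_linear_revmap(), a platform's ppc_md.get_irq() hook.
 * acme_pic_host and acme_pic_read_pending() are assumptions.
 */
static unsigned int acme_pic_get_irq(void)
{
	int hw = acme_pic_read_pending();	/* pending hardware source, or -1 */

	if (hw < 0)
		return NO_IRQ;

	/* O(1) lookup; falls back to irq_find_mapping() when out of range */
	return irq_linear_revmap(acme_pic_host, hw);
}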
1008
1009 unsigned int irq_alloc_virt(struct irq_host *host,
1010                             unsigned int count,
1011                             unsigned int hint)
1012 {
1013         unsigned long flags;
1014         unsigned int i, j, found = NO_IRQ;
1015
1016         if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
1017                 return NO_IRQ;
1018
1019         raw_spin_lock_irqsave(&irq_big_lock, flags);
1020
1021         /* Use hint for 1 interrupt if any */
1022         if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
1023             hint < irq_virq_count && irq_map[hint].host == NULL) {
1024                 found = hint;
1025                 goto hint_found;
1026         }
1027
1028         /* Look for count consecutive numbers in the allocatable
1029          * (non-legacy) space
1030          */
1031         for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
1032                 if (irq_map[i].host != NULL)
1033                         j = 0;
1034                 else
1035                         j++;
1036
1037                 if (j == count) {
1038                         found = i - count + 1;
1039                         break;
1040                 }
1041         }
1042         if (found == NO_IRQ) {
1043                 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1044                 return NO_IRQ;
1045         }
1046  hint_found:
1047         for (i = found; i < (found + count); i++) {
1048                 irq_map[i].hwirq = host->inval_irq;
1049                 smp_wmb();
1050                 irq_map[i].host = host;
1051         }
1052         raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1053         return found;
1054 }
1055
1056 void irq_free_virt(unsigned int virq, unsigned int count)
1057 {
1058         unsigned long flags;
1059         unsigned int i;
1060
1061         WARN_ON (virq < NUM_ISA_INTERRUPTS);
1062         WARN_ON (count == 0 || (virq + count) > irq_virq_count);
1063
1064         raw_spin_lock_irqsave(&irq_big_lock, flags);
1065         for (i = virq; i < (virq + count); i++) {
1066                 struct irq_host *host;
1067
1068                 if (i < NUM_ISA_INTERRUPTS ||
1069                     (virq + count) > irq_virq_count)
1070                         continue;
1071
1072                 host = irq_map[i].host;
1073                 irq_map[i].hwirq = host->inval_irq;
1074                 smp_wmb();
1075                 irq_map[i].host = NULL;
1076         }
1077         raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1078 }
1079
1080 int arch_early_irq_init(void)
1081 {
1082         struct irq_desc *desc;
1083         int i;
1084
1085         for (i = 0; i < NR_IRQS; i++) {
1086                 desc = irq_to_desc(i);
1087                 if (desc)
1088                         desc->status |= IRQ_NOREQUEST;
1089         }
1090
1091         return 0;
1092 }
1093
1094 int arch_init_chip_data(struct irq_desc *desc, int node)
1095 {
1096         desc->status |= IRQ_NOREQUEST;
1097         return 0;
1098 }
1099
1100 /* We need to create the radix trees late */
1101 static int irq_late_init(void)
1102 {
1103         struct irq_host *h;
1104         unsigned int i;
1105
1106         /*
1107          * No mutual exclusion with respect to accessors of the tree is needed
1108          * here as the synchronization is done via the state variable
1109          * revmap_trees_allocated.
1110          */
1111         list_for_each_entry(h, &irq_hosts, link) {
1112                 if (h->revmap_type == IRQ_HOST_MAP_TREE)
1113                         INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
1114         }
1115
1116         /*
1117          * Make sure the radix trees inits are visible before setting
1118          * the flag
1119          */
1120         smp_wmb();
1121         revmap_trees_allocated = 1;
1122
1123         /*
1124          * Insert the reverse mapping for those interrupts already present
1125          * in irq_map[].
1126          */
1127         mutex_lock(&revmap_trees_mutex);
1128         for (i = 0; i < irq_virq_count; i++) {
1129                 if (irq_map[i].host &&
1130                     (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
1131                         radix_tree_insert(&irq_map[i].host->revmap_data.tree,
1132                                           irq_map[i].hwirq, &irq_map[i]);
1133         }
1134         mutex_unlock(&revmap_trees_mutex);
1135
1136         /*
1137          * Make sure the radix trees insertions are visible before setting
1138          * the flag
1139          */
1140         smp_wmb();
1141         revmap_trees_allocated = 2;
1142
1143         return 0;
1144 }
1145 arch_initcall(irq_late_init);
1146
1147 #ifdef CONFIG_VIRQ_DEBUG
1148 static int virq_debug_show(struct seq_file *m, void *private)
1149 {
1150         unsigned long flags;
1151         struct irq_desc *desc;
1152         const char *p;
1153         char none[] = "none";
1154         int i;
1155
1156         seq_printf(m, "%-5s  %-7s  %-15s  %s\n", "virq", "hwirq",
1157                       "chip name", "host name");
1158
1159         for (i = 1; i < nr_irqs; i++) {
1160                 desc = irq_to_desc(i);
1161                 if (!desc)
1162                         continue;
1163
1164                 raw_spin_lock_irqsave(&desc->lock, flags);
1165
1166                 if (desc->action && desc->action->handler) {
1167                         seq_printf(m, "%5d  ", i);
1168                         seq_printf(m, "0x%05lx  ", virq_to_hw(i));
1169
1170                         if (desc->chip && desc->chip->name)
1171                                 p = desc->chip->name;
1172                         else
1173                                 p = none;
1174                         seq_printf(m, "%-15s  ", p);
1175
1176                         if (irq_map[i].host && irq_map[i].host->of_node)
1177                                 p = irq_map[i].host->of_node->full_name;
1178                         else
1179                                 p = none;
1180                         seq_printf(m, "%s\n", p);
1181                 }
1182
1183                 raw_spin_unlock_irqrestore(&desc->lock, flags);
1184         }
1185
1186         return 0;
1187 }
1188
1189 static int virq_debug_open(struct inode *inode, struct file *file)
1190 {
1191         return single_open(file, virq_debug_show, inode->i_private);
1192 }
1193
1194 static const struct file_operations virq_debug_fops = {
1195         .open = virq_debug_open,
1196         .read = seq_read,
1197         .llseek = seq_lseek,
1198         .release = single_release,
1199 };
1200
1201 static int __init irq_debugfs_init(void)
1202 {
1203         if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
1204                                  NULL, &virq_debug_fops) == NULL)
1205                 return -ENOMEM;
1206
1207         return 0;
1208 }
1209 __initcall(irq_debugfs_init);
1210 #endif /* CONFIG_VIRQ_DEBUG */
1211
1212 #ifdef CONFIG_PPC64
1213 static int __init setup_noirqdistrib(char *str)
1214 {
1215         distribute_irqs = 0;
1216         return 1;
1217 }
1218
1219 __setup("noirqdistrib", setup_noirqdistrib);
1220 #endif /* CONFIG_PPC64 */