Git Repo - linux.git/commitdiff
Merge branch 'core/percpu' into x86/core
author Ingo Molnar <[email protected]>
Wed, 28 Jan 2009 22:12:55 +0000 (23:12 +0100)
committer Ingo Molnar <[email protected]>
Wed, 28 Jan 2009 22:12:55 +0000 (23:12 +0100)
Conflicts:
kernel/irq/handle.c

1  2 
kernel/irq/handle.c
kernel/irq/manage.c

diff --combined kernel/irq/handle.c
index 3aba8d12f328ec91e59f5c72217ceb0c0d2fd0a1,375d68cd5bf0dd298d90d8d2c3ccb7764c5506b8..f51eaee921b603b202bf184cdfdaee3a8da2ca08
@@@ -17,6 -17,7 +17,7 @@@
  #include <linux/kernel_stat.h>
  #include <linux/rculist.h>
  #include <linux/hash.h>
+ #include <linux/bootmem.h>
  
  #include "internals.h"
  
@@@ -39,18 -40,6 +40,18 @@@ void handle_bad_irq(unsigned int irq, s
        ack_bad_irq(irq);
  }
  
 +#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
 +static void __init init_irq_default_affinity(void)
 +{
 +      alloc_bootmem_cpumask_var(&irq_default_affinity);
 +      cpumask_setall(irq_default_affinity);
 +}
 +#else
 +static void __init init_irq_default_affinity(void)
 +{
 +}
 +#endif
 +
  /*
   * Linux has a controller-independent interrupt architecture.
   * Every controller has a 'controller-template', that is used
@@@ -69,6 -58,7 +70,7 @@@ int nr_irqs = NR_IRQS
  EXPORT_SYMBOL_GPL(nr_irqs);
  
  #ifdef CONFIG_SPARSE_IRQ
  static struct irq_desc irq_desc_init = {
        .irq        = -1,
        .status     = IRQ_DISABLED,
@@@ -76,9 -66,6 +78,6 @@@
        .handle_irq = handle_bad_irq,
        .depth      = 1,
        .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
- #ifdef CONFIG_SMP
-       .affinity   = CPU_MASK_ALL
- #endif
  };
  
  void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
@@@ -113,6 -100,10 +112,10 @@@ static void init_one_irq_desc(int irq, 
                printk(KERN_ERR "can not alloc kstat_irqs\n");
                BUG_ON(1);
        }
+       if (!init_alloc_desc_masks(desc, cpu, false)) {
+               printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+               BUG_ON(1);
+       }
        arch_init_chip_data(desc, cpu);
  }
  
   */
  DEFINE_SPINLOCK(sparse_irq_lock);
  
- struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+ struct irq_desc **irq_desc_ptrs __read_mostly;
  
  static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
                .handle_irq = handle_bad_irq,
                .depth      = 1,
                .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
- #ifdef CONFIG_SMP
-               .affinity   = CPU_MASK_ALL
- #endif
        }
  };
  
- /* FIXME: use bootmem alloc ...*/
- static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+ static unsigned int *kstat_irqs_legacy;
  
  int __init early_irq_init(void)
  {
        int legacy_count;
        int i;
  
 +      init_irq_default_affinity();
 +
+        /* initialize nr_irqs based on nr_cpu_ids */
+       arch_probe_nr_irqs();
+       printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);
  
+       /* allocate irq_desc_ptrs array based on nr_irqs */
+       irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+       /* allocate based on nr_cpu_ids */
+       /* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */
+       kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+                                         sizeof(int));
        for (i = 0; i < legacy_count; i++) {
                desc[i].irq = i;
-               desc[i].kstat_irqs = kstat_irqs_legacy[i];
+               desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+               init_alloc_desc_masks(&desc[i], 0, true);
                irq_desc_ptrs[i] = desc + i;
        }
  
-       for (i = legacy_count; i < NR_IRQS; i++)
+       for (i = legacy_count; i < nr_irqs; i++)
                irq_desc_ptrs[i] = NULL;
  
        return arch_early_irq_init();
  
  struct irq_desc *irq_to_desc(unsigned int irq)
  {
-       return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+       if (irq_desc_ptrs && irq < nr_irqs)
+               return irq_desc_ptrs[irq];
+       return NULL;
  }
  
  struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
        unsigned long flags;
        int node;
  
-       if (irq >= NR_IRQS) {
-               printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
-                               irq, NR_IRQS);
-               WARN_ON(1);
+       if (irq >= nr_irqs) {
+               WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+                       irq, nr_irqs);
                return NULL;
        }
  
@@@ -221,9 -220,6 +234,6 @@@ struct irq_desc irq_desc[NR_IRQS] __cac
                .handle_irq = handle_bad_irq,
                .depth = 1,
                .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
- #ifdef CONFIG_SMP
-               .affinity = CPU_MASK_ALL
- #endif
        }
  };
  
@@@ -233,14 -229,15 +243,17 @@@ int __init early_irq_init(void
        int count;
        int i;
  
 +      init_irq_default_affinity();
 +
+       printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);
  
-       for (i = 0; i < count; i++)
+       for (i = 0; i < count; i++) {
                desc[i].irq = i;
+               init_alloc_desc_masks(&desc[i], 0, true);
+       }
        return arch_early_irq_init();
  }
  
diff --combined kernel/irq/manage.c
index 291f03664552387658690f947b1d3a4d9562dcc6,b98739af455893d7a5d919987e49635d5ccbf50b..a3a5dc9ef346d813edf3b971926cb649d9c89d67
  
  #include "internals.h"
  
 -#ifdef CONFIG_SMP
 +#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
  cpumask_var_t irq_default_affinity;
  
 -static int init_irq_default_affinity(void)
 -{
 -      alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
 -      cpumask_setall(irq_default_affinity);
 -      return 0;
 -}
 -core_initcall(init_irq_default_affinity);
 -
  /**
   *    synchronize_irq - wait for pending IRQ handlers (on other CPUs)
   *    @irq: interrupt number to wait for
@@@ -90,14 -98,14 +90,14 @@@ int irq_set_affinity(unsigned int irq, 
  
  #ifdef CONFIG_GENERIC_PENDING_IRQ
        if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-               cpumask_copy(&desc->affinity, cpumask);
+               cpumask_copy(desc->affinity, cpumask);
                desc->chip->set_affinity(irq, cpumask);
        } else {
                desc->status |= IRQ_MOVE_PENDING;
-               cpumask_copy(&desc->pending_mask, cpumask);
+               cpumask_copy(desc->pending_mask, cpumask);
        }
  #else
-       cpumask_copy(&desc->affinity, cpumask);
+       cpumask_copy(desc->affinity, cpumask);
        desc->chip->set_affinity(irq, cpumask);
  #endif
        desc->status |= IRQ_AFFINITY_SET;
@@@ -119,16 -127,16 +119,16 @@@ int do_irq_select_affinity(unsigned in
         * one of the targets is online.
         */
        if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-               if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+               if (cpumask_any_and(desc->affinity, cpu_online_mask)
                    < nr_cpu_ids)
                        goto set_affinity;
                else
                        desc->status &= ~IRQ_AFFINITY_SET;
        }
  
-       cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+       cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
  set_affinity:
-       desc->chip->set_affinity(irq, &desc->affinity);
+       desc->chip->set_affinity(irq, desc->affinity);
  
        return 0;
  }
This page took 0.076001 seconds and 4 git commands to generate.