genirq: Use sane sparse allocator
author Thomas Gleixner <tglx@linutronix.de>
Mon, 27 Sep 2010 18:02:56 +0000 (20:02 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Tue, 12 Oct 2010 14:39:07 +0000 (16:39 +0200)
Make irq_to_desc_alloc_node() a wrapper around the new allocator.
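
The wrapper now just forwards to irq_alloc_descs() and looks the
descriptor up again.  A minimal sketch of the resulting contract, under
the assumption (matching the hunk below) that irq_alloc_descs() returns
the first allocated irq number on success and -EEXIST when the
descriptor already exists; the caller shown is illustrative only:

	/*
	 * Illustrative only: both a fresh allocation (res == irq) and an
	 * already populated slot (res == -EEXIST) leave a descriptor in
	 * place, so the wrapper hands it back; any other error maps to
	 * NULL for the old-style callers.
	 */
	struct irq_desc *desc = irq_to_desc_alloc_node(irq, node);
	if (!desc)
		printk(KERN_ERR "irq %u: can not alloc descriptor\n", irq);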

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
kernel/irq/irqdesc.c

index 35d9052..7cbe4f9 100644
@@ -51,7 +51,7 @@ static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
 
 static void desc_smp_init(struct irq_desc *desc, int node)
 {
-       desc->node = node;
+       desc->irq_data.node = node;
        cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
 }
 
@@ -84,13 +84,6 @@ static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
 
 #ifdef CONFIG_SPARSE_IRQ
 
-static struct irq_desc irq_desc_init = {
-       .status         = IRQ_DEFAULT_INIT_FLAGS,
-       .handle_irq     = handle_bad_irq,
-       .depth          = 1,
-       .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-};
-
 void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
 {
        void *ptr;
@@ -108,29 +101,6 @@ void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
        }
 }
 
-static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
-{
-       memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
-
-       raw_spin_lock_init(&desc->lock);
-       desc->irq_data.irq = irq;
-#ifdef CONFIG_SMP
-       desc->irq_data.node = node;
-#endif
-       lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-       init_kstat_irqs(desc, node, nr_cpu_ids);
-       if (!desc->kstat_irqs) {
-               printk(KERN_ERR "can not alloc kstat_irqs\n");
-               BUG_ON(1);
-       }
-       if (!alloc_desc_masks(desc, node, false)) {
-               printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
-               BUG_ON(1);
-       }
-       init_desc_masks(desc);
-       arch_init_chip_data(desc, node);
-}
-
 static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
 
 static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
@@ -171,8 +141,9 @@ static inline void free_masks(struct irq_desc *desc) { }
 
 static struct irq_desc *alloc_desc(int irq, int node)
 {
+       /* Temporary hack until we can switch to GFP_KERNEL */
+       gfp_t gfp = gfp_allowed_mask == GFP_BOOT_MASK ? GFP_NOWAIT : GFP_ATOMIC;
        struct irq_desc *desc;
-       gfp_t gfp = GFP_KERNEL;
 
        desc = kzalloc_node(sizeof(*desc), gfp, node);
        if (!desc)
@@ -226,6 +197,8 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node)
                desc = alloc_desc(start + i, node);
                if (!desc)
                        goto err;
+               /* temporary until I fixed x86 madness */
+               arch_init_chip_data(desc, node);
                raw_spin_lock_irqsave(&sparse_irq_lock, flags);
                irq_insert_desc(start + i, desc);
                raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
@@ -242,23 +215,19 @@ err:
        return -ENOMEM;
 }
 
-static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
-       [0 ... NR_IRQS_LEGACY-1] = {
-               .status         = IRQ_DEFAULT_INIT_FLAGS,
-               .handle_irq     = handle_bad_irq,
-               .depth          = 1,
-               .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-       }
-};
+struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
+{
+       int res = irq_alloc_descs(irq, irq, 1, node);
 
-static unsigned int *kstat_irqs_legacy;
+       if (res == -EEXIST || res == irq)
+               return irq_to_desc(irq);
+       return NULL;
+}
 
 int __init early_irq_init(void)
 {
+       int i, node = first_online_node;
        struct irq_desc *desc;
-       int legacy_count;
-       int node;
-       int i;
 
        init_irq_default_affinity();
 
@@ -266,71 +235,14 @@ int __init early_irq_init(void)
        arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
 
-       desc = irq_desc_legacy;
-       legacy_count = ARRAY_SIZE(irq_desc_legacy);
-       node = first_online_node;
-
-       /* allocate based on nr_cpu_ids */
-       kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
-                                         sizeof(int), GFP_NOWAIT, node);
-
-       irq_desc_init.irq_data.chip = &no_irq_chip;
-
-       for (i = 0; i < legacy_count; i++) {
-               desc[i].irq_data.irq = i;
-               desc[i].irq_data.chip = &no_irq_chip;
-#ifdef CONFIG_SMP
-               desc[i].irq_data.node = node;
-#endif
-               desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
-               lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-               alloc_desc_masks(&desc[i], node, true);
-               init_desc_masks(&desc[i]);
-               irq_insert_desc(i, &desc[i]);
+       for (i = 0; i < NR_IRQS_LEGACY; i++) {
+               desc = alloc_desc(i, node);
+               set_bit(i, allocated_irqs);
+               irq_insert_desc(i, desc);
        }
-
        return arch_early_irq_init();
 }
 
-struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
-{
-       struct irq_desc *desc;
-       unsigned long flags;
-
-       if (irq >= nr_irqs) {
-               WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
-                       irq, nr_irqs);
-               return NULL;
-       }
-
-       desc = irq_to_desc(irq);
-       if (desc)
-               return desc;
-
-       raw_spin_lock_irqsave(&sparse_irq_lock, flags);
-
-       /* We have to check it to avoid races with another CPU */
-       desc = irq_to_desc(irq);
-       if (desc)
-               goto out_unlock;
-
-       desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-
-       printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
-       if (!desc) {
-               printk(KERN_ERR "can not alloc irq_desc\n");
-               BUG_ON(1);
-       }
-       init_one_irq_desc(irq, desc, node);
-
-       irq_insert_desc(irq, desc);
-
-out_unlock:
-       raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
-
-       return desc;
-}
-
 #else /* !CONFIG_SPARSE_IRQ */
 
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
@@ -345,9 +257,8 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
 int __init early_irq_init(void)
 {
+       int count, i, node = first_online_node;
        struct irq_desc *desc;
-       int count;
-       int i;
 
        init_irq_default_affinity();
 
@@ -359,9 +270,9 @@ int __init early_irq_init(void)
        for (i = 0; i < count; i++) {
                desc[i].irq_data.irq = i;
                desc[i].irq_data.chip = &no_irq_chip;
-               alloc_desc_masks(&desc[i], 0, true);
-               init_desc_masks(&desc[i]);
                desc[i].kstat_irqs = kstat_irqs_all[i];
+               alloc_masks(desc + i, GFP_KERNEL, node);
+               desc_smp_init(desc + i, node);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
        }
        return arch_early_irq_init();
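
Usage note: old callers keep working through the wrapper, while new code
can talk to the bitmap-based allocator directly.  A minimal sketch,
assuming a hypothetical driver; only irq_alloc_descs() and
irq_free_descs() are real interfaces from this series, everything else
is made up for illustration:

	/* Hypothetical driver setup, for illustration only */
	static int example_setup(int node)
	{
		/* irq < 0: pick the first free range of 4 descriptors */
		int irq = irq_alloc_descs(-1, 0, 4, node);

		if (irq < 0)
			return irq;	/* -EEXIST, -ENOMEM, ... */
		/* ... wire up handlers for irq .. irq + 3 ... */
		return irq;
	}

	static void example_teardown(int irq)
	{
		irq_free_descs(irq, 4);
	}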