/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
        alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

#ifdef CONFIG_SPARSE_IRQ
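
/*
 * With CONFIG_SPARSE_IRQ the descriptors are allocated on demand and
 * looked up in a radix tree.  irq_desc_init is the template that every
 * dynamically allocated descriptor is copied from.
 */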
static struct irq_desc irq_desc_init = {
        .status         = IRQ_DISABLED,
        .handle_irq     = handle_bad_irq,
        .depth          = 1,
        .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};
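
/**
 * init_kstat_irqs - allocate the per-CPU interrupt statistics for a descriptor
 * @desc:       descriptor to attach the counters to
 * @node:       memory node to allocate from
 * @nr:         number of counters to allocate (one per possible CPU)
 *
 * On allocation failure an already installed kstat_irqs array is left
 * untouched.
 */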
void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
        void *ptr;

        ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
                           GFP_ATOMIC, node);

        /*
         * don't overwrite if we can not get a new one;
         * init_copy_kstat_irqs() could still use the old one
         */
        if (ptr) {
                printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
                desc->kstat_irqs = ptr;
        }
}
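
/*
 * Fully initialize one dynamically allocated descriptor from the
 * irq_desc_init template: lock, irq number, node, statistics counters,
 * cpumasks and architecture specific chip data.
 */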
static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
        memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

        raw_spin_lock_init(&desc->lock);
        desc->irq_data.irq = irq;
#ifdef CONFIG_SMP
        desc->irq_data.node = node;
#endif
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_kstat_irqs(desc, node, nr_cpu_ids);
        if (!desc->kstat_irqs) {
                printk(KERN_ERR "can not alloc kstat_irqs\n");
                BUG_ON(1);
        }
        if (!alloc_desc_masks(desc, node, false)) {
                printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
                BUG_ON(1);
        }
        init_desc_masks(desc);
        arch_init_chip_data(desc, node);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_RAW_SPINLOCK(sparse_irq_lock);

static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);

static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}
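
/**
 * irq_to_desc - look up the descriptor of an interrupt
 * @irq:        interrupt number
 *
 * Returns NULL when no descriptor has been allocated for @irq yet.
 * A typical caller resolves the descriptor once and then works on it
 * under desc->lock, e.g. (sketch):
 *
 *      struct irq_desc *desc = irq_to_desc(irq);
 *      if (desc)
 *              ... inspect or modify the descriptor under desc->lock ...
 */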
struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}

void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
        void **ptr;

        ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
        if (ptr)
                radix_tree_replace_slot(ptr, desc);
}
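
/*
 * The first NR_IRQS_LEGACY descriptors are allocated statically so they
 * can be used before the allocators are up; early_irq_init() registers
 * them in the radix tree.
 */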
static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
                .status         = IRQ_DISABLED,
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
        }
};

static unsigned int *kstat_irqs_legacy;
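
/*
 * Set up the legacy descriptors at boot: determine nr_irqs, carve the
 * per-CPU statistics counters for all legacy irqs out of one allocation
 * and insert each descriptor into the radix tree.
 */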
int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int legacy_count;
        int node;
        int i;

        init_irq_default_affinity();

        /* initialize nr_irqs based on nr_cpu_ids */
        arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);
        node = first_online_node;

        /* allocate based on nr_cpu_ids */
        kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
                                          sizeof(int), GFP_NOWAIT, node);

        irq_desc_init.irq_data.chip = &no_irq_chip;

        for (i = 0; i < legacy_count; i++) {
                desc[i].irq_data.irq = i;
                desc[i].irq_data.chip = &no_irq_chip;
#ifdef CONFIG_SMP
                desc[i].irq_data.node = node;
#endif
                desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
                alloc_desc_masks(&desc[i], node, true);
                init_desc_masks(&desc[i]);
                set_irq_desc(i, &desc[i]);
        }

        return arch_early_irq_init();
}
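
/**
 * irq_to_desc_alloc_node - look up a descriptor, allocating it if necessary
 * @irq:        interrupt number
 * @node:       memory node to allocate the descriptor on
 *
 * Returns the existing descriptor when one is already installed.
 * Otherwise a new descriptor is allocated and initialized under
 * sparse_irq_lock; the lookup is repeated with the lock held to close
 * the race with another CPU installing the same irq.
 */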
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
        struct irq_desc *desc;
        unsigned long flags;

        if (irq >= nr_irqs) {
                WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
                        irq, nr_irqs);
                return NULL;
        }

        desc = irq_to_desc(irq);
        if (desc)
                return desc;

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);

        /* We have to check it to avoid races with another CPU */
        desc = irq_to_desc(irq);
        if (desc)
                goto out_unlock;

        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);

        printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
        if (!desc) {
                printk(KERN_ERR "can not alloc irq_desc\n");
                BUG_ON(1);
        }
        init_one_irq_desc(irq, desc, node);

        set_irq_desc(irq, desc);

out_unlock:
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

        return desc;
}

#else /* !CONFIG_SPARSE_IRQ */
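
/*
 * Without CONFIG_SPARSE_IRQ all NR_IRQS descriptors live in one static,
 * cacheline aligned array and lookup is plain array indexing.
 */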
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status         = IRQ_DISABLED,
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];

int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int count;
        int i;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].irq_data.irq = i;
                desc[i].irq_data.chip = &no_irq_chip;
                alloc_desc_masks(&desc[i], 0, true);
                init_desc_masks(&desc[i]);
                desc[i].kstat_irqs = kstat_irqs_all[i];
        }

        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
        return irq_to_desc(irq);
}

#endif /* !CONFIG_SPARSE_IRQ */
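
/*
 * Reset the per-CPU statistics counters of a descriptor.
 */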
void clear_kstat_irqs(struct irq_desc *desc)
{
        memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}
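
/*
 * Set the shared lockdep class on every descriptor lock that already
 * exists, so that lockdep handles all irq_desc locks as one class.
 */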
void early_init_irq_lock_class(void)
{
        struct irq_desc *desc;
        int i;

        for_each_irq_desc(i, desc) {
                lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        }
}
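
/**
 * kstat_irqs_cpu - read the interrupt count of one irq on one CPU
 * @irq:        interrupt number
 * @cpu:        cpu whose counter to read
 *
 * Returns 0 when @irq has no descriptor.  Sketch of a typical use,
 * e.g. summing the counts across CPUs the way /proc/interrupts does:
 *
 *      unsigned int sum = 0;
 *      int cpu;
 *
 *      for_each_possible_cpu(cpu)
 *              sum += kstat_irqs_cpu(irq, cpu);
 */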
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);
        return desc ? desc->kstat_irqs[cpu] : 0;
}
EXPORT_SYMBOL(kstat_irqs_cpu);