/*
* lockdep: we want to handle all irq_desc locks as a single lock-class:
*/
-struct lock_class_key irq_desc_lock_class;
+static struct lock_class_key irq_desc_lock_class;
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
desc->irq_data.node = node;
cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ cpumask_clear(desc->pending_mask);
+#endif
+}
+
/* Return the NUMA node recorded in the descriptor's irq_data (SMP build;
 * the !SMP stub below simply returns 0). */
static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}
#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
+static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
desc->status = IRQ_DEFAULT_INIT_FLAGS;
desc->handle_irq = handle_bad_irq;
desc->depth = 1;
+ desc->irq_count = 0;
+ desc->irqs_unhandled = 0;
desc->name = NULL;
memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
desc_smp_init(desc, node);
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
-DEFINE_RAW_SPINLOCK(sparse_irq_lock);
+static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
#ifdef CONFIG_SPARSE_IRQ
/*
 * (Re)allocate the zeroed per-cpu interrupt-statistics array for @desc
 * on NUMA node @node, sized for @nr counters.  GFP_ATOMIC is used here;
 * NOTE(review): presumably because callers may hold raw locks — confirm
 * against the call sites.
 */
void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
	void *ptr;

	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
			   GFP_ATOMIC, node);

	/*
	 * Don't overwrite if we can not get a new one:
	 * init_copy_kstat_irqs() could still use the old one.
	 */
	if (ptr) {
		printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
		desc->kstat_irqs = ptr;
	}
}
-
static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
return radix_tree_lookup(&irq_desc_tree, irq);
}
/*
 * Swap the descriptor stored for @irq in the sparse-irq radix tree with
 * @desc.  A missing slot is silently ignored: there is nothing to replace.
 */
void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	void **ptr;

	ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
	if (ptr)
		radix_tree_replace_slot(ptr, desc);
}
-
static void delete_irq_desc(unsigned int irq)
{
radix_tree_delete(&irq_desc_tree, irq);
static void free_desc(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
- unsigned long flags;
unregister_irq_proc(irq, desc);
- raw_spin_lock_irqsave(&sparse_irq_lock, flags);
+ mutex_lock(&sparse_irq_lock);
delete_irq_desc(irq);
- raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ mutex_unlock(&sparse_irq_lock);
free_masks(desc);
kfree(desc->kstat_irqs);
static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
struct irq_desc *desc;
- unsigned long flags;
int i;
for (i = 0; i < cnt; i++) {
desc = alloc_desc(start + i, node);
if (!desc)
goto err;
- raw_spin_lock_irqsave(&sparse_irq_lock, flags);
+ mutex_lock(&sparse_irq_lock);
irq_insert_desc(start + i, desc);
- raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ mutex_unlock(&sparse_irq_lock);
}
return start;
for (i--; i >= 0; i--)
free_desc(start + i);
- raw_spin_lock_irqsave(&sparse_irq_lock, flags);
+ mutex_lock(&sparse_irq_lock);
bitmap_clear(allocated_irqs, start, cnt);
- raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ mutex_unlock(&sparse_irq_lock);
return -ENOMEM;
}
return irq_to_desc(irq);
}
/* NUMA node of a descriptor: irq_data.node on SMP, 0 otherwise. */
#ifdef CONFIG_SMP
static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}
#else
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif
-
/*
 * !CONFIG_SPARSE_IRQ variant: descriptors are statically allocated, so
 * "freeing" an irq just resets its descriptor to the default state
 * (dynamic_irq_cleanup() runs desc_set_defaults() under desc->lock).
 */
static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
*/
void irq_free_descs(unsigned int from, unsigned int cnt)
{
- unsigned long flags;
int i;
if (from >= nr_irqs || (from + cnt) > nr_irqs)
for (i = 0; i < cnt; i++)
free_desc(from + i);
- raw_spin_lock_irqsave(&sparse_irq_lock, flags);
+ mutex_lock(&sparse_irq_lock);
bitmap_clear(allocated_irqs, from, cnt);
- raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ mutex_unlock(&sparse_irq_lock);
}
/**
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
- unsigned long flags;
int start, ret;
if (!cnt)
return -EINVAL;
- raw_spin_lock_irqsave(&sparse_irq_lock, flags);
+ mutex_lock(&sparse_irq_lock);
start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
ret = -EEXIST;
goto err;
bitmap_set(allocated_irqs, start, cnt);
- raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ mutex_unlock(&sparse_irq_lock);
return alloc_descs(start, cnt, node);
err:
- raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ mutex_unlock(&sparse_irq_lock);
return ret;
}
*/
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	/* Reject an empty request or one that runs past the irq space. */
	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	/*
	 * The whole range is free exactly when the first zero area of
	 * length @cnt found at or after @from begins at @from itself.
	 */
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;	/* some irq in [from, from+cnt) already taken */
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
return find_next_bit(allocated_irqs, nr_irqs, offset);
}
/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq: irq number to clean up
 *
 * Resets the descriptor to its default state (status, handler, depth,
 * counters — see desc_set_defaults()) while holding the descriptor lock.
 * The descriptor's NUMA node is preserved via desc_node().
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)