/*
 * Derived from arch/i386/kernel/irq.c
 *    Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *    Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 *
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/dbell.h>
#include <asm/smp.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

#ifndef CONFIG_SPARSE_IRQ
EXPORT_SYMBOL(irq_desc);
#endif

int distribute_irqs = 1;
static inline notrace unsigned long get_hard_enabled(void)
{
	unsigned long enabled;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

	return enabled;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}
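/*
 * For reference: the two helpers above are, in effect, the asm form of
 * "return local_paca->hard_enabled" and "local_paca->soft_enabled = enable".
 * They are written by hand so that gcc is forced to access the paca through
 * r13 in a single instruction, with no intermediate register that a
 * preemption to a different cpu could make stale (see the comments in
 * arch_local_irq_restore() below).
 */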
notrace void arch_local_irq_restore(unsigned long en)
{
	/*
	 * get_paca()->soft_enabled = en;
	 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
	 * That was allowed before, and in such a case we do need to take care
	 * that gcc will set soft_enabled directly via r13, not choose to use
	 * an intermediate register, lest we're preempted to a different cpu.
	 */
	set_soft_enabled(en);
	if (!en)
		return;

#ifdef CONFIG_PPC_STD_MMU_64
	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		/*
		 * Do we need to disable preemption here?  Not really: in the
		 * unlikely event that we're preempted to a different cpu in
		 * between getting r13, loading its lppaca_ptr, and loading
		 * its any_int, we might call iseries_handle_interrupts without
		 * an interrupt pending on the new cpu, but that's no disaster,
		 * is it?  And the business of preempting us off the old cpu
		 * would itself involve a local_irq_restore which handles the
		 * interrupt to that cpu.
		 *
		 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
		 * to avoid any preemption checking added into get_paca().
		 */
		if (local_paca->lppaca_ptr->int_dword.any_int)
			iseries_handle_interrupts();
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	/*
	 * if (get_paca()->hard_enabled) return;
	 * But again we need to take care that gcc gets hard_enabled directly
	 * via r13, not choose to use an intermediate register, lest we're
	 * preempted to a different cpu in between the two instructions.
	 */
	if (get_hard_enabled())
		return;

#if defined(CONFIG_BOOKE) && defined(CONFIG_SMP)
	/* Check for pending doorbell interrupts and resend to ourself */
	doorbell_check_self();
#endif

	/*
	 * Need to hard-enable interrupts here.  Since currently disabled,
	 * no need to take further asm precautions against preemption; but
	 * use local_paca instead of get_paca() to avoid preemption checking.
	 */
	local_paca->hard_enabled = en;

#ifndef CONFIG_BOOKE
	/* On server, re-trigger the decrementer if it went negative since
	 * some processors only trigger on edge transitions of the sign bit.
	 *
	 * BookE has a level sensitive decrementer (latches in TSR) so we
	 * don't need to re-trigger it here.
	 */
	if ((int)mfspr(SPRN_DEC) < 0)
		mtspr(SPRN_DEC, 1);
#endif /* !CONFIG_BOOKE */

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp;
		lv1_get_version_info(&tmp);
	}

	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
#endif /* CONFIG_PPC64 */
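/*
 * Usage sketch (illustrative only, not part of this file): nothing calls
 * arch_local_irq_restore() directly; it sits behind the generic wrappers,
 * which on 64-bit implement the lazy-disable scheme above:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// cheap: only soft_enabled is cleared
 *	// ... critical section; a hardware interrupt arriving here is
 *	// masked and noted, then replayed by the restore below ...
 *	local_irq_restore(flags);	// may hard-enable and replay
 */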
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "CNT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

	return 0;
}
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;

	return sum;
}
#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
	struct irq_desc *desc;
	unsigned int irq;
	static int warned;
	cpumask_var_t mask;
	const struct cpumask *map = cpu_online_mask;

	alloc_cpumask_var(&mask, GFP_KERNEL);

	for_each_irq(irq) {
		struct irq_data *data;
		struct irq_chip *chip;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		data = irq_desc_get_irq_data(desc);
		if (irqd_is_per_cpu(data))
			continue;

		chip = irq_data_get_irq_chip(data);

		cpumask_and(mask, data->affinity, map);
		if (cpumask_any(mask) >= nr_cpu_ids) {
			printk("Breaking affinity for irq %i\n", irq);
			cpumask_copy(mask, map);
		}
		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, mask, true);
		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	free_cpumask_var(mask);

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static inline void handle_one_irq(unsigned int irq)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit;
	struct irq_desc *desc;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[smp_processor_id()];

	if (curtp == irqtp) {
		/* We're already on the irq stack, just handle it */
		generic_handle_irq(irq);
		return;
	}

	desc = irq_to_desc(irq);
	saved_sp_limit = current->thread.ksp_limit;

	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context. */
	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
			       (curtp->preempt_count & SOFTIRQ_MASK);

	current->thread.ksp_limit = (unsigned long)irqtp +
		_ALIGN_UP(sizeof(struct thread_info), 16);

	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}
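/*
 * Note on the stack switch above: running handlers on a dedicated per-cpu
 * irq stack (hardirq_ctx) keeps deep interrupt processing from overflowing
 * the interrupted task's kernel stack. ksp_limit is updated around the call
 * so the stack-limit checks apply to the irq stack while we are on it.
 */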
static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = __get_SP() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}
void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;

	trace_irq_entry(regs);

	irq_enter();

	check_stack_overflow();

	irq = ppc_md.get_irq();

	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
		handle_one_irq(irq);
	else if (irq != NO_IRQ_IGNORE)
		__get_cpu_var(irq_stat).spurious_irqs++;

	irq_exit();
	set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES) &&
			get_lppaca()->int_dword.fields.decr_int) {
		get_lppaca()->int_dword.fields.decr_int = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
#endif

	trace_irq_exit(regs);
}
void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, cpu_nr;

	for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
		cpu_nr = i;
#else
		cpu_nr = get_hard_smp_processor_id(i);
#endif
		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = critirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = dbgirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}
static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit = current->thread.ksp_limit;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	current->thread.ksp_limit = (unsigned long)irqtp +
				    _ALIGN_UP(sizeof(struct thread_info), 16);
	call_do_softirq(irqtp);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;
}
void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending())
		do_softirq_onstack();

	local_irq_restore(flags);
}
/*
 * IRQ controller and virtual interrupts
 */

static LIST_HEAD(irq_hosts);
static DEFINE_RAW_SPINLOCK(irq_big_lock);
static unsigned int revmap_trees_allocated;
static DEFINE_MUTEX(revmap_trees_mutex);
struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;
irq_hw_number_t virq_to_hw(unsigned int virq)
{
	return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
	return h->of_node != NULL && h->of_node == np;
}
struct irq_host *irq_alloc_host(struct device_node *of_node,
				unsigned int revmap_type,
				unsigned int revmap_arg,
				struct irq_host_ops *ops,
				irq_hw_number_t inval_irq)
{
	struct irq_host *host;
	unsigned int size = sizeof(struct irq_host);
	unsigned int i;
	unsigned int *rmap;
	unsigned long flags;

	/* Allocate structure and revmap table if using linear mapping */
	if (revmap_type == IRQ_HOST_MAP_LINEAR)
		size += revmap_arg * sizeof(unsigned int);
	host = zalloc_maybe_bootmem(size, GFP_KERNEL);
	if (host == NULL)
		return NULL;

	/* Fill structure */
	host->revmap_type = revmap_type;
	host->inval_irq = inval_irq;
	host->ops = ops;
	host->of_node = of_node_get(of_node);

	if (host->ops->match == NULL)
		host->ops->match = default_irq_host_match;

	raw_spin_lock_irqsave(&irq_big_lock, flags);

	/* If it's a legacy controller, check for duplicates and
	 * mark it as allocated (we use the irq 0 host pointer for that)
	 */
	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
		if (irq_map[0].host != NULL) {
			raw_spin_unlock_irqrestore(&irq_big_lock, flags);
			/* If we are early boot, we can't free the structure,
			 * too bad...
			 * this will be fixed once slab is made available early
			 * instead of the current cruft
			 */
			if (mem_init_done) {
				of_node_put(host->of_node);
				kfree(host);
			}
			return NULL;
		}
		irq_map[0].host = host;
	}

	list_add(&host->link, &irq_hosts);
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);

	/* Additional setups per revmap type */
	switch(revmap_type) {
	case IRQ_HOST_MAP_LEGACY:
		/* 0 is always the invalid number for legacy */
		host->inval_irq = 0;
		/* setup us as the host for all legacy interrupts */
		for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
			irq_map[i].hwirq = i;
			smp_wmb();
			irq_map[i].host = host;
			smp_wmb();

			/* Clear norequest flags */
			irq_clear_status_flags(i, IRQ_NOREQUEST);

			/* Legacy flags are left to default at this point,
			 * one can then use irq_create_mapping() to
			 * explicitly change them
			 */
			ops->map(host, i, i);
		}
		break;
	case IRQ_HOST_MAP_LINEAR:
		rmap = (unsigned int *)(host + 1);
		for (i = 0; i < revmap_arg; i++)
			rmap[i] = NO_IRQ;
		host->revmap_data.linear.size = revmap_arg;
		smp_wmb();
		host->revmap_data.linear.revmap = rmap;
		break;
	default:
		break;
	}

	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

	return host;
}
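/*
 * Usage sketch (illustrative only, not part of this file): a PIC driver
 * with, say, 64 hardware sources would typically allocate a linear-revmap
 * host from its init code; "my_pic_node" and "my_pic_host_ops" are
 * hypothetical names:
 *
 *	struct irq_host *host;
 *
 *	host = irq_alloc_host(my_pic_node, IRQ_HOST_MAP_LINEAR, 64,
 *			      &my_pic_host_ops, 0);
 *	if (host == NULL)
 *		panic("Couldn't allocate PIC irq host!");
 */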
struct irq_host *irq_find_host(struct device_node *node)
{
	struct irq_host *h, *found = NULL;
	unsigned long flags;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 */
	raw_spin_lock_irqsave(&irq_big_lock, flags);
	list_for_each_entry(h, &irq_hosts, link)
		if (h->ops->match(h, node)) {
			found = h;
			break;
		}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);
void irq_set_default_host(struct irq_host *host)
{
	pr_debug("irq: Default host set to @0x%p\n", host);

	irq_default_host = host;
}
void irq_set_virq_count(unsigned int count)
{
	pr_debug("irq: Trying to set virq count to %d\n", count);

	BUG_ON(count < NUM_ISA_INTERRUPTS);
	if (count < NR_IRQS)
		irq_virq_count = count;
}
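/*
 * Illustrative note: a platform whose firmware or hypervisor supports only
 * a limited interrupt space would call this very early, before any host is
 * registered, e.g. irq_set_virq_count(256) (value hypothetical).
 */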
static int irq_setup_virq(struct irq_host *host, unsigned int virq,
			    irq_hw_number_t hwirq)
{
	int res;

	res = irq_alloc_desc_at(virq, 0);
	if (res != virq) {
		pr_debug("irq: -> allocating desc failed\n");
		goto error;
	}

	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	/* map it */
	smp_wmb();
	irq_map[virq].hwirq = hwirq;
	smp_mb();

	if (host->ops->map(host, virq, hwirq)) {
		pr_debug("irq: -> mapping failed, freeing\n");
		goto errdesc;
	}

	return 0;

errdesc:
	irq_free_descs(virq, 1);
error:
	irq_free_virt(virq, 1);
	return -1;
}
unsigned int irq_create_direct_mapping(struct irq_host *host)
{
	unsigned int virq;

	if (host == NULL)
		host = irq_default_host;

	BUG_ON(host == NULL);
	WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

	virq = irq_alloc_virt(host, 1, 0);
	if (virq == NO_IRQ) {
		pr_debug("irq: create_direct virq allocation failed\n");
		return NO_IRQ;
	}

	pr_debug("irq: create_direct obtained virq %d\n", virq);

	if (irq_setup_virq(host, virq, virq))
		return NO_IRQ;

	return virq;
}
unsigned int irq_create_mapping(struct irq_host *host,
				irq_hw_number_t hwirq)
{
	unsigned int virq, hint;

	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL) {
		printk(KERN_WARNING "irq_create_mapping called for"
		       " NULL host, hwirq=%lx\n", hwirq);
		WARN_ON(1);
		return NO_IRQ;
	}
	pr_debug("irq: -> using host @%p\n", host);

	/* Check if a mapping already exists; if it does, call
	 * host->ops->remap() to update the flags
	 */
	virq = irq_find_mapping(host, hwirq);
	if (virq != NO_IRQ) {
		if (host->ops->remap)
			host->ops->remap(host, virq, hwirq);
		pr_debug("irq: -> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Get a virtual interrupt number */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
		/* Handle legacy: the virq is the hwirq */
		virq = (unsigned int)hwirq;
		if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
			return NO_IRQ;
		return virq;
	} else {
		/* Allocate a virtual interrupt number */
		hint = hwirq % irq_virq_count;
		virq = irq_alloc_virt(host, 1, hint);
		if (virq == NO_IRQ) {
			pr_debug("irq: -> virq allocation failed\n");
			return NO_IRQ;
		}
	}

	if (irq_setup_virq(host, virq, hwirq))
		return NO_IRQ;

	printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
		hwirq, host->of_node ? host->of_node->full_name : "null", virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
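/*
 * Usage sketch (illustrative only, not part of this file): platform code
 * that knows a hardware interrupt number maps it first, then requests the
 * resulting virq as usual; "my_host", "MY_HWIRQ" and my_handler() are
 * hypothetical:
 *
 *	unsigned int virq;
 *
 *	virq = irq_create_mapping(my_host, MY_HWIRQ);
 *	if (virq != NO_IRQ)
 *		request_irq(virq, my_handler, 0, "mydev", NULL);
 */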
unsigned int irq_create_of_mapping(struct device_node *controller,
				   const u32 *intspec, unsigned int intsize)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

	if (controller == NULL)
		host = irq_default_host;
	else
		host = irq_find_host(controller);
	if (host == NULL) {
		printk(KERN_WARNING "irq: no irq host found for %s !\n",
		       controller->full_name);
		return NO_IRQ;
	}

	/* If host has no translation, then we assume interrupt line */
	if (host->ops->xlate == NULL)
		hwirq = intspec[0];
	else {
		if (host->ops->xlate(host, controller, intspec, intsize,
				     &hwirq, &type))
			return NO_IRQ;
	}

	/* Create mapping */
	virq = irq_create_mapping(host, hwirq);
	if (virq == NO_IRQ)
		return virq;

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
		irq_set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
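/*
 * Note: device drivers do not usually call irq_create_of_mapping()
 * directly; they use irq_of_parse_and_map(np, index), which decodes the
 * "interrupts" property (following interrupt-parent) and ends up here.
 * Sketch, assuming a driver holding a device_node "np":
 *
 *	unsigned int virq = irq_of_parse_and_map(np, 0);
 *	if (virq == NO_IRQ)
 *		return -EINVAL;
 */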
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;

	if (virq == NO_IRQ)
		return;

	host = irq_map[virq].host;
	WARN_ON(host == NULL);
	if (host == NULL)
		return;

	/* Never unmap legacy interrupts */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return;

	/* remove chip and handler */
	irq_set_chip_and_handler(virq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(virq);

	/* Tell the PIC about it */
	if (host->ops->unmap)
		host->ops->unmap(host, virq);
	smp_mb();

	/* Clear reverse map */
	hwirq = irq_map[virq].hwirq;
	switch(host->revmap_type) {
	case IRQ_HOST_MAP_LINEAR:
		if (hwirq < host->revmap_data.linear.size)
			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
		break;
	case IRQ_HOST_MAP_TREE:
		/*
		 * Check if radix tree allocated yet, if not then nothing to
		 * remove.
		 */
		smp_rmb();
		if (revmap_trees_allocated < 1)
			break;
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&host->revmap_data.tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
		break;
	}

	/* Destroy map */
	smp_mb();
	irq_map[virq].hwirq = host->inval_irq;

	irq_set_status_flags(virq, IRQ_NOREQUEST);

	irq_free_descs(virq, 1);
	/* Free it */
	irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
unsigned int irq_find_mapping(struct irq_host *host,
			      irq_hw_number_t hwirq)
{
	unsigned int i;
	unsigned int hint = hwirq % irq_virq_count;

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL)
		return NO_IRQ;

	/* legacy -> bail early */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return hwirq;

	/* Slow path does a linear search of the map */
	if (hint < NUM_ISA_INTERRUPTS)
		hint = NUM_ISA_INTERRUPTS;
	i = hint;
	do {
		if (irq_map[i].host == host &&
		    irq_map[i].hwirq == hwirq)
			return i;
		i++;
		if (i >= irq_virq_count)
			i = NUM_ISA_INTERRUPTS;
	} while(i != hint);
	return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
unsigned int irq_radix_revmap_lookup(struct irq_host *host,
				     irq_hw_number_t hwirq)
{
	struct irq_map_entry *ptr;
	unsigned int virq;

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

	/*
	 * Check if the radix tree exists and has been initialized.
	 * If not, we fall back to slow mode.
	 */
	if (revmap_trees_allocated < 2)
		return irq_find_mapping(host, hwirq);

	/* Now try to resolve */
	/*
	 * No rcu_read_lock(ing) needed, the ptr returned can't go under us
	 * as it's referencing an entry in the static irq_map table.
	 */
	ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);

	/*
	 * If found in the radix tree, then fine.
	 * Else fall back to a linear lookup - this should not happen in
	 * practice as it means that we failed to insert the node in the
	 * radix tree.
	 */
	if (ptr)
		virq = ptr - irq_map;
	else
		virq = irq_find_mapping(host, hwirq);

	return virq;
}
void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
			     irq_hw_number_t hwirq)
{
	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

	/*
	 * Check if the radix tree exists yet.
	 * If not, then the irq will be inserted into the tree when it gets
	 * initialized.
	 */
	smp_rmb();
	if (revmap_trees_allocated < 1)
		return;

	if (virq != NO_IRQ) {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&host->revmap_data.tree, hwirq,
				  &irq_map[virq]);
		mutex_unlock(&revmap_trees_mutex);
	}
}
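/*
 * Note: for an IRQ_HOST_MAP_TREE host, the host's ops->map() callback is
 * expected to call irq_radix_revmap_insert() once the mapping is set up,
 * and the platform's get_irq() path uses irq_radix_revmap_lookup() for the
 * hwirq -> virq translation (the XICS code on pseries does exactly this).
 */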
unsigned int irq_linear_revmap(struct irq_host *host,
			       irq_hw_number_t hwirq)
{
	unsigned int *revmap;

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);

	/* Check revmap bounds */
	if (unlikely(hwirq >= host->revmap_data.linear.size))
		return irq_find_mapping(host, hwirq);

	/* Check if revmap was allocated */
	revmap = host->revmap_data.linear.revmap;
	if (unlikely(revmap == NULL))
		return irq_find_mapping(host, hwirq);

	/* Fill up revmap with slow path if no mapping found */
	if (unlikely(revmap[hwirq] == NO_IRQ))
		revmap[hwirq] = irq_find_mapping(host, hwirq);

	return revmap[hwirq];
}
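/*
 * Usage sketch (illustrative only, not part of this file): a linear-revmap
 * PIC's get_irq() typically reads the pending source from a register and
 * translates it with the fast path above; my_pic_read_irq() and
 * my_pic_host are hypothetical:
 *
 *	static unsigned int my_pic_get_irq(void)
 *	{
 *		int hw = my_pic_read_irq();
 *
 *		if (hw < 0)
 *			return NO_IRQ;
 *		return irq_linear_revmap(my_pic_host, hw);
 *	}
 */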
unsigned int irq_alloc_virt(struct irq_host *host,
			    unsigned int count,
			    unsigned int hint)
{
	unsigned long flags;
	unsigned int i, j, found = NO_IRQ;

	if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
		return NO_IRQ;

	raw_spin_lock_irqsave(&irq_big_lock, flags);

	/* Use hint for 1 interrupt if any */
	if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
	    hint < irq_virq_count && irq_map[hint].host == NULL) {
		found = hint;
		goto hint_found;
	}

	/* Look for count consecutive numbers in the allocatable
	 * (non-legacy) space
	 */
	for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host != NULL)
			j = 0;
		else
			j++;
		if (j == count) {
			found = i - count + 1;
			break;
		}
	}
	if (found == NO_IRQ) {
		raw_spin_unlock_irqrestore(&irq_big_lock, flags);
		return NO_IRQ;
	}
 hint_found:
	for (i = found; i < (found + count); i++) {
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = host;
	}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}
void irq_free_virt(unsigned int virq, unsigned int count)
{
	unsigned long flags;
	unsigned int i;

	WARN_ON (virq < NUM_ISA_INTERRUPTS);
	WARN_ON (count == 0 || (virq + count) > irq_virq_count);

	raw_spin_lock_irqsave(&irq_big_lock, flags);
	for (i = virq; i < (virq + count); i++) {
		struct irq_host *host;

		if (i < NUM_ISA_INTERRUPTS ||
		    (virq + count) > irq_virq_count)
			continue;

		host = irq_map[i].host;
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = NULL;
	}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
}
int arch_early_irq_init(void)
{
	return 0;
}
/* We need to create the radix trees late */
static int irq_late_init(void)
{
	struct irq_host *h;
	unsigned int i;

	/*
	 * No mutual exclusion with respect to accessors of the tree is needed
	 * here as the synchronization is done via the state variable
	 * revmap_trees_allocated.
	 */
	list_for_each_entry(h, &irq_hosts, link) {
		if (h->revmap_type == IRQ_HOST_MAP_TREE)
			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
	}

	/*
	 * Make sure the radix tree inits are visible before setting
	 * the flag
	 */
	smp_wmb();
	revmap_trees_allocated = 1;

	/*
	 * Insert the reverse mapping for those interrupts already present
	 * in irq_map[].
	 */
	mutex_lock(&revmap_trees_mutex);
	for (i = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host &&
		    (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
			radix_tree_insert(&irq_map[i].host->revmap_data.tree,
					  irq_map[i].hwirq, &irq_map[i]);
	}
	mutex_unlock(&revmap_trees_mutex);

	/*
	 * Make sure the radix tree insertions are visible before setting
	 * the flag
	 */
	smp_wmb();
	revmap_trees_allocated = 2;

	return 0;
}
arch_initcall(irq_late_init);
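/*
 * Note on the revmap_trees_allocated protocol: 0 means no radix tree
 * exists yet (all lookups take the slow linear path), 1 means the trees
 * are initialized and may be inserted into, and 2 means the pre-existing
 * mappings have also been inserted, so lookups may trust the trees. The
 * smp_wmb()s above pair with the smp_rmb()s in irq_radix_revmap_insert()
 * and irq_dispose_mapping().
 */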
#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	const char *p;
	static const char none[] = "none";
	int i;

	seq_printf(m, "%-5s  %-7s  %-15s  %s\n", "virq", "hwirq",
		      "chip name", "host name");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		if (desc->action && desc->action->handler) {
			struct irq_chip *chip;

			seq_printf(m, "%5d  ", i);
			seq_printf(m, "0x%05lx  ", virq_to_hw(i));

			chip = irq_desc_get_chip(desc);
			if (chip && chip->name)
				p = chip->name;
			else
				p = none;
			seq_printf(m, "%-15s  ", p);

			if (irq_map[i].host && irq_map[i].host->of_node)
				p = irq_map[i].host->of_node->full_name;
			else
				p = none;
			seq_printf(m, "%s\n", p);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}
static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
				 NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */
#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */