/*
 *  Derived from arch/i386/kernel/irq.c
 *    Copyright (C) 1992 Linus Torvalds
 *  Adapted from arch/i386 by Gary Thomas
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *    Copyright (C) 1996-2001 Cort Dougan
 *  Adapted for Power Macintosh by Paul Mackerras
 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif

int __irq_offset_value;
static int ppc_spurious_interrupts;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);

int distribute_irqs = 1;

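/*
 * PPC64 uses a lazy interrupt disable scheme: local_irq_disable() only
 * clears paca->soft_enabled and leaves external interrupts on in the
 * hardware (tracked by paca->hard_enabled) until one actually arrives.
 * The helpers below access those paca fields with a single load/store
 * through r13 (the paca pointer) so the access cannot be split by
 * preemption onto another cpu; see the comments in
 * raw_local_irq_restore() below.
 */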
static inline notrace unsigned long get_hard_enabled(void)
{
        unsigned long enabled;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

        return enabled;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
        __asm__ __volatile__("stb %0,%1(13)"
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

#ifdef CONFIG_PERF_COUNTERS
notrace void __weak perf_counter_do_pending(void)
{
        set_perf_counter_pending(0);
}
#endif

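/*
 * With the lazy scheme above, re-enabling has to replay whatever was
 * noted while we were soft-disabled - pending iSeries events, perf
 * counter work, a decrementer underflow (forced to refire by writing
 * 1 to SPRN_DEC) - before MSR:EE is finally turned back on by
 * __hard_irq_enable().
 */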
notrace void raw_local_irq_restore(unsigned long en)
{
        /*
         * get_paca()->soft_enabled = en;
         * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
         * That was allowed before, and in such a case we do need to take care
         * that gcc will set soft_enabled directly via r13, not choose to use
         * an intermediate register, lest we're preempted to a different cpu.
         */
        set_soft_enabled(en);
        if (!en)
                return;

        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                /*
                 * Do we need to disable preemption here?  Not really: in the
                 * unlikely event that we're preempted to a different cpu in
                 * between getting r13, loading its lppaca_ptr, and loading
                 * its any_int, we might call iseries_handle_interrupts without
                 * an interrupt pending on the new cpu, but that's no disaster,
                 * is it?  And the business of preempting us off the old cpu
                 * would itself involve a local_irq_restore which handles the
                 * interrupt to that cpu.
                 *
                 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
                 * to avoid any preemption checking added into get_paca().
                 */
                if (local_paca->lppaca_ptr->int_dword.any_int)
                        iseries_handle_interrupts();
        }

        if (get_perf_counter_pending())
                perf_counter_do_pending();

        /*
         * if (get_paca()->hard_enabled) return;
         * But again we need to take care that gcc gets hard_enabled directly
         * via r13, not choose to use an intermediate register, lest we're
         * preempted to a different cpu in between the two instructions.
         */
        if (get_hard_enabled())
                return;

        /*
         * Need to hard-enable interrupts here.  Since currently disabled,
         * no need to take further asm precautions against preemption; but
         * use local_paca instead of get_paca() to avoid preemption checking.
         */
        local_paca->hard_enabled = en;
        if ((int)mfspr(SPRN_DEC) < 0)
                mtspr(SPRN_DEC, 1);

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp;
                lv1_get_version_info(&tmp);
        }

        __hard_irq_enable();
}
EXPORT_SYMBOL(raw_local_irq_restore);
#endif /* CONFIG_PPC64 */

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *)v, j;
        struct irqaction *action;
        struct irq_desc *desc;
        unsigned long flags;

        if (i == 0) {
                seq_puts(p, "           ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d       ", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                desc = get_irq_desc(i);
                spin_lock_irqsave(&desc->lock, flags);
                action = desc->action;
                if (!action || !action->handler)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
                seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
                if (desc->chip)
                        seq_printf(p, " %s ", desc->chip->typename);
                else
                        seq_puts(p, "  None      ");
                seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge  ");
                seq_printf(p, "    %s", action->name);
                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);
                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&desc->lock, flags);
        } else if (i == NR_IRQS) {
#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
                if (tau_initialized) {
                        seq_puts(p, "TAU: ");
                        for_each_online_cpu(j)
                                seq_printf(p, "%10u ", tau_interrupts(j));
                        seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
                }
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
                seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
        }
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
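/*
 * CPU hotplug: redirect any interrupt whose affinity no longer
 * intersects the surviving cpus in 'map', so nothing remains targeted
 * at a processor that is going offline.
 */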
void fixup_irqs(cpumask_t map)
{
        unsigned int irq;
        static int warned;

        for_each_irq(irq) {
                cpumask_t mask;

                if (irq_desc[irq].status & IRQ_PER_CPU)
                        continue;

                cpumask_and(&mask, irq_desc[irq].affinity, &map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
                }
                if (irq_desc[irq].chip->set_affinity)
                        irq_desc[irq].chip->set_affinity(irq, &mask);
                else if (irq_desc[irq].action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

        /* Briefly re-enable interrupts so anything still pending for
         * this cpu can be taken before it goes offline.
         */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif

void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned int irq;
#ifdef CONFIG_IRQSTACKS
        struct thread_info *curtp, *irqtp;
#endif

        irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: is there less than 2KB free? */
        {
                long sp;

                sp = __get_SP() & (THREAD_SIZE-1);

                if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
                        printk("do_IRQ: stack overflow: %ld\n",
                                sp - sizeof(struct thread_info));
                        dump_stack();
                }
        }
#endif

        /*
         * Every platform is required to implement ppc_md.get_irq.
         * This function will either return an irq number or NO_IRQ to
         * indicate there are no more pending.
         * The value NO_IRQ_IGNORE is for buggy hardware and means that this
         * IRQ has already been handled. -- Tom
         */
        irq = ppc_md.get_irq();

        if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
#ifdef CONFIG_IRQSTACKS
                /* Switch to the irq stack to handle this */
                curtp = current_thread_info();
                irqtp = hardirq_ctx[smp_processor_id()];
                if (curtp != irqtp) {
                        struct irq_desc *desc = irq_desc + irq;
                        void *handler = desc->handle_irq;
                        unsigned long saved_sp_limit = current->thread.ksp_limit;
                        if (handler == NULL)
                                handler = &__do_IRQ;
                        irqtp->task = curtp->task;
                        irqtp->flags = 0;

                        /* Copy the softirq bits in preempt_count so that the
                         * softirq checks work in the hardirq context.
                         */
                        irqtp->preempt_count =
                                (irqtp->preempt_count & ~SOFTIRQ_MASK) |
                                (curtp->preempt_count & SOFTIRQ_MASK);

                        current->thread.ksp_limit = (unsigned long)irqtp +
                                _ALIGN_UP(sizeof(struct thread_info), 16);
                        call_handle_irq(irq, desc, irqtp, handler);
                        current->thread.ksp_limit = saved_sp_limit;
                        irqtp->task = NULL;

                        /* Set any flag that may have been set on the
                         * alternate stack
                         */
                        if (irqtp->flags)
                                set_bits(irqtp->flags, &curtp->flags);
                } else
#endif
                        generic_handle_irq(irq);
        } else if (irq != NO_IRQ_IGNORE)
                /* That's not SMP safe ... but who cares ? */
                ppc_spurious_interrupts++;

        irq_exit();
        set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) &&
                        get_lppaca()->int_dword.fields.decr_int) {
                get_lppaca()->int_dword.fields.decr_int = 0;
                /* Signal a fake decrementer interrupt */
                timer_interrupt(regs);
        }
#endif
}

void __init init_IRQ(void)
{
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();

        exc_lvl_ctx_init();

        irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info   *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info    *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
                tp = critirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
                memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
                tp = dbgirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = 0;

                memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
                tp = mcheckirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
#endif
        }
}
#endif

#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = 0;

                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
        }
}

static inline void do_softirq_onstack(void)
{
        struct thread_info *curtp, *irqtp;
        unsigned long saved_sp_limit = current->thread.ksp_limit;

        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        current->thread.ksp_limit = (unsigned long)irqtp +
                                    _ALIGN_UP(sizeof(struct thread_info), 16);
        call_do_softirq(irqtp);
        current->thread.ksp_limit = saved_sp_limit;
        irqtp->task = NULL;
}

#else
#define do_softirq_onstack()    __do_softirq()
#endif /* CONFIG_IRQSTACKS */

void do_softirq(void)
{
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending())
                do_softirq_onstack();

        local_irq_restore(flags);
}

/*
 * IRQ controller and virtual interrupts
 */
static LIST_HEAD(irq_hosts);
static DEFINE_SPINLOCK(irq_big_lock);
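/*
 * revmap_trees_allocated is a little state machine: 0 means the radix
 * trees don't exist yet (inserts and deletes are skipped and lookups
 * use the slow linear search), 1 means they are initialized and may
 * be inserted into, 2 means they are fully populated and lookups may
 * trust them.  Readers pair smp_rmb() with the smp_wmb()s in
 * irq_late_init() below.
 */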
static unsigned int revmap_trees_allocated;
static DEFINE_MUTEX(revmap_trees_mutex);
struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;

irq_hw_number_t virq_to_hw(unsigned int virq)
{
        return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
        return h->of_node != NULL && h->of_node == np;
}

struct irq_host *irq_alloc_host(struct device_node *of_node,
                                unsigned int revmap_type,
                                unsigned int revmap_arg,
                                struct irq_host_ops *ops,
                                irq_hw_number_t inval_irq)
{
        struct irq_host *host;
        unsigned int size = sizeof(struct irq_host);
        unsigned int i;
        unsigned int *rmap;
        unsigned long flags;

        /* Allocate structure and revmap table if using linear mapping */
        if (revmap_type == IRQ_HOST_MAP_LINEAR)
                size += revmap_arg * sizeof(unsigned int);
        host = zalloc_maybe_bootmem(size, GFP_KERNEL);
        if (host == NULL)
                return NULL;

        /* Fill structure */
        host->revmap_type = revmap_type;
        host->inval_irq = inval_irq;
        host->ops = ops;
        host->of_node = of_node_get(of_node);

        if (host->ops->match == NULL)
                host->ops->match = default_irq_host_match;

        spin_lock_irqsave(&irq_big_lock, flags);

        /* If it's a legacy controller, check for duplicates and
         * mark it as allocated (we use the irq 0 host pointer for that)
         */
        if (revmap_type == IRQ_HOST_MAP_LEGACY) {
                if (irq_map[0].host != NULL) {
                        spin_unlock_irqrestore(&irq_big_lock, flags);
                        /* If we are in early boot, we can't free the
                         * structure; too bad... this will be fixed once
                         * slab is made available early instead of the
                         * current cruft
                         */
                        if (mem_init_done)
                                kfree(host);
                        return NULL;
                }
                irq_map[0].host = host;
        }

        list_add(&host->link, &irq_hosts);
        spin_unlock_irqrestore(&irq_big_lock, flags);

        /* Additional setup per revmap type */
        switch (revmap_type) {
        case IRQ_HOST_MAP_LEGACY:
                /* 0 is always the invalid number for legacy */
                host->inval_irq = 0;
                /* set us up as the host for all legacy interrupts */
                for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
                        irq_map[i].hwirq = i;
                        smp_wmb();
                        irq_map[i].host = host;
                        smp_wmb();

                        /* Clear norequest flags */
                        get_irq_desc(i)->status &= ~IRQ_NOREQUEST;

                        /* Legacy flags are left to default at this point,
                         * one can then use irq_create_mapping() to
                         * explicitly change them
                         */
                        ops->map(host, i, i);
                }
                break;
        case IRQ_HOST_MAP_LINEAR:
                rmap = (unsigned int *)(host + 1);
                for (i = 0; i < revmap_arg; i++)
                        rmap[i] = NO_IRQ;
                host->revmap_data.linear.size = revmap_arg;
                smp_wmb();
                host->revmap_data.linear.revmap = rmap;
                break;
        default:
                break;
        }

        pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

        return host;
}

struct irq_host *irq_find_host(struct device_node *node)
{
        struct irq_host *h, *found = NULL;
        unsigned long flags;

        /* We might want to match the legacy controller last since
         * it might potentially be set to match all interrupts in
         * the absence of a device node. This isn't a problem so far
         * though...
         */
        spin_lock_irqsave(&irq_big_lock, flags);
        list_for_each_entry(h, &irq_hosts, link)
                if (h->ops->match(h, node)) {
                        found = h;
                        break;
                }
        spin_unlock_irqrestore(&irq_big_lock, flags);
        return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

void irq_set_default_host(struct irq_host *host)
{
        pr_debug("irq: Default host set to @0x%p\n", host);

        irq_default_host = host;
}

void irq_set_virq_count(unsigned int count)
{
        pr_debug("irq: Trying to set virq count to %d\n", count);

        BUG_ON(count < NUM_ISA_INTERRUPTS);
        if (count < NR_IRQS)
                irq_virq_count = count;
}

static int irq_setup_virq(struct irq_host *host, unsigned int virq,
                            irq_hw_number_t hwirq)
{
        /* Clear IRQ_NOREQUEST flag */
        get_irq_desc(virq)->status &= ~IRQ_NOREQUEST;

        /* map it */
        smp_wmb();
        irq_map[virq].hwirq = hwirq;
        smp_mb();

        if (host->ops->map(host, virq, hwirq)) {
                pr_debug("irq: -> mapping failed, freeing\n");
                irq_free_virt(virq, 1);
                return -1;
        }

        return 0;
}

unsigned int irq_create_direct_mapping(struct irq_host *host)
{
        unsigned int virq;

        if (host == NULL)
                host = irq_default_host;

        BUG_ON(host == NULL);
        WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

        virq = irq_alloc_virt(host, 1, 0);
        if (virq == NO_IRQ) {
                pr_debug("irq: create_direct virq allocation failed\n");
                return NO_IRQ;
        }

        pr_debug("irq: create_direct obtained virq %d\n", virq);

        if (irq_setup_virq(host, virq, virq))
                return NO_IRQ;

        return virq;
}

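/*
 * Map a hardware interrupt into the Linux irq space.  An existing
 * mapping is simply returned (after offering the host a ->remap()
 * callback); otherwise a free virq is allocated, hinted at
 * hwirq % irq_virq_count so the slow search in irq_find_mapping()
 * tends to terminate quickly.
 */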
unsigned int irq_create_mapping(struct irq_host *host,
                                irq_hw_number_t hwirq)
{
        unsigned int virq, hint;

        pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

        /* Look for default host if necessary */
        if (host == NULL)
                host = irq_default_host;
        if (host == NULL) {
                printk(KERN_WARNING "irq_create_mapping called for"
                       " NULL host, hwirq=%lx\n", hwirq);
                WARN_ON(1);
                return NO_IRQ;
        }
        pr_debug("irq: -> using host @%p\n", host);

        /* Check if a mapping already exists, if it does, call
         * host->ops->remap() to update the flags
         */
        virq = irq_find_mapping(host, hwirq);
        if (virq != NO_IRQ) {
                if (host->ops->remap)
                        host->ops->remap(host, virq, hwirq);
                pr_debug("irq: -> existing mapping on virq %d\n", virq);
                return virq;
        }

        /* Get a virtual interrupt number */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
                /* Handle legacy */
                virq = (unsigned int)hwirq;
                if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
                        return NO_IRQ;
                return virq;
        } else {
                /* Allocate a virtual interrupt number */
                hint = hwirq % irq_virq_count;
                virq = irq_alloc_virt(host, 1, hint);
                if (virq == NO_IRQ) {
                        pr_debug("irq: -> virq allocation failed\n");
                        return NO_IRQ;
                }
        }
        pr_debug("irq: -> obtained virq %d\n", virq);

        if (irq_setup_virq(host, virq, hwirq))
                return NO_IRQ;

        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);

unsigned int irq_create_of_mapping(struct device_node *controller,
                                   u32 *intspec, unsigned int intsize)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        unsigned int virq;

        if (controller == NULL)
                host = irq_default_host;
        else
                host = irq_find_host(controller);
        if (host == NULL) {
                printk(KERN_WARNING "irq: no irq host found for %s !\n",
                       controller->full_name);
                return NO_IRQ;
        }

        /* If host has no translation, then we assume interrupt line */
        if (host->ops->xlate == NULL)
                hwirq = intspec[0];
        else {
                if (host->ops->xlate(host, controller, intspec, intsize,
                                     &hwirq, &type))
                        return NO_IRQ;
        }

        /* Create mapping */
        virq = irq_create_mapping(host, hwirq);
        if (virq == NO_IRQ)
                return virq;

        /* Set type if specified and different than the current one */
        if (type != IRQ_TYPE_NONE &&
            type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK))
                set_irq_type(virq, type);
        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
        struct of_irq oirq;

        if (of_irq_map_one(dev, index, &oirq))
                return NO_IRQ;

        return irq_create_of_mapping(oirq.controller, oirq.specifier,
                                     oirq.size);
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);

void irq_dispose_mapping(unsigned int virq)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;

        if (virq == NO_IRQ)
                return;

        host = irq_map[virq].host;
        WARN_ON(host == NULL);
        if (host == NULL)
                return;

        /* Never unmap legacy interrupts */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return;

        /* remove chip and handler */
        set_irq_chip_and_handler(virq, NULL, NULL);

        /* Make sure it's completed */
        synchronize_irq(virq);

        /* Tell the PIC about it */
        if (host->ops->unmap)
                host->ops->unmap(host, virq);
        smp_mb();

        /* Clear reverse map */
        hwirq = irq_map[virq].hwirq;
        switch (host->revmap_type) {
        case IRQ_HOST_MAP_LINEAR:
                if (hwirq < host->revmap_data.linear.size)
                        host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
                break;
        case IRQ_HOST_MAP_TREE:
                /*
                 * Check if radix tree allocated yet, if not then nothing to
                 * remove.
                 */
                smp_rmb();
                if (revmap_trees_allocated < 1)
                        break;
                mutex_lock(&revmap_trees_mutex);
                radix_tree_delete(&host->revmap_data.tree, hwirq);
                mutex_unlock(&revmap_trees_mutex);
                break;
        }

        /* Destroy map */
        smp_mb();
        irq_map[virq].hwirq = host->inval_irq;

        /* Set some flags */
        get_irq_desc(virq)->status |= IRQ_NOREQUEST;

        /* Free it */
        irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

unsigned int irq_find_mapping(struct irq_host *host,
                              irq_hw_number_t hwirq)
{
        unsigned int i;
        unsigned int hint = hwirq % irq_virq_count;

        /* Look for default host if necessary */
        if (host == NULL)
                host = irq_default_host;
        if (host == NULL)
                return NO_IRQ;

        /* legacy -> bail early */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return hwirq;

        /* Slow path does a linear search of the map */
        if (hint < NUM_ISA_INTERRUPTS)
                hint = NUM_ISA_INTERRUPTS;
        i = hint;
        do {
                if (irq_map[i].host == host &&
                    irq_map[i].hwirq == hwirq)
                        return i;
                i++;
                if (i >= irq_virq_count)
                        i = NUM_ISA_INTERRUPTS;
        } while (i != hint);
        return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

unsigned int irq_radix_revmap_lookup(struct irq_host *host,
                                     irq_hw_number_t hwirq)
{
        struct irq_map_entry *ptr;
        unsigned int virq;

        WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

        /*
         * Check if the radix tree exists and has been initialized.
         * If not, we fall back to slow mode
         */
        if (revmap_trees_allocated < 2)
                return irq_find_mapping(host, hwirq);

        /* Now try to resolve */
        /*
         * No rcu_read_lock(ing) needed, the ptr returned can't go under us
         * as it's referencing an entry in the static irq_map table.
         */
        ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);

        /*
         * If found in radix tree, then fine.
         * Else fallback to linear lookup - this should not happen in practice
         * as it means that we failed to insert the node in the radix tree.
         */
        if (ptr)
                virq = ptr - irq_map;
        else
                virq = irq_find_mapping(host, hwirq);

        return virq;
}

void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
                             irq_hw_number_t hwirq)
{
        WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

        /*
         * Check if the radix tree exists yet.
         * If not, then the irq will be inserted into the tree when it gets
         * initialized.
         */
        smp_rmb();
        if (revmap_trees_allocated < 1)
                return;

        if (virq != NO_IRQ) {
                mutex_lock(&revmap_trees_mutex);
                radix_tree_insert(&host->revmap_data.tree, hwirq,
                                  &irq_map[virq]);
                mutex_unlock(&revmap_trees_mutex);
        }
}

unsigned int irq_linear_revmap(struct irq_host *host,
                               irq_hw_number_t hwirq)
{
        unsigned int *revmap;

        WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);

        /* Check revmap bounds */
        if (unlikely(hwirq >= host->revmap_data.linear.size))
                return irq_find_mapping(host, hwirq);

        /* Check if revmap was allocated */
        revmap = host->revmap_data.linear.revmap;
        if (unlikely(revmap == NULL))
                return irq_find_mapping(host, hwirq);

        /* Fill up revmap with slow path if no mapping found */
        if (unlikely(revmap[hwirq] == NO_IRQ))
                revmap[hwirq] = irq_find_mapping(host, hwirq);

        return revmap[hwirq];
}

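/*
 * Allocate 'count' consecutive virq numbers outside the legacy ISA
 * range.  Note that 'hint' is only honoured for single allocations;
 * multi-irq requests always do the linear scan for a free run.
 */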
unsigned int irq_alloc_virt(struct irq_host *host,
                            unsigned int count,
                            unsigned int hint)
{
        unsigned long flags;
        unsigned int i, j, found = NO_IRQ;

        if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
                return NO_IRQ;

        spin_lock_irqsave(&irq_big_lock, flags);

        /* Use hint for 1 interrupt if any */
        if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
            hint < irq_virq_count && irq_map[hint].host == NULL) {
                found = hint;
                goto hint_found;
        }

        /* Look for count consecutive numbers in the allocatable
         * (non-legacy) space
         */
        for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
                if (irq_map[i].host != NULL)
                        j = 0;
                else
                        j++;

                if (j == count) {
                        found = i - count + 1;
                        break;
                }
        }
        if (found == NO_IRQ) {
                spin_unlock_irqrestore(&irq_big_lock, flags);
                return NO_IRQ;
        }
 hint_found:
        for (i = found; i < (found + count); i++) {
                irq_map[i].hwirq = host->inval_irq;
                smp_wmb();
                irq_map[i].host = host;
        }
        spin_unlock_irqrestore(&irq_big_lock, flags);
        return found;
}

void irq_free_virt(unsigned int virq, unsigned int count)
{
        unsigned long flags;
        unsigned int i;

        WARN_ON(virq < NUM_ISA_INTERRUPTS);
        WARN_ON(count == 0 || (virq + count) > irq_virq_count);

        spin_lock_irqsave(&irq_big_lock, flags);
        for (i = virq; i < (virq + count); i++) {
                struct irq_host *host;

                if (i < NUM_ISA_INTERRUPTS ||
                    (virq + count) > irq_virq_count)
                        continue;

                host = irq_map[i].host;
                irq_map[i].hwirq = host->inval_irq;
                smp_wmb();
                irq_map[i].host = NULL;
        }
        spin_unlock_irqrestore(&irq_big_lock, flags);
}

void irq_early_init(void)
{
        unsigned int i;

        for (i = 0; i < NR_IRQS; i++)
                get_irq_desc(i)->status |= IRQ_NOREQUEST;
}

/* We need to create the radix trees late: their nodes come from the slab
 * allocator, which isn't available when the IRQ hosts first register,
 * hence the arch_initcall below.
 */
static int irq_late_init(void)
{
        struct irq_host *h;
        unsigned int i;

        /*
         * No mutual exclusion with respect to accessors of the tree is needed
         * here as the synchronization is done via the state variable
         * revmap_trees_allocated.
         */
        list_for_each_entry(h, &irq_hosts, link) {
                if (h->revmap_type == IRQ_HOST_MAP_TREE)
                        INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
        }

        /*
         * Make sure the radix trees inits are visible before setting
         * the flag
         */
        smp_wmb();
        revmap_trees_allocated = 1;

        /*
         * Insert the reverse mapping for those interrupts already present
         * in irq_map[].
         */
        mutex_lock(&revmap_trees_mutex);
        for (i = 0; i < irq_virq_count; i++) {
                if (irq_map[i].host &&
                    (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
                        radix_tree_insert(&irq_map[i].host->revmap_data.tree,
                                          irq_map[i].hwirq, &irq_map[i]);
        }
        mutex_unlock(&revmap_trees_mutex);

        /*
         * Make sure the radix trees insertions are visible before setting
         * the flag
         */
        smp_wmb();
        revmap_trees_allocated = 2;

        return 0;
}
arch_initcall(irq_late_init);

#ifdef CONFIG_VIRQ_DEBUG
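/* Expose the virq -> hwirq translation table in debugfs ("virq_mapping"). */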
static int virq_debug_show(struct seq_file *m, void *private)
{
        unsigned long flags;
        struct irq_desc *desc;
        const char *p;
        char none[] = "none";
        int i;

        seq_printf(m, "%-5s  %-7s  %-15s  %s\n", "virq", "hwirq",
                      "chip name", "host name");

        for (i = 1; i < NR_IRQS; i++) {
                desc = get_irq_desc(i);
                spin_lock_irqsave(&desc->lock, flags);

                if (desc->action && desc->action->handler) {
                        seq_printf(m, "%5d  ", i);
                        seq_printf(m, "0x%05lx  ", virq_to_hw(i));

                        if (desc->chip && desc->chip->typename)
                                p = desc->chip->typename;
                        else
                                p = none;
                        seq_printf(m, "%-15s  ", p);

                        if (irq_map[i].host && irq_map[i].host->of_node)
                                p = irq_map[i].host->of_node->full_name;
                        else
                                p = none;
                        seq_printf(m, "%s\n", p);
                }

                spin_unlock_irqrestore(&desc->lock, flags);
        }

        return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
        .open = virq_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init irq_debugfs_init(void)
{
        if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
                                 NULL, &virq_debug_fops) == NULL)
                return -ENOMEM;

        return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */

#ifdef CONFIG_PPC64
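/*
 * "noirqdistrib" on the kernel command line clears distribute_irqs;
 * platform interrupt code consults this flag when deciding whether to
 * spread interrupt delivery across cpus (behaviour is platform-specific).
 */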
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */