2 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
4 * Copyright (C) 2007, 2008 Magnus Damm
5 * Copyright (C) 2009, 2010 Paul Mundt
7 * Based on intc2.c and ipr.c
9 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
10 * Copyright (C) 2000 Kazumoto Kojima
11 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
12 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
13 * Copyright (C) 2005, 2006 Paul Mundt
15 * This file is subject to the terms and conditions of the GNU General Public
16 * License. See the file "COPYING" in the main directory of this archive
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 #include <linux/init.h>
22 #include <linux/irq.h>
23 #include <linux/module.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/sh_intc.h>
28 #include <linux/sysdev.h>
29 #include <linux/list.h>
30 #include <linux/topology.h>
31 #include <linux/bitmap.h>
32 #include <linux/cpumask.h>
33 #include <linux/spinlock.h>
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #include <linux/radix-tree.h>
37 #include <linux/mutex.h>
38 #include <linux/rcupdate.h>
39 #include <asm/sizes.h>
/*
 * Pack a register-access descriptor into a single 32-bit handle:
 *
 *   [4:0]   bit shift of the field within the register
 *   [8:5]   field width in bits
 *   [12:9]  accessor index (REG_FN_*)
 *   [15:13] register mode (MODE_*_REG)
 *   [23:16] "enable" register index (see intc_get_reg())
 *   [31:24] "disable" register index
 *
 * Fix: 'addr_d' and the accessor argument 'h' are now parenthesized so
 * expression arguments (e.g. 'a | b') encode correctly.
 */
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
	((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
	 ((addr_e) << 16) | ((addr_d) << 24))

#define _INTC_SHIFT(h)	((h) & 0x1f)
#define _INTC_WIDTH(h)	(((h) >> 5) & 0xf)
#define _INTC_FN(h)	(((h) >> 9) & 0xf)
#define _INTC_MODE(h)	(((h) >> 13) & 0x7)
#define _INTC_ADDR_E(h)	(((h) >> 16) & 0xff)
#define _INTC_ADDR_D(h)	(((h) >> 24) & 0xff)
/* One priority/sense register slot bound to a single IRQ. */
struct intc_handle_int {
	/* NOTE(review): irq/handle members elided in this view of the source. */

/* Reverse-map entry: radix tree slot (keyed by enum_id) -> controller. */
struct intc_map_entry {
	struct intc_desc_int *desc;
	/* NOTE(review): enum_id member elided in this view. */

/* Subgroup source queued in the radix tree awaiting VIRQ allocation. */
struct intc_subgroup_entry {
	/* NOTE(review): pirq/enum_id/handle members elided in this view. */

/* Per-controller runtime state. */
struct intc_desc_int {
	struct list_head list;		/* node on the global intc_list */
	struct sys_device sysdev;	/* suspend/resume via sysdev class */
	struct radix_tree_root tree;	/* enum_id -> intc_map_entry lookup */
	struct intc_handle_int *prio;	/* priority handles, one per IRQ */
	struct intc_handle_int *sense;	/* sense handles, one per IRQ */
	unsigned int nr_sense;
	struct intc_window *window;	/* ioremapped register windows */
	unsigned int nr_windows;
	/* NOTE(review): reg/smp arrays, counters, lock and the embedded
	 * struct irq_chip (used by get_intc_desc()) elided in this view. */
/* All registered controllers, in registration order. */
static LIST_HEAD(intc_list);
static unsigned int nr_intc_controllers;

/*
 * The intc_irq_map provides a global map of bound IRQ vectors for a
 * given platform. Allocation of IRQs are either static through the CPU
 * vector map, or dynamic in the case of board mux vectors or MSI.
 *
 * As this is a central point for all IRQ controllers on the system,
 * each of the available sources are mapped out here. This combined with
 * sparseirq makes it quite trivial to keep the vector map tightly packed
 * when dynamically creating IRQs, as well as tying in to otherwise
 * unused irq_desc positions in the sparse array.
 */
static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
static struct intc_map_entry intc_irq_xlate[NR_IRQS];
static DEFINE_SPINLOCK(vector_lock);	/* guards intc_irq_map allocation */
static DEFINE_SPINLOCK(xlate_lock);	/* guards intc_irq_xlate[] updates */
/*
 * SMP keeps one register copy per CPU (stride in the low byte of the
 * smp[] entry, copy count in the high byte); UP collapses to one copy.
 * NOTE(review): the #ifdef CONFIG_SMP / #else / #endif lines wrapping
 * the two definition sets are elided in this view.
 */
#define IS_SMP(x)	x.smp
#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
#define SMP_NR(d, x)	((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
#define INTC_REG(d, x, c) (d->reg[(x)])
#define SMP_NR(d, x)	1

static unsigned int intc_prio_level[NR_IRQS];	/* for now */
static unsigned int default_prio_level = 2;	/* 2 - 16 */
static unsigned long ack_handle[NR_IRQS];
#ifdef CONFIG_INTC_BALANCING
static unsigned long dist_handle[NR_IRQS];	/* auto-distribution handles */

/* Singly-linked chain of VIRQs hanging off one parent IRQ. */
struct intc_virq_list {
	/* NOTE(review): irq member elided in this view. */
	struct intc_virq_list *next;

/* Walk a VIRQ chain; 'head' may be NULL. */
#define for_each_virq(entry, head) \
	for (entry = head; entry; entry = entry->next)
/* Map an IRQ back to its controller: the irq_chip handed to genirq is
 * embedded inside struct intc_desc_int, so container_of() recovers it. */
static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
{
	struct irq_chip *chip = get_irq_chip(irq);

	return container_of(chip, struct intc_desc_int, chip);
}

/* Chained handler for duplicate evt vectors: forward to the primary IRQ,
 * whose number was stashed with set_irq_data() at registration time. */
static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
{
	generic_handle_irq((unsigned int)get_irq_data(irq));
}
/* Make a freshly bound IRQ requestable while excluding it from
 * autoprobing.
 * NOTE(review): the #ifdef CONFIG_ARM / #else / #endif lines are elided
 * in this view; the two calls below are the two branches. */
static inline void activate_irq(int irq)
{
	/* ARM requires an extra step to clear IRQ_NOREQUEST, which it
	 * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
	 */
	set_irq_flags(irq, IRQF_VALID);

	/* same effect on other architectures */
	set_irq_noprobe(irq);
}
164 static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
165 unsigned long address)
167 struct intc_window *window;
170 /* scan through physical windows and convert address */
171 for (k = 0; k < d->nr_windows; k++) {
172 window = d->window + k;
174 if (address < window->phys)
177 if (address >= (window->phys + window->size))
180 address -= window->phys;
181 address += (unsigned long)window->virt;
186 /* no windows defined, register must be 1:1 mapped virt:phys */
190 static unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
194 address = intc_phys_to_virt(d, address);
196 for (k = 0; k < d->nr_reg; k++) {
197 if (d->reg[k] == address)
/*
 * Return 'value' with the bitfield described by 'handle' replaced by
 * 'field_value'.  Field position and size come from the _INTC_MK()
 * handle encoding.
 */
static inline unsigned int set_field(unsigned int value,
				     unsigned int field_value,
				     unsigned int handle)
{
	unsigned int shift = _INTC_SHIFT(handle);
	unsigned int mask = ((1 << _INTC_WIDTH(handle)) - 1) << shift;

	return (value & ~mask) | (field_value << shift);
}
/* Extract the bitfield described by 'handle' from a raw register value. */
static inline unsigned long get_field(unsigned int value, unsigned int handle)
{
	unsigned int shift = _INTC_SHIFT(handle);

	return (value >> shift) & ((1 << _INTC_WIDTH(handle)) - 1);
}
/* test_8/16/32: read a register and extract the bitfield described by
 * handle h.  The trailing argument only exists so the signature matches
 * intc_reg_fns[]. */
static unsigned long test_8(unsigned long addr, unsigned long h,
			    unsigned long ignore)
{
	return get_field(__raw_readb(addr), h);
}

static unsigned long test_16(unsigned long addr, unsigned long h,
			     unsigned long ignore)
{
	return get_field(__raw_readw(addr), h);
}

static unsigned long test_32(unsigned long addr, unsigned long h,
			     unsigned long ignore)
{
	return get_field(__raw_readl(addr), h);
}
/*
 * write_8/16/32: store 'data' into the bitfield described by h, zeroing
 * the remaining register bits.  The dummy read-back defeats write
 * posting.  Returns 0 to fit the intc_reg_fns[] signature.
 *
 * Fix: the extraction dropped the 'unsigned long data' parameter lines
 * and the 'return 0;' statements — restored here.
 */
static unsigned long write_8(unsigned long addr, unsigned long h,
			     unsigned long data)
{
	__raw_writeb(set_field(0, data, h), addr);
	(void)__raw_readb(addr);	/* Defeat write posting */
	return 0;
}

static unsigned long write_16(unsigned long addr, unsigned long h,
			      unsigned long data)
{
	__raw_writew(set_field(0, data, h), addr);
	(void)__raw_readw(addr);	/* Defeat write posting */
	return 0;
}

static unsigned long write_32(unsigned long addr, unsigned long h,
			      unsigned long data)
{
	__raw_writel(set_field(0, data, h), addr);
	(void)__raw_readl(addr);	/* Defeat write posting */
	return 0;
}
/*
 * modify_8/16/32: read-modify-write only the bitfield described by h,
 * preserving the other register bits.  IRQs are disabled locally to keep
 * the RMW atomic against this CPU.  Returns 0 to fit intc_reg_fns[].
 *
 * Fix: the extraction dropped the 'unsigned long data' parameter lines,
 * the 'unsigned long flags;' declarations and the 'return 0;' tails —
 * restored here.
 */
static unsigned long modify_8(unsigned long addr, unsigned long h,
			      unsigned long data)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_writeb(set_field(__raw_readb(addr), data, h), addr);
	(void)__raw_readb(addr);	/* Defeat write posting */
	local_irq_restore(flags);
	return 0;
}

static unsigned long modify_16(unsigned long addr, unsigned long h,
			       unsigned long data)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_writew(set_field(__raw_readw(addr), data, h), addr);
	(void)__raw_readw(addr);	/* Defeat write posting */
	local_irq_restore(flags);
	return 0;
}

static unsigned long modify_32(unsigned long addr, unsigned long h,
			       unsigned long data)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_writel(set_field(__raw_readl(addr), data, h), addr);
	(void)__raw_readl(addr);	/* Defeat write posting */
	local_irq_restore(flags);
	return 0;
}
303 REG_FN_TEST_BASE = 1,
304 REG_FN_WRITE_BASE = 5,
305 REG_FN_MODIFY_BASE = 9
308 static unsigned long (*intc_reg_fns[])(unsigned long addr,
310 unsigned long data) = {
311 [REG_FN_TEST_BASE + 0] = test_8,
312 [REG_FN_TEST_BASE + 1] = test_16,
313 [REG_FN_TEST_BASE + 3] = test_32,
314 [REG_FN_WRITE_BASE + 0] = write_8,
315 [REG_FN_WRITE_BASE + 1] = write_16,
316 [REG_FN_WRITE_BASE + 3] = write_32,
317 [REG_FN_MODIFY_BASE + 0] = modify_8,
318 [REG_FN_MODIFY_BASE + 1] = modify_16,
319 [REG_FN_MODIFY_BASE + 3] = modify_32,
/* How a mask/priority register encodes "source enabled". */
enum {	MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
	MODE_MASK_REG,       /* Bit(s) set -> interrupt disabled */
	MODE_DUAL_REG,       /* Two registers, set bit to enable / disable */
	MODE_PRIO_REG,       /* Priority value written to enable interrupt */
	MODE_PCLR_REG,       /* Above plus all bits set to disable interrupt */
};
329 static unsigned long intc_mode_field(unsigned long addr,
330 unsigned long handle,
331 unsigned long (*fn)(unsigned long,
336 return fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
339 static unsigned long intc_mode_zero(unsigned long addr,
340 unsigned long handle,
341 unsigned long (*fn)(unsigned long,
346 return fn(addr, handle, 0);
349 static unsigned long intc_mode_prio(unsigned long addr,
350 unsigned long handle,
351 unsigned long (*fn)(unsigned long,
356 return fn(addr, handle, intc_prio_level[irq]);
359 static unsigned long (*intc_enable_fns[])(unsigned long addr,
360 unsigned long handle,
361 unsigned long (*fn)(unsigned long,
364 unsigned int irq) = {
365 [MODE_ENABLE_REG] = intc_mode_field,
366 [MODE_MASK_REG] = intc_mode_zero,
367 [MODE_DUAL_REG] = intc_mode_field,
368 [MODE_PRIO_REG] = intc_mode_prio,
369 [MODE_PCLR_REG] = intc_mode_prio,
372 static unsigned long (*intc_disable_fns[])(unsigned long addr,
373 unsigned long handle,
374 unsigned long (*fn)(unsigned long,
377 unsigned int irq) = {
378 [MODE_ENABLE_REG] = intc_mode_zero,
379 [MODE_MASK_REG] = intc_mode_field,
380 [MODE_DUAL_REG] = intc_mode_field,
381 [MODE_PRIO_REG] = intc_mode_zero,
382 [MODE_PCLR_REG] = intc_mode_field,
#ifdef CONFIG_INTC_BALANCING
/* Open hardware auto-distribution for 'irq', if a dist register exists.
 * NOTE(review): 'addr' declarations and the early 'return' statements
 * after the guard conditions are elided in this view. */
static inline void intc_balancing_enable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = dist_handle[irq];

	/* no-op unless the IRQ participates in balancing */
	if (irq_balancing_disabled(irq) || !handle)

	addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
	intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
}

/* Close hardware auto-distribution for 'irq'. */
static inline void intc_balancing_disable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = dist_handle[irq];

	if (irq_balancing_disabled(irq) || !handle)

	addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
	intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
}

/* Build the _INTC_MK() handle for enum_id's auto-distribution register,
 * or 0 when the source has none.
 * NOTE(review): the enum_id parameter line, the dist_reg guard,
 * 'continue', loop braces and the trailing 'return 0;' are elided in
 * this view. */
static unsigned int intc_dist_data(struct intc_desc *desc,
				   struct intc_desc_int *d,
{
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;

	for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
		mr = desc->hw.mask_regs + i;

		/*
		 * Skip this entry if there's no auto-distribution
		 * register associated with it.
		 */

		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)

			fn = REG_FN_MODIFY_BASE;
			mode = MODE_ENABLE_REG;
			reg_e = mr->dist_reg;
			reg_d = mr->dist_reg;

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					(mr->reg_width - 1) - j);

	/*
	 * It's possible we've gotten here with no distribution options
	 * available for the IRQ in question, so we just skip over those.
	 */
}

/* Stubs when balancing support is compiled out.
 * NOTE(review): the #else separating these from the real versions is
 * elided in this view. */
static inline void intc_balancing_enable(unsigned int irq)
{
}

static inline void intc_balancing_disable(unsigned int irq)
{
}
/* Write the "enable" value for 'handle' on each register copy whose CPU
 * is in the IRQ's affinity mask, then open auto-distribution.
 * NOTE(review): 'addr'/'cpu' declarations, the #ifdef CONFIG_SMP guard
 * around the affinity test and its 'continue' are elided in this view. */
static inline void _intc_enable(unsigned int irq, unsigned long handle)
{
	struct intc_desc_int *d = get_intc_desc(irq);

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
		/* skip register copies for CPUs outside the affinity mask */
		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))

		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
		intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
						    [_INTC_FN(handle)], irq);
	}

	intc_balancing_enable(irq);
}

/* irq_chip unmask/enable hook: the handle lives in the chip data. */
static void intc_enable(unsigned int irq)
{
	_intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
}
/* irq_chip mask/disable hook: close auto-distribution first, then write
 * the "disable" value on every affine register copy.
 * NOTE(review): 'addr'/'cpu' declarations and the #ifdef CONFIG_SMP
 * guard with its 'continue' are elided in this view. */
static void intc_disable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = (unsigned long)get_irq_chip_data(irq);

	intc_balancing_disable(irq);

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))

		addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
		intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\
						     [_INTC_FN(handle)], irq);
	}
}
/*
 * Enable-value strategies used by intc_enable_disable(), which runs
 * before per-IRQ priorities are configured: MODE_PRIO/PCLR therefore
 * write the full field instead of intc_prio_level[].
 * NOTE(review): the 'static unsigned long' storage-class line and the
 * middle function-pointer parameter lines are elided in this view.
 */
(*intc_enable_noprio_fns[])(unsigned long addr,
			    unsigned long handle,
			    unsigned long (*fn)(unsigned long,
			    unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_field,
	[MODE_MASK_REG] = intc_mode_zero,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_field,
	[MODE_PCLR_REG] = intc_mode_field,
};
/* Force a handle's source on or off across all register copies, ignoring
 * affinity and priority (used for force_enable/force_disable lists).
 * NOTE(review): 'addr'/'cpu' declarations, the if/else split on
 * do_enable and part of the fn-pointer type are elided in this view. */
static void intc_enable_disable(struct intc_desc_int *d,
				unsigned long handle, int do_enable)
{
	unsigned long (*fn)(unsigned long, unsigned long,
		    unsigned long (*)(unsigned long, unsigned long,

	/* do_enable path: write the no-priority enable value */
	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
		fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
		fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
	}

	/* otherwise: write the disable value */
	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
		addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
		fn = intc_disable_fns[_INTC_MODE(handle)];
		fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
	}
}
/* irq_chip set_wake hook: nothing to program here. */
static int intc_set_wake(unsigned int irq, unsigned int on)
{
	return 0; /* allow wakeup, but setup hardware in intc_suspend() */
}

/*
 * This is held with the irq desc lock held, so we don't require any
 * additional locking here at the intc desc level. The affinity mask is
 * later tested in the enable/disable paths.
 * NOTE(review): the #ifdef CONFIG_SMP guard and the -1/0 return
 * statements are elided in this view.
 */
static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	/* reject masks with no online CPU */
	if (!cpumask_intersects(cpumask, cpu_online_mask))

	cpumask_copy(irq_to_desc(irq)->affinity, cpumask);
}
/* irq_chip mask_ack hook for controllers with ack registers: mask the
 * source, then acknowledge it by writing zero ONLY to its bit (all other
 * bits are written as ones, leaving their pending state untouched).
 * NOTE(review): 'addr' declaration, the intc_disable() call, the
 * 'if (handle)' guard, the dummy __raw_read* before each write and the
 * 'break' statements are elided in this view. */
static void intc_mask_ack(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = ack_handle[irq];

	/* read register and write zero only to the associated bit */
	addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
	switch (_INTC_FN(handle)) {
	case REG_FN_MODIFY_BASE + 0:	/* 8bit */
		__raw_writeb(0xff ^ set_field(0, 1, handle), addr);
	case REG_FN_MODIFY_BASE + 1:	/* 16bit */
		__raw_writew(0xffff ^ set_field(0, 1, handle), addr);
	case REG_FN_MODIFY_BASE + 3:	/* 32bit */
		__raw_writel(0xffffffff ^ set_field(0, 1, handle), addr);
	}
}
/* Linear scan of a handle array for the entry bound to 'irq'; returns
 * NULL on miss.
 * NOTE(review): the remaining parameter line (nr_hp, irq), the 'int i;'
 * declaration, the loop's 'continue'/'return hp + i;' and the final
 * 'return NULL;' are elided in this view. */
static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
{
	/*
	 * this doesn't scale well, but...
	 *
	 * this function should only be used for cerain uncommon
	 * operations such as intc_set_priority() and intc_set_sense()
	 * and in those rare cases performance doesn't matter that much.
	 * keeping the memory footprint low is more important.
	 *
	 * one rather simple way to speed this up and still keep the
	 * memory footprint down is to make sure the array is sorted
	 * and then perform a bisect to lookup the irq.
	 */
	for (i = 0; i < nr_hp; i++) {
		if ((hp + i)->irq != irq)
	}
}
/* Public API: change an IRQ's priority level (2..15 typical range).
 * The primary masking path re-applies intc_prio_level[] on the next
 * enable; only a secondary priority register is written immediately.
 * NOTE(review): the error returns, the 'if (ihp)' guard braces and the
 * final 'return 0;' are elided in this view. */
int intc_set_priority(unsigned int irq, unsigned int prio)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	struct intc_handle_int *ihp;

	/* IRQ must use priority masking, and levels 0/1 are reserved */
	if (!intc_prio_level[irq] || prio <= 1)

	ihp = intc_find_irq(d->prio, d->nr_prio, irq);

	/* requested level must fit in the register field */
	if (prio >= (1 << _INTC_WIDTH(ihp->handle)))

	intc_prio_level[irq] = prio;

	/*
	 * only set secondary masking method directly
	 * primary masking method is using intc_prio_level[irq]
	 * priority level will be set during next enable()
	 */
	if (_INTC_FN(ihp->handle) != REG_FN_ERR)
		_intc_enable(irq, ihp->handle);
}
/* Sense-register field values; bit 7 flags the entry as supported so a
 * zero slot means "trigger type not available on this CPU". */
#define VALID(x) (x | 0x80)

static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_EDGE_FALLING] = VALID(0),
	[IRQ_TYPE_EDGE_RISING] = VALID(1),
	[IRQ_TYPE_LEVEL_LOW] = VALID(2),
	/* SH7706, SH7707 and SH7709 do not support high level triggered */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7709)
	[IRQ_TYPE_LEVEL_HIGH] = VALID(3),
	/* NOTE(review): the #endif and closing '};' are elided in this view. */
/* irq_chip set_type hook: program the trigger mode field for 'irq'.
 * Bit 7 of the table value (the VALID flag) is written along with the
 * mode bits; presumably the field width masks it off — TODO confirm.
 * NOTE(review): 'addr' declaration, the !value error return, the
 * 'if (ihp)' guard and the 'return 0;' are elided in this view. */
static int intc_set_sense(unsigned int irq, unsigned int type)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
	struct intc_handle_int *ihp;

	ihp = intc_find_irq(d->sense, d->nr_sense, irq);

	addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
	intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
}
/* Find the group containing 'enum_id' and return the group's own enum
 * id, or 0 when the source belongs to no group.
 * NOTE(review): the enum_id parameter line, loop indices declaration,
 * 'continue'/'return' statements and the trailing 'return 0;' are
 * elided in this view. */
static intc_enum __init intc_grp_id(struct intc_desc *desc,
{
	struct intc_group *g = desc->hw.groups;

	for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
		g = desc->hw.groups + i;

		for (j = 0; g->enum_ids[j]; j++) {
			if (g->enum_ids[j] != enum_id)
		}
	}
}
/*
 * Scan the mask registers for 'enum_id' starting at *reg_idx/*fld_idx
 * (cursors persist across calls so one source can yield several
 * handles) and build the _INTC_MK() handle for the match, or 0.
 * NOTE(review): the enum_id parameter line, the set_reg/clr_reg
 * else-branches, 'continue', cursor resets and the trailing 'return 0;'
 * are elided in this view.
 */
static unsigned int __init _intc_mask_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int fn, mode;
	unsigned long reg_e, reg_d;

	while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
		mr = desc->hw.mask_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
			if (mr->enum_ids[*fld_idx] != enum_id)

			/* dual set/clr registers: plain writes suffice */
			if (mr->set_reg && mr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_DUAL_REG;

			/* single register: read-modify-write, polarity
			 * decides enable- vs mask-style */
				fn = REG_FN_MODIFY_BASE;
				mode = MODE_ENABLE_REG;
				mode = MODE_MASK_REG;

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					(mr->reg_width - 1) - *fld_idx);
		}
	}
}

/* Wrapper: try the source directly, then optionally fall back to the
 * group it belongs to.
 * NOTE(review): cursor declarations/initialisation and the 'if (ret)'
 * return path are elided in this view. */
static unsigned int __init intc_mask_data(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  intc_enum enum_id, int do_grps)
{
	ret = _intc_mask_data(desc, d, enum_id, &i, &j);

	if (do_grps)
		return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);
}
/*
 * Priority-register counterpart of _intc_mask_data(): locate enum_id in
 * the prio registers (cursors persist across calls) and encode its
 * field as a handle, or return 0.
 * NOTE(review): the enum_id parameter line, the else-branch lines,
 * 'continue', the 'n = *fld_idx + 1;' setup and the trailing
 * 'return 0;' are elided in this view.
 */
static unsigned int __init _intc_prio_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_prio_reg *pr = desc->hw.prio_regs;
	unsigned int fn, n, mode, bit;
	unsigned long reg_e, reg_d;

	while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
		pr = desc->hw.prio_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
			if (pr->enum_ids[*fld_idx] != enum_id)

			/* dual registers: write priority / clear to mask */
			if (pr->set_reg && pr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_PCLR_REG;

			/* single register: read-modify-write the field */
				fn = REG_FN_MODIFY_BASE;
				mode = MODE_PRIO_REG;

			fn += (pr->reg_width >> 3) - 1;

			/* fields are packed MSB-first */
			BUG_ON(n * pr->field_width > pr->reg_width);

			bit = pr->reg_width - (n * pr->field_width);

			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					pr->field_width, bit);
		}
	}
}

/* Wrapper: source first, then optional group fallback (see
 * intc_mask_data()).
 * NOTE(review): cursor setup and the 'if (ret)' return path are elided
 * in this view. */
static unsigned int __init intc_prio_data(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  intc_enum enum_id, int do_grps)
{
	ret = _intc_prio_data(desc, d, enum_id, &i, &j);

	if (do_grps)
		return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);
}
/* Apply enable/disable to EVERY mask bit and priority field matching
 * enum_id — used for the force_enable/force_disable descriptor lists.
 * NOTE(review): the cursor resets (i = j = 0) and the 'while (1)' /
 * break structure around each lookup loop are elided in this view. */
static void __init intc_enable_disable_enum(struct intc_desc *desc,
					    struct intc_desc_int *d,
					    intc_enum enum_id, int enable)
{
	unsigned int i, j, data;

	/* go through and enable/disable all mask bits */
		data = _intc_mask_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);

	/* go through and enable/disable all priority fields */
		data = _intc_prio_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);
}
/* Build the handle for enum_id's acknowledge register bit, or 0 when
 * the source has no ack register.
 * NOTE(review): the enum_id parameter line, 'continue', the reg_e/reg_d
 * assignments, the field-width argument of _INTC_MK() and the trailing
 * 'return 0;' are elided in this view. */
static unsigned int __init intc_ack_data(struct intc_desc *desc,
					 struct intc_desc_int *d,
{
	struct intc_mask_reg *mr = desc->hw.ack_regs;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;

	for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
		mr = desc->hw.ack_regs + i;

		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)

			fn = REG_FN_MODIFY_BASE;
			mode = MODE_ENABLE_REG;

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					(mr->reg_width - 1) - j);
		}
	}
}
/* Build the handle for enum_id's sense (trigger-mode) field, or 0 when
 * none exists.  Sense registers are single-register, so the disable
 * index of the handle is 0.
 * NOTE(review): the enum_id parameter line, 'continue' and the trailing
 * 'return 0;' are elided in this view. */
static unsigned int __init intc_sense_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
{
	struct intc_sense_reg *sr = desc->hw.sense_regs;
	unsigned int i, j, fn, bit;

	for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
		sr = desc->hw.sense_regs + i;

		for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
			if (sr->enum_ids[j] != enum_id)

			fn = REG_FN_MODIFY_BASE;
			fn += (sr->reg_width >> 3) - 1;

			/* fields are packed MSB-first */
			BUG_ON((j + 1) * sr->field_width > sr->reg_width);

			bit = sr->reg_width - ((j + 1) * sr->field_width);

			return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
					0, sr->field_width, bit);
		}
	}
}
/* Radix tree tag marking subgroup entries whose VIRQ is not yet bound. */
#define INTC_TAG_VIRQ_NEEDS_ALLOC	0

/* Public API: resolve (controller name, enum_id) to a Linux IRQ number.
 * NOTE(review): the 'int irq = -1;' initialisation, 'continue'/'break'
 * statements, the 'if (ptr)' guard and the final 'return irq;' are
 * elided in this view. */
int intc_irq_lookup(const char *chipname, intc_enum enum_id)
{
	struct intc_map_entry *ptr;
	struct intc_desc_int *d;

	list_for_each_entry(d, &intc_list, list) {
		/* only search the requested controller */
		if (strcmp(d->chip.name, chipname) != 0)

		/*
		 * Catch early lookups for subgroup VIRQs that have not
		 * yet been allocated an IRQ. This already includes a
		 * fast-path out if the tree is untagged, so there is no
		 * need to explicitly test the root tree.
		 */
		tagged = radix_tree_tag_get(&d->tree, enum_id,
					    INTC_TAG_VIRQ_NEEDS_ALLOC);
		if (unlikely(tagged))

		ptr = radix_tree_lookup(&d->tree, enum_id);
			/* map entry index == IRQ number by construction */
			irq = ptr - intc_irq_xlate;
	}
}
EXPORT_SYMBOL_GPL(intc_irq_lookup);
/* Append 'virq' to the parent IRQ's chain list (stored in handler_data),
 * skipping duplicates.
 * NOTE(review): the duplicate 'return 0;', the allocation failure
 * 'return -ENOMEM;', the entry->irq assignment, the '*last = entry;'
 * link-in and the final 'return 0;' are elided in this view. */
static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
{
	struct intc_virq_list **last, *entry;
	struct irq_desc *desc = irq_to_desc(irq);

	/* scan for duplicates */
	last = (struct intc_virq_list **)&desc->handler_data;
	for_each_virq(entry, desc->handler_data) {
		if (entry->irq == virq)
	}

	entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);
	if (!entry) {
		pr_err("can't allocate VIRQ mapping for %d\n", virq);
	}
}
/* Chained handler for a parent IRQ with subgroup VIRQs: mask+ack the
 * parent, demux by testing each child's status field, then unmask. */
static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct intc_virq_list *entry, *vlist = get_irq_data(irq);
	struct intc_desc_int *d = get_intc_desc(irq);

	desc->chip->mask_ack(irq);

	for_each_virq(entry, vlist) {
		unsigned long addr, handle;

		/* each VIRQ's test handle was stashed via set_irq_data() */
		handle = (unsigned long)get_irq_data(entry->irq);
		addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);

		/* only dispatch children whose status bit reads non-zero */
		if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
			generic_handle_irq(entry->irq);
	}

	desc->chip->unmask(irq);
}
1012 static unsigned long __init intc_subgroup_data(struct intc_subgroup *subgroup,
1013 struct intc_desc_int *d,
1016 unsigned int fn = REG_FN_TEST_BASE + (subgroup->reg_width >> 3) - 1;
1018 return _INTC_MK(fn, MODE_ENABLE_REG, intc_get_reg(d, subgroup->reg),
1019 0, 1, (subgroup->reg_width - 1) - index);
1022 static void __init intc_subgroup_init_one(struct intc_desc *desc,
1023 struct intc_desc_int *d,
1024 struct intc_subgroup *subgroup)
1026 struct intc_map_entry *mapped;
1028 unsigned long flags;
1031 mapped = radix_tree_lookup(&d->tree, subgroup->parent_id);
1037 pirq = mapped - intc_irq_xlate;
1039 spin_lock_irqsave(&d->lock, flags);
1041 for (i = 0; i < ARRAY_SIZE(subgroup->enum_ids); i++) {
1042 struct intc_subgroup_entry *entry;
1045 if (!subgroup->enum_ids[i])
1048 entry = kmalloc(sizeof(*entry), GFP_NOWAIT);
1053 entry->enum_id = subgroup->enum_ids[i];
1054 entry->handle = intc_subgroup_data(subgroup, d, i);
1056 err = radix_tree_insert(&d->tree, entry->enum_id, entry);
1057 if (unlikely(err < 0))
1060 radix_tree_tag_set(&d->tree, entry->enum_id,
1061 INTC_TAG_VIRQ_NEEDS_ALLOC);
1064 spin_unlock_irqrestore(&d->lock, flags);
1067 static void __init intc_subgroup_init(struct intc_desc *desc,
1068 struct intc_desc_int *d)
1072 if (!desc->hw.subgroups)
1075 for (i = 0; i < desc->hw.nr_subgroups; i++)
1076 intc_subgroup_init_one(desc, d, desc->hw.subgroups + i);
/* Allocate and wire a VIRQ for every tagged (pending) subgroup entry of
 * controller 'd', replacing the temporary radix tree entries with the
 * permanent xlate slots.
 * NOTE(review): loop index declaration, the restart label/goto for
 * RADIX_TREE_RETRY, the create_irq_nr() call, kfree(entry) and several
 * 'continue'/'break' statements are elided in this view. */
static void __init intc_subgroup_map(struct intc_desc_int *d)
{
	struct intc_subgroup_entry *entries[32];
	unsigned long flags;
	unsigned int nr_found;

	spin_lock_irqsave(&d->lock, flags);

	/* batch-collect pending entries via their allocation tag */
	nr_found = radix_tree_gang_lookup_tag_slot(&d->tree,
			(void ***)entries, 0, ARRAY_SIZE(entries),
			INTC_TAG_VIRQ_NEEDS_ALLOC);

	for (i = 0; i < nr_found; i++) {
		struct intc_subgroup_entry *entry;

		entry = radix_tree_deref_slot((void **)entries[i]);
		if (unlikely(!entry))
		if (unlikely(entry == RADIX_TREE_RETRY))

		/* NOTE(review): irq = create_irq(); presumably — elided */
		if (unlikely(irq < 0)) {
			pr_err("no more free IRQs, bailing..\n");
		}

		pr_info("Setting up a chained VIRQ from %d -> %d\n",

		spin_lock(&xlate_lock);
		intc_irq_xlate[irq].desc = d;
		intc_irq_xlate[irq].enum_id = entry->enum_id;
		spin_unlock(&xlate_lock);

		/* VIRQ shares the parent's chip; its data is the test handle */
		set_irq_chip_and_handler_name(irq, get_irq_chip(entry->pirq),
					      handle_simple_irq, "virq");
		set_irq_chip_data(irq, get_irq_chip_data(entry->pirq));

		set_irq_data(irq, (void *)entry->handle);

		set_irq_chained_handler(entry->pirq, intc_virq_handler);
		add_virq_to_pirq(entry->pirq, irq);

		/* entry is now resolved: clear the tag and swap in the
		 * permanent xlate slot */
		radix_tree_tag_clear(&d->tree, entry->enum_id,
				     INTC_TAG_VIRQ_NEEDS_ALLOC);
		radix_tree_replace_slot((void **)entries[i],
					&intc_irq_xlate[irq]);
	}

	spin_unlock_irqrestore(&d->lock, flags);
}

/* Late-init hook: resolve pending subgroup VIRQs on every controller. */
void __init intc_finalize(void)
{
	struct intc_desc_int *d;

	list_for_each_entry(d, &intc_list, list)
		if (radix_tree_tagged(&d->tree, INTC_TAG_VIRQ_NEEDS_ALLOC))
			intc_subgroup_map(d);
}
/*
 * Bind one vector (enum_id -> irq) on controller 'd': record it in the
 * global map and radix tree, choose primary/secondary masking methods,
 * hook up the chip, and collect prio/sense/ack/dist handles.
 * NOTE(review): the enum_id/irq parameter line, 'primary = ...'
 * assignments, pr_warning() arguments, several 'if (data[...])' guard
 * lines, counters (d->nr_prio++/nr_sense++) and the trailing
 * activate_irq() are elided in this view.
 */
static void __init intc_register_irq(struct intc_desc *desc,
				     struct intc_desc_int *d,
{
	struct intc_handle_int *hp;
	unsigned int data[2], primary;
	unsigned long flags;

	/*
	 * Register the IRQ position with the global IRQ map, then insert
	 * it in to the radix tree.
	 */
	set_bit(irq, intc_irq_map);

	spin_lock_irqsave(&xlate_lock, flags);
	radix_tree_insert(&d->tree, enum_id, &intc_irq_xlate[irq]);
	spin_unlock_irqrestore(&xlate_lock, flags);

	/*
	 * Prefer single interrupt source bitmap over other combinations:
	 *
	 * 1. bitmap, single interrupt source
	 * 2. priority, single interrupt source
	 * 3. bitmap, multiple interrupt sources (groups)
	 * 4. priority, multiple interrupt sources (groups)
	 */
	data[0] = intc_mask_data(desc, d, enum_id, 0);
	data[1] = intc_prio_data(desc, d, enum_id, 0);

	/* priority becomes primary when no unique mask bit exists */
	if (!data[0] && data[1])

	if (!data[0] && !data[1])
		pr_warning("missing unique irq mask for irq %d (vect 0x%04x)\n",

	/* fall back to group (shared) handles for the secondary method */
	data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
	data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);

	BUG_ON(!data[primary]); /* must have primary masking method */

	disable_irq_nosync(irq);
	set_irq_chip_and_handler_name(irq, &d->chip,
				      handle_level_irq, "level");
	set_irq_chip_data(irq, (void *)data[primary]);

	/*
	 * set priority level
	 * - this needs to be at least 2 for 5-bit priorities on 7780
	 */
	intc_prio_level[irq] = default_prio_level;

	/* enable secondary masking method if present */
	if (data[!primary])
		_intc_enable(irq, data[!primary]);

	/* add irq to d->prio list if priority is available */
	if (data[1]) {
		hp = d->prio + d->nr_prio;
		hp->irq = irq;
		hp->handle = data[1];

		if (primary) {
			/*
			 * only secondary priority should access registers, so
			 * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
			 */
			hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
			hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
		}
	}

	/* add irq to d->sense list if sense is available */
	data[0] = intc_sense_data(desc, d, enum_id);
	if (data[0]) {
		(d->sense + d->nr_sense)->irq = irq;
		(d->sense + d->nr_sense)->handle = data[0];
	}

	/* irq should be disabled by default */
	d->chip.mask(irq);

	if (desc->hw.ack_regs)
		ack_handle[irq] = intc_ack_data(desc, d, enum_id);

#ifdef CONFIG_INTC_BALANCING
	if (desc->hw.mask_regs)
		dist_handle[irq] = intc_dist_data(desc, d, enum_id);
/* Record one register address (translated to virtual) at slot 'cnt' in
 * d->reg[]; returns 1 when stored, so callers can accumulate the count.
 * NOTE(review): the 'cnt'/'smp' parameter lines, the 'if (value)' guard,
 * the d->smp[] assignment under CONFIG_SMP, 'return 1;' and the final
 * 'return 0;' are elided in this view. */
static unsigned int __init save_reg(struct intc_desc_int *d,
				    unsigned long value,
{
	value = intc_phys_to_virt(d, value);

	d->reg[cnt] = value;
}
/*
 * Register a controller descriptor: map its register windows, build the
 * flat reg[]/smp[] tables, install the irq_chip callbacks, apply
 * force_disable/force_enable, and register every vector (plus redirect
 * stubs for duplicate evt vectors) before queueing subgroups.
 * NOTE(review): many statements are elided in this view — allocation
 * NULL checks with 'goto err*' jumps, 'k = 0;' reset, #ifdef guards
 * (CONFIG_INTC_BALANCING / CONFIG_SMP / ack_regs), intc_irq_map
 * reservation under vector_lock, the 'return 0;' success path and the
 * err0..err5 unwind labels around the cleanup shown at the bottom.
 */
int __init register_intc_controller(struct intc_desc *desc)
{
	unsigned int i, k, smp;
	struct intc_hw_desc *hw = &desc->hw;
	struct intc_desc_int *d;
	struct resource *res;

	pr_info("Registered controller '%s' with %u IRQs\n",
		desc->name, hw->nr_vectors);

	d = kzalloc(sizeof(*d), GFP_NOWAIT);

	INIT_LIST_HEAD(&d->list);
	list_add_tail(&d->list, &intc_list);

	spin_lock_init(&d->lock);

	d->index = nr_intc_controllers;

	/* map each MEM resource as a register window */
	if (desc->num_resources) {
		d->nr_windows = desc->num_resources;
		d->window = kzalloc(d->nr_windows * sizeof(*d->window),

		for (k = 0; k < d->nr_windows; k++) {
			res = desc->resource + k;
			WARN_ON(resource_type(res) != IORESOURCE_MEM);
			d->window[k].phys = res->start;
			d->window[k].size = resource_size(res);
			d->window[k].virt = ioremap_nocache(res->start,
							    resource_size(res));
			if (!d->window[k].virt)
		}
	}

	/* upper bound on distinct registers (set+clr pairs count twice) */
	d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
#ifdef CONFIG_INTC_BALANCING
		d->nr_reg += hw->nr_mask_regs;
	d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
	d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
	d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
	d->nr_reg += hw->subgroups ? hw->nr_subgroups : 0;

	d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);

	d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);

	/* flatten every register address into d->reg[], index 'k' */
	if (hw->mask_regs) {
		for (i = 0; i < hw->nr_mask_regs; i++) {
			smp = IS_SMP(hw->mask_regs[i]);
			k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
			k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
#ifdef CONFIG_INTC_BALANCING
			k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
		}
	}

	if (hw->prio_regs) {
		d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),

		for (i = 0; i < hw->nr_prio_regs; i++) {
			smp = IS_SMP(hw->prio_regs[i]);
			k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
			k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
		}
	}

	if (hw->sense_regs) {
		d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),

		for (i = 0; i < hw->nr_sense_regs; i++)
			k += save_reg(d, k, hw->sense_regs[i].reg, 0);
	}

	for (i = 0; i < hw->nr_subgroups; i++)
		if (hw->subgroups[i].reg)
			k+= save_reg(d, k, hw->subgroups[i].reg, 0);

	d->chip.name = desc->name;
	d->chip.mask = intc_disable;
	d->chip.unmask = intc_enable;
	d->chip.mask_ack = intc_disable;
	d->chip.enable = intc_enable;
	d->chip.disable = intc_disable;
	d->chip.shutdown = intc_disable;
	d->chip.set_type = intc_set_sense;
	d->chip.set_wake = intc_set_wake;
	d->chip.set_affinity = intc_set_affinity;

	/* ack registers upgrade mask_ack to the real mask+ack handler */
		for (i = 0; i < hw->nr_ack_regs; i++)
			k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);

		d->chip.mask_ack = intc_mask_ack;

	/* disable bits matching force_disable before registering irqs */
	if (desc->force_disable)
		intc_enable_disable_enum(desc, d, desc->force_disable, 0);

	/* disable bits matching force_enable before registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 0);

	BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */

	/* register the vectors one by one */
	for (i = 0; i < hw->nr_vectors; i++) {
		struct intc_vect *vect = hw->vectors + i;
		unsigned int irq = evt2irq(vect->vect);
		unsigned long flags;
		struct irq_desc *irq_desc;

		irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
		if (unlikely(!irq_desc)) {
			pr_err("can't get irq_desc for %d\n", irq);
		}

		spin_lock_irqsave(&xlate_lock, flags);
		intc_irq_xlate[irq].enum_id = vect->enum_id;
		intc_irq_xlate[irq].desc = d;
		spin_unlock_irqrestore(&xlate_lock, flags);

		intc_register_irq(desc, d, vect->enum_id, irq);

		/* later vectors sharing this enum_id become redirects */
		for (k = i + 1; k < hw->nr_vectors; k++) {
			struct intc_vect *vect2 = hw->vectors + k;
			unsigned int irq2 = evt2irq(vect2->vect);

			if (vect->enum_id != vect2->enum_id)

			/*
			 * In the case of multi-evt handling and sparse
			 * IRQ support, each vector still needs to have
			 * its own backing irq_desc.
			 */
			irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
			if (unlikely(!irq_desc)) {
				pr_err("can't get irq_desc for %d\n", irq2);
			}

			/* redirect this interrupts to the first one */
			set_irq_chip(irq2, &dummy_irq_chip);
			set_irq_chained_handler(irq2, intc_redirect_irq);
			set_irq_data(irq2, (void *)irq);
		}
	}

	intc_subgroup_init(desc, d);

	/* enable bits matching force_enable after registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 1);

	nr_intc_controllers++;

	/* error unwind: unmap windows and free partial allocations */
	for (k = 0; k < d->nr_windows; k++)
		if (d->window[k].virt)
			iounmap(d->window[k].virt);

	pr_err("unable to allocate INTC memory\n");
}
#ifdef CONFIG_INTC_USERIMASK
/* Cached ioremap cookie for the single USERIMASK register window. */
1474 static void __iomem *uimask;
/*
 * register_intc_userimask() - map the per-CPU USERIMASK control register.
 * Maps one 4K page at @addr and records the cookie in the file-local
 * 'uimask' pointer; refuses a second registration.
 * NOTE(review): extraction artifact — braces and the error-return lines
 * (presumably -EBUSY / -ENXIO) were dropped; numbers are original file
 * line numbers.
 */
1476 int register_intc_userimask(unsigned long addr)
/* only one userimask window may be registered at a time */
1478 if (unlikely(uimask))
1481 uimask = ioremap_nocache(addr, SZ_4K);
1482 if (unlikely(!uimask))
/* user masking only affects priority levels below default_prio_level */
1485 pr_info("userimask support registered for levels 0 -> %d\n",
1486 default_prio_level - 1);
/*
 * sysdev-class "userimask" show handler: reports the current user mask
 * level, read back from bits [7:4] of the mapped USERIMASK register.
 * NOTE(review): the elided preceding line presumably carried the
 * 'static ssize_t' return type — confirm against the full file.
 */
1492 show_intc_userimask(struct sysdev_class *cls,
1493 struct sysdev_class_attribute *attr, char *buf)
1495 return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
/*
 * sysdev-class "userimask" store handler: parses a decimal level from
 * @buf and writes it into the USERIMASK register (level in bits [7:4],
 * 0xa5 unlock key in the top byte). Levels at or above
 * default_prio_level are rejected, per the comment below.
 * NOTE(review): extraction artifact — braces, return statements, and
 * the comment delimiters of the original block comment were dropped.
 */
1499 store_intc_userimask(struct sysdev_class *cls,
1500 struct sysdev_class_attribute *attr,
1501 const char *buf, size_t count)
1503 unsigned long level;
1505 level = simple_strtoul(buf, NULL, 10);
1508 * Minimal acceptable IRQ levels are in the 2 - 16 range, but
1509 * these are chomped so as to not interfere with normal IRQs.
1511 * Level 1 is a special case on some CPUs in that it's not
1512 * directly settable, but given that USERIMASK cuts off below a
1513 * certain level, we don't care about this limitation here.
1514 * Level 0 on the other hand equates to user masking disabled.
1516 * We use default_prio_level as a cut off so that only special
1517 * case opt-in IRQs can be mangled.
1519 if (level >= default_prio_level)
/* 0xa5 << 24 looks like a write-enable key — verify against the CPU manual */
1522 __raw_writel(0xa5 << 24 | level << 4, uimask);
/* expose the handlers above as /sys/devices/system/intc/userimask */
1527 static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
1528 show_intc_userimask, store_intc_userimask);
#ifdef CONFIG_INTC_MAPPING_DEBUG
/*
 * seq_file dump of the irq -> enum_id -> controller mapping table for
 * debugfs. Walks intc_irq_xlate[] for every possible irq; entries with
 * no desc are presumably skipped by an elided NULL check before the
 * seq_printf() calls — confirm against the full file.
 */
1532 static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
1536 seq_printf(m, "%-5s %-7s %-15s\n", "irq", "enum", "chip name");
/* irq 0 is not a valid vector, so the scan starts at 1 */
1538 for (i = 1; i < nr_irqs; i++) {
1539 struct intc_desc_int *desc = intc_irq_xlate[i].desc;
1544 seq_printf(m, "%5d ", i);
1545 seq_printf(m, "0x%05x ", intc_irq_xlate[i].enum_id);
1546 seq_printf(m, "%-15s\n", desc->chip.name);
/* debugfs open hook: standard single_open() wrapper for the dump above */
1552 static int intc_irq_xlate_open(struct inode *inode, struct file *file)
1554 return single_open(file, intc_irq_xlate_debug, inode->i_private);
/*
 * File operations for the debugfs node; .read (seq_read) was elided by
 * the extraction but is implied by the single_open/seq_lseek pairing.
 */
1557 static const struct file_operations intc_irq_xlate_fops = {
1558 .open = intc_irq_xlate_open,
1560 .llseek = seq_lseek,
1561 .release = single_release,
/*
 * Create the "intc_irq_xlate" debugfs file at the debugfs root.
 * Registered at fs_initcall time so debugfs itself is already up.
 */
1564 static int __init intc_irq_xlate_init(void)
1567 * XXX.. use arch_debugfs_dir here when all of the intc users are
/* NULL parent == debugfs root; failure path elided by the extraction */
1570 if (debugfs_create_file("intc_irq_xlate", S_IRUGO, NULL, NULL,
1571 &intc_irq_xlate_fops) == NULL)
1576 fs_initcall(intc_irq_xlate_init);
/*
 * Per-controller sysdev "name" attribute: recovers the intc_desc_int
 * from the embedded sys_device and prints its chip name.
 * NOTE(review): the 'static ssize_t' return-type line was elided.
 */
1580 show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
1582 struct intc_desc_int *d;
1584 d = container_of(dev, struct intc_desc_int, sysdev);
1586 return sprintf(buf, "%s\n", d->chip.name);
/* read-only attribute file "name" under each registered intc sysdev */
1589 static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
/*
 * Suspend hook for one intc controller sysdev. Dispatches on the PM
 * event: PM_EVENT_FREEZE needs no action; PM_EVENT_SUSPEND walks all
 * irq_descs and handles wakeup irqs owned by this chip; the first
 * (elided) case appears to restore irq state when leaving freeze.
 * NOTE(review): extraction artifact — case labels, break statements
 * and loop bodies are partially missing; do not infer exact per-irq
 * actions from this fragment alone.
 */
1591 static int intc_suspend(struct sys_device *dev, pm_message_t state)
1593 struct intc_desc_int *d;
1594 struct irq_desc *desc;
1597 /* get intc controller associated with this sysdev */
1598 d = container_of(dev, struct intc_desc_int, sysdev);
1600 switch (state.event) {
/* skip if the previously recorded event was not a freeze */
1602 if (d->state.event != PM_EVENT_FREEZE)
1604 for_each_irq_desc(irq, desc) {
/* redirected multi-evt vectors are handled via their primary irq */
1605 if (desc->handle_irq == intc_redirect_irq)
/* only touch irqs belonging to this controller's chip */
1607 if (desc->chip != &d->chip)
1609 if (desc->status & IRQ_DISABLED)
1615 case PM_EVENT_FREEZE:
1616 /* nothing has to be done */
1618 case PM_EVENT_SUSPEND:
1619 /* enable wakeup irqs belonging to this intc controller */
1620 for_each_irq_desc(irq, desc) {
1621 if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
/*
 * Resume hook: reuses the suspend path with PMSG_ON, which drives the
 * "leaving freeze" branch of intc_suspend()'s switch.
 */
1631 static int intc_resume(struct sys_device *dev)
1633 return intc_suspend(dev, PMSG_ON);
/* sysdev class tying the suspend/resume callbacks to every intc sysdev */
1636 static struct sysdev_class intc_sysdev_class = {
1638 .suspend = intc_suspend,
1639 .resume = intc_resume,
1642 /* register this intc as sysdev to allow suspend/resume */
/*
 * Registers the intc sysdev class, optionally its userimask class
 * attribute (only when a userimask window was mapped), then one sysdev
 * per controller on intc_list with the "name" attribute attached.
 * Errors are accumulated in 'error' and reported once at the end.
 * NOTE(review): attribute-argument lines for the two *_create_file
 * calls were elided by the extraction.
 */
1643 static int __init register_intc_sysdevs(void)
1645 struct intc_desc_int *d;
1648 error = sysdev_class_register(&intc_sysdev_class);
1649 #ifdef CONFIG_INTC_USERIMASK
1650 if (!error && uimask)
1651 error = sysdev_class_create_file(&intc_sysdev_class,
1655 list_for_each_entry(d, &intc_list, list) {
/* sysdev id mirrors the controller's registration index */
1656 d->sysdev.id = d->index;
1657 d->sysdev.cls = &intc_sysdev_class;
1658 error = sysdev_register(&d->sysdev);
1660 error = sysdev_create_file(&d->sysdev,
1668 pr_err("sysdev registration error\n");
/* runs at device_initcall time, after all controllers registered */
1672 device_initcall(register_intc_sysdevs);
1675 * Dynamic IRQ allocation and deallocation
/*
 * create_irq_nr() - allocate a dynamic IRQ, preferring @irq_want.
 * Under vector_lock: first tries to claim @irq_want in the
 * intc_irq_map bitmap; otherwise scans for the first free bit. Then
 * allocates (and NUMA-migrates) the backing irq_desc on @node and
 * initializes it for dynamic use. Returns the irq number, or 0 on
 * failure (irq is pre-initialized to 0 and only set on success —
 * assignment lines elided by the extraction).
 */
1677 unsigned int create_irq_nr(unsigned int irq_want, int node)
1679 unsigned int irq = 0, new;
1680 unsigned long flags;
1681 struct irq_desc *desc;
1683 spin_lock_irqsave(&vector_lock, flags);
1686 * First try the wanted IRQ
/* test_and_set_bit == 0 means the wanted slot was free and is now ours */
1688 if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
1691 /* .. then fall back to scanning. */
1692 new = find_first_zero_bit(intc_irq_map, nr_irqs);
/* bitmap exhausted — no free vector available */
1693 if (unlikely(new == nr_irqs))
/* plain __set_bit is fine: we already hold vector_lock */
1696 __set_bit(new, intc_irq_map);
1699 desc = irq_to_desc_alloc_node(new, node);
1700 if (unlikely(!desc)) {
1701 pr_err("can't get irq_desc for %d\n", new);
/* migrate the descriptor onto the requested NUMA node */
1705 desc = move_irq_desc(desc, node);
1709 spin_unlock_irqrestore(&vector_lock, flags);
/* descriptor setup happens outside the lock */
1712 dynamic_irq_init(irq);
/*
 * create_irq() - allocate any dynamic IRQ above the legacy range,
 * preferring the caller's local NUMA node. Thin wrapper around
 * create_irq_nr(); the failure translation (0 -> negative return) was
 * elided by the extraction.
 */
1719 int create_irq(void)
1721 int nid = cpu_to_node(smp_processor_id());
1724 irq = create_irq_nr(NR_IRQS_LEGACY, nid);
/*
 * destroy_irq() - release a dynamically allocated IRQ.
 * Tears down the descriptor state first, then returns the vector to
 * the allocator bitmap under vector_lock. Counterpart of
 * create_irq_nr().
 */
1731 void destroy_irq(unsigned int irq)
1733 unsigned long flags;
1735 dynamic_irq_cleanup(irq);
1737 spin_lock_irqsave(&vector_lock, flags);
1738 __clear_bit(irq, intc_irq_map);
1739 spin_unlock_irqrestore(&vector_lock, flags);
/*
 * reserve_irq_vector() - claim a specific irq in the allocator bitmap
 * so dynamic allocation will never hand it out. Fails (error path
 * elided — presumably -EBUSY) if the bit is already set.
 */
1742 int reserve_irq_vector(unsigned int irq)
1744 unsigned long flags;
1747 spin_lock_irqsave(&vector_lock, flags);
1748 if (test_and_set_bit(irq, intc_irq_map))
1750 spin_unlock_irqrestore(&vector_lock, flags);
/*
 * reserve_intc_vectors() - bulk-reserve the irqs backing an array of
 * intc vectors, translating each event code via evt2irq(). Unlike
 * reserve_irq_vector() this never fails: already-set bits are simply
 * left set.
 */
1755 void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
1757 unsigned long flags;
1760 spin_lock_irqsave(&vector_lock, flags);
1761 for (i = 0; i < nr_vecs; i++)
1762 __set_bit(evt2irq(vectors[i].vect), intc_irq_map);
1763 spin_unlock_irqrestore(&vector_lock, flags);
/*
 * reserve_irq_legacy() - mark every irq below the first already-used
 * one as reserved, closing the legacy hole so dynamic allocation never
 * returns a low "legacy" vector. Finds the first set bit, then sets
 * all bits before it, all under vector_lock.
 */
1766 void reserve_irq_legacy(void)
1768 unsigned long flags;
1771 spin_lock_irqsave(&vector_lock, flags);
1772 j = find_first_bit(intc_irq_map, nr_irqs);
1773 for (i = 0; i < j; i++)
1774 __set_bit(i, intc_irq_map);
1775 spin_unlock_irqrestore(&vector_lock, flags);