Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[pandora-kernel.git] / arch / powerpc / sysdev / mv64x60_pic.c
1 /*
2  * Interrupt handling for Marvell mv64360/mv64460 host bridges (Discovery)
3  *
4  * Author: Dale Farnsworth <dale@farnsworth.org>
5  *
6  * 2007 (c) MontaVista, Software, Inc.  This file is licensed under
7  * the terms of the GNU General Public License version 2.  This program
8  * is licensed "as is" without any warranty of any kind, whether express
9  * or implied.
10  */
11
12 #include <linux/stddef.h>
13 #include <linux/kernel.h>
14 #include <linux/init.h>
15 #include <linux/irq.h>
16 #include <linux/interrupt.h>
17 #include <linux/spinlock.h>
18
19 #include <asm/byteorder.h>
20 #include <asm/io.h>
21 #include <asm/prom.h>
22 #include <asm/irq.h>
23
24 #include "mv64x60.h"
25
26 /* Interrupt Controller Interface Registers */
27 #define MV64X60_IC_MAIN_CAUSE_LO        0x0004
28 #define MV64X60_IC_MAIN_CAUSE_HI        0x000c
29 #define MV64X60_IC_CPU0_INTR_MASK_LO    0x0014
30 #define MV64X60_IC_CPU0_INTR_MASK_HI    0x001c
31 #define MV64X60_IC_CPU0_SELECT_CAUSE    0x0024
32
33 #define MV64X60_HIGH_GPP_GROUPS         0x0f000000
34 #define MV64X60_SELECT_CAUSE_HIGH       0x40000000
35
36 /* General Purpose Pins Controller Interface Registers */
37 #define MV64x60_GPP_INTR_CAUSE          0x0008
38 #define MV64x60_GPP_INTR_MASK           0x000c
39
40 #define MV64x60_LEVEL1_LOW              0
41 #define MV64x60_LEVEL1_HIGH             1
42 #define MV64x60_LEVEL1_GPP              2
43
44 #define MV64x60_LEVEL1_MASK             0x00000060
45 #define MV64x60_LEVEL1_OFFSET           5
46
47 #define MV64x60_LEVEL2_MASK             0x0000001f
48
49 #define MV64x60_NUM_IRQS                96
50
51 static DEFINE_SPINLOCK(mv64x60_lock);
52
53 static void __iomem *mv64x60_irq_reg_base;
54 static void __iomem *mv64x60_gpp_reg_base;
55
56 /*
57  * Interrupt Controller Handling
58  *
59  * The interrupt controller handles three groups of interrupts:
60  *   main low:  IRQ0-IRQ31
61  *   main high: IRQ32-IRQ63
62  *   gpp:       IRQ64-IRQ95
63  *
64  * This code handles interrupts in two levels.  Level 1 selects the
65  * interrupt group, and level 2 selects an IRQ within that group.
66  * Each group has its own irq_chip structure.
67  */
68
69 static u32 mv64x60_cached_low_mask;
70 static u32 mv64x60_cached_high_mask = MV64X60_HIGH_GPP_GROUPS;
71 static u32 mv64x60_cached_gpp_mask;
72
73 static struct irq_host *mv64x60_irq_host;
74
75 /*
76  * mv64x60_chip_low functions
77  */
78
79 static void mv64x60_mask_low(struct irq_data *d)
80 {
81         int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
82         unsigned long flags;
83
84         spin_lock_irqsave(&mv64x60_lock, flags);
85         mv64x60_cached_low_mask &= ~(1 << level2);
86         out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
87                  mv64x60_cached_low_mask);
88         spin_unlock_irqrestore(&mv64x60_lock, flags);
89         (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO);
90 }
91
92 static void mv64x60_unmask_low(struct irq_data *d)
93 {
94         int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
95         unsigned long flags;
96
97         spin_lock_irqsave(&mv64x60_lock, flags);
98         mv64x60_cached_low_mask |= 1 << level2;
99         out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
100                  mv64x60_cached_low_mask);
101         spin_unlock_irqrestore(&mv64x60_lock, flags);
102         (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO);
103 }
104
/*
 * irq_chip for the "main low" group (hwirqs 0-31).  Masking alone serves
 * as mask_ack here: no cause bit is cleared in mv64x60_mask_low.
 */
static struct irq_chip mv64x60_chip_low = {
        .name           = "mv64x60_low",
        .irq_mask       = mv64x60_mask_low,
        .irq_mask_ack   = mv64x60_mask_low,
        .irq_unmask     = mv64x60_unmask_low,
};
111
112 /*
113  * mv64x60_chip_high functions
114  */
115
116 static void mv64x60_mask_high(struct irq_data *d)
117 {
118         int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
119         unsigned long flags;
120
121         spin_lock_irqsave(&mv64x60_lock, flags);
122         mv64x60_cached_high_mask &= ~(1 << level2);
123         out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
124                  mv64x60_cached_high_mask);
125         spin_unlock_irqrestore(&mv64x60_lock, flags);
126         (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI);
127 }
128
129 static void mv64x60_unmask_high(struct irq_data *d)
130 {
131         int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
132         unsigned long flags;
133
134         spin_lock_irqsave(&mv64x60_lock, flags);
135         mv64x60_cached_high_mask |= 1 << level2;
136         out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
137                  mv64x60_cached_high_mask);
138         spin_unlock_irqrestore(&mv64x60_lock, flags);
139         (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI);
140 }
141
/*
 * irq_chip for the "main high" group (hwirqs 32-63).  As with the low
 * group, masking alone serves as mask_ack.
 */
static struct irq_chip mv64x60_chip_high = {
        .name           = "mv64x60_high",
        .irq_mask       = mv64x60_mask_high,
        .irq_mask_ack   = mv64x60_mask_high,
        .irq_unmask     = mv64x60_unmask_high,
};
148
149 /*
150  * mv64x60_chip_gpp functions
151  */
152
153 static void mv64x60_mask_gpp(struct irq_data *d)
154 {
155         int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
156         unsigned long flags;
157
158         spin_lock_irqsave(&mv64x60_lock, flags);
159         mv64x60_cached_gpp_mask &= ~(1 << level2);
160         out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
161                  mv64x60_cached_gpp_mask);
162         spin_unlock_irqrestore(&mv64x60_lock, flags);
163         (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK);
164 }
165
/*
 * Mask a GPP source and acknowledge it.  Unlike the main low/high groups,
 * GPP sources latch a bit in the GPP cause register, so after masking we
 * write the cause register with every bit set except this source's.
 * NOTE(review): this relies on the cause register treating a written 0 as
 * "clear this bit" and a written 1 as "leave untouched" -- confirm against
 * the MV64360/MV64460 datasheet.
 */
static void mv64x60_mask_ack_gpp(struct irq_data *d)
{
        /* low 5 bits of the hwirq select the bit within the group */
        int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
        unsigned long flags;

        spin_lock_irqsave(&mv64x60_lock, flags);
        mv64x60_cached_gpp_mask &= ~(1 << level2);
        out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
                 mv64x60_cached_gpp_mask);
        out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE,
                 ~(1 << level2));
        spin_unlock_irqrestore(&mv64x60_lock, flags);
        /* read back so the posted writes reach the bridge */
        (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE);
}
180
181 static void mv64x60_unmask_gpp(struct irq_data *d)
182 {
183         int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
184         unsigned long flags;
185
186         spin_lock_irqsave(&mv64x60_lock, flags);
187         mv64x60_cached_gpp_mask |= 1 << level2;
188         out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
189                  mv64x60_cached_gpp_mask);
190         spin_unlock_irqrestore(&mv64x60_lock, flags);
191         (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK);
192 }
193
/*
 * irq_chip for the GPP group (hwirqs 64-95).  GPP sources need a real
 * ack (cause-bit clear), so mask_ack is a distinct function here.
 */
static struct irq_chip mv64x60_chip_gpp = {
        .name           = "mv64x60_gpp",
        .irq_mask       = mv64x60_mask_gpp,
        .irq_mask_ack   = mv64x60_mask_ack_gpp,
        .irq_unmask     = mv64x60_unmask_gpp,
};
200
201 /*
202  * mv64x60_host_ops functions
203  */
204
/* Level-1 group number -> irq_chip, indexed by the MV64x60_LEVEL1_* values. */
static struct irq_chip *mv64x60_chips[] = {
        [MV64x60_LEVEL1_LOW]  = &mv64x60_chip_low,
        [MV64x60_LEVEL1_HIGH] = &mv64x60_chip_high,
        [MV64x60_LEVEL1_GPP]  = &mv64x60_chip_gpp,
};
210
211 static int mv64x60_host_map(struct irq_host *h, unsigned int virq,
212                           irq_hw_number_t hwirq)
213 {
214         int level1;
215
216         irq_set_status_flags(virq, IRQ_LEVEL);
217
218         level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET;
219         BUG_ON(level1 > MV64x60_LEVEL1_GPP);
220         irq_set_chip_and_handler(virq, mv64x60_chips[level1],
221                                  handle_level_irq);
222
223         return 0;
224 }
225
/* Only .map is provided; the other irq_host ops use the core defaults. */
static struct irq_host_ops mv64x60_host_ops = {
        .map   = mv64x60_host_map,
};
229
230 /*
231  * Global functions
232  */
233
/*
 * Boot-time setup: locate the GPP and PIC register blocks via the device
 * tree, ioremap them, register a linear irq_host covering all 96 hwirqs,
 * then quiesce the hardware by programming the cached mask images
 * (everything masked except the GPP summary bits in the high mask) and
 * clearing any stale cause bits.
 *
 * NOTE(review): the DT lookups (of_find_compatible_node, of_get_property,
 * of_translate_address) and ioremap are not checked for failure -- a
 * malformed device tree will oops here.  Presumably acceptable for
 * platform init code; verify that is the intent.
 */
void __init mv64x60_init_irq(void)
{
        struct device_node *np;
        phys_addr_t paddr;
        unsigned int size;
        const unsigned int *reg;
        unsigned long flags;

        /* map the GPP controller registers; reg = <addr size> */
        np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-gpp");
        reg = of_get_property(np, "reg", &size);
        paddr = of_translate_address(np, reg);
        mv64x60_gpp_reg_base = ioremap(paddr, reg[1]);
        of_node_put(np);

        /* map the main interrupt controller registers */
        np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-pic");
        reg = of_get_property(np, "reg", &size);
        paddr = of_translate_address(np, reg);
        mv64x60_irq_reg_base = ioremap(paddr, reg[1]);

        /* no of_node_put(np) here -- presumably irq_alloc_host keeps a
         * reference to the node; TODO confirm */
        mv64x60_irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
                                          MV64x60_NUM_IRQS,
                                          &mv64x60_host_ops, MV64x60_NUM_IRQS);

        /* program initial masks, then clear leftover cause bits */
        spin_lock_irqsave(&mv64x60_lock, flags);
        out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
                 mv64x60_cached_gpp_mask);
        out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
                 mv64x60_cached_low_mask);
        out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
                 mv64x60_cached_high_mask);

        out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE, 0);
        out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_LO, 0);
        out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_HI, 0);
        spin_unlock_irqrestore(&mv64x60_lock, flags);
}
270
271 unsigned int mv64x60_get_irq(void)
272 {
273         u32 cause;
274         int level1;
275         irq_hw_number_t hwirq;
276         int virq = NO_IRQ;
277
278         cause = in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_SELECT_CAUSE);
279         if (cause & MV64X60_SELECT_CAUSE_HIGH) {
280                 cause &= mv64x60_cached_high_mask;
281                 level1 = MV64x60_LEVEL1_HIGH;
282                 if (cause & MV64X60_HIGH_GPP_GROUPS) {
283                         cause = in_le32(mv64x60_gpp_reg_base +
284                                         MV64x60_GPP_INTR_CAUSE);
285                         cause &= mv64x60_cached_gpp_mask;
286                         level1 = MV64x60_LEVEL1_GPP;
287                 }
288         } else {
289                 cause &= mv64x60_cached_low_mask;
290                 level1 = MV64x60_LEVEL1_LOW;
291         }
292         if (cause) {
293                 hwirq = (level1 << MV64x60_LEVEL1_OFFSET) | __ilog2(cause);
294                 virq = irq_linear_revmap(mv64x60_irq_host, hwirq);
295         }
296
297         return virq;
298 }