/*
 * linux/arch/sh/kernel/irq.c
 *
 *      Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 *
 * SuperH version:  Copyright (C) 1999  Niibe Yutaka
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/uaccess.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>

atomic_t irq_err_count;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this themselves; it doesn't deserve
 * a generic callback, I think.
 */
void ack_bad_irq(unsigned int irq)
{
        atomic_inc(&irq_err_count);
        printk("unexpected IRQ trap at vector %02x\n", irq);
}

#if defined(CONFIG_PROC_FS)
/*
 * /proc/interrupts printing:
 */
static int show_other_interrupts(struct seq_file *p, int prec)
{
        int j;

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stat[j].__nmi_count);
        seq_printf(p, "  Non-maskable interrupts\n");

        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));

        return 0;
}

int show_interrupts(struct seq_file *p, void *v)
{
        unsigned long flags, any_count = 0;
        int i = *(loff_t *)v, j, prec;
        struct irqaction *action;
        struct irq_desc *desc;
        struct irq_data *data;
        struct irq_chip *chip;

        if (i > nr_irqs)
                return 0;

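        /* Field width for the IRQ column: at least 3 digits, widened until nr_irqs fits, capped at 10. */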
        for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
                j *= 10;

        if (i == nr_irqs)
                return show_other_interrupts(p, prec);

        if (i == 0) {
                seq_printf(p, "%*s", prec + 8, "");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d", j);
                seq_putc(p, '\n');
        }

        desc = irq_to_desc(i);
        if (!desc)
                return 0;

        data = irq_get_irq_data(i);
        chip = irq_data_get_irq_chip(data);

        raw_spin_lock_irqsave(&desc->lock, flags);
        for_each_online_cpu(j)
                any_count |= kstat_irqs_cpu(i, j);
        action = desc->action;
        if (!action && !any_count)
                goto out;

        seq_printf(p, "%*d: ", prec, i);
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
        seq_printf(p, " %14s", chip->name);
        seq_printf(p, "-%-8s", desc->name);

        if (action) {
                seq_printf(p, "  %s", action->name);
                while ((action = action->next) != NULL)
                        seq_printf(p, ", %s", action->name);
        }

        seq_putc(p, '\n');
out:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}
#endif

#ifdef CONFIG_IRQSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
        struct thread_info      tinfo;
        u32                     stack[THREAD_SIZE/sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

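/* Statically allocated, page-aligned backing storage for the per-CPU IRQ stacks. */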
static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

static inline void handle_one_irq(unsigned int irq)
{
        union irq_ctx *curctx, *irqctx;

        curctx = (union irq_ctx *)current_thread_info();
        irqctx = hardirq_ctx[smp_processor_id()];

        /*
         * This is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all).
         */
        if (curctx != irqctx) {
                u32 *isp;

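                /* The IRQ stack grows down from the top of the irq_ctx area. */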
                isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
                irqctx->tinfo.task = curctx->tinfo.task;
                irqctx->tinfo.previous_sp = current_stack_pointer;

                /*
                 * Copy the softirq bits in preempt_count so that the
                 * softirq checks work in the hardirq context.
                 */
                irqctx->tinfo.preempt_count =
                        (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                        (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

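                /*
                 * Call generic_handle_irq() on this CPU's hardirq stack:
                 * the current stack pointer (r15) is saved in r8, switched
                 * to isp in the jsr delay slot, and restored afterwards.
                 */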
                __asm__ __volatile__ (
                        "mov    %0, r4          \n"
                        "mov    r15, r8         \n"
                        "jsr    @%1             \n"
                        /* switch to the irq stack */
                        " mov   %2, r15         \n"
                        /* restore the stack (ring zero) */
                        "mov    r8, r15         \n"
                        : /* no outputs */
                        : "r" (irq), "r" (generic_handle_irq), "r" (isp)
                        : "memory", "r0", "r1", "r2", "r3", "r4",
                          "r5", "r6", "r7", "r8", "t", "pr"
                );
        } else
                generic_handle_irq(irq);
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (hardirq_ctx[cpu])
                return;

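        /* The hardirq context lives at the bottom of this CPU's slice of hardirq_stack[]. */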
        irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        hardirq_ctx[cpu] = irqctx;

        irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = 0;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        softirq_ctx[cpu] = irqctx;

        printk("CPU %u irqstacks, hard=%p soft=%p\n",
                cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
        hardirq_ctx[cpu] = NULL;
}

asmlinkage void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curctx = current_thread_info();
                irqctx = softirq_ctx[smp_processor_id()];
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_sp = current_stack_pointer;

                /* build the stack frame on the softirq stack */
                isp = (u32 *)((char *)irqctx + sizeof(*irqctx));

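                /*
                 * Run __do_softirq() on this CPU's softirq stack: r15 is
                 * saved in r9, switched to isp in the jsr delay slot, and
                 * restored once __do_softirq() returns.
                 */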
                __asm__ __volatile__ (
                        "mov    r15, r9         \n"
                        "jsr    @%0             \n"
                        /* switch to the softirq stack */
                        " mov   %1, r15         \n"
                        /* restore the thread stack */
                        "mov    r9, r15         \n"
                        : /* no outputs */
                        : "r" (__do_softirq), "r" (isp)
                        : "memory", "r0", "r1", "r2", "r3", "r4",
                          "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
                );

                /*
                 * Shouldn't happen, we returned above if in_interrupt():
                 */
                WARN_ON_ONCE(softirq_count());
        }

        local_irq_restore(flags);
}
#else
static inline void handle_one_irq(unsigned int irq)
{
        generic_handle_irq(irq);
}
#endif

asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        irq_enter();

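        /* Map the hardware vector to a Linux IRQ number, letting the machine vector demux cascaded interrupts. */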
        if (irq != NO_IRQ_IGNORE) {
                handle_one_irq(irq);
                irq_finish(irq);
        }

        irq_exit();

        set_irq_regs(old_regs);

        return IRQ_HANDLED;
}

void __init init_IRQ(void)
{
        plat_irq_setup();

        /* Perform the machine specific initialisation */
        if (sh_mv.mv_init_irq)
                sh_mv.mv_init_irq();

        intc_finalize();

        irq_ctx_init(smp_processor_id());
}

#ifdef CONFIG_SPARSE_IRQ
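/* The machine vector supplies the IRQ count; the core still preallocates the legacy descriptor range. */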
int __init arch_probe_nr_irqs(void)
{
        nr_irqs = sh_mv.mv_nr_irqs;
        return NR_IRQS_LEGACY;
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_chip *chip = irq_data_get_irq_chip(data);

        printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
               irq, data->node, cpu);

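        /* Hold desc->lock so the affinity change does not race with the flow handler. */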
        raw_spin_lock_irq(&desc->lock);
        chip->irq_set_affinity(data, cpumask_of(cpu), false);
        raw_spin_unlock_irq(&desc->lock);
}

/*
 * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
        unsigned int irq, cpu = smp_processor_id();

        for_each_active_irq(irq) {
                struct irq_data *data = irq_get_irq_data(irq);

                if (data->node == cpu) {
                        unsigned int newcpu = cpumask_any_and(data->affinity,
                                                              cpu_online_mask);
                        if (newcpu >= nr_cpu_ids) {
                                if (printk_ratelimit())
                                        printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
                                               irq, cpu);

                                cpumask_setall(data->affinity);
                                newcpu = cpumask_any_and(data->affinity,
                                                         cpu_online_mask);
                        }

                        route_irq(data, irq, newcpu);
                }
        }
}
#endif