/*
 * linux/arch/sh/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 *
 * SuperH version:  Copyright (C) 1999  Niibe Yutaka
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/uaccess.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>

atomic_t irq_err_count;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this themselves; it doesn't deserve
 * a generic callback, I think.
 */
void ack_bad_irq(unsigned int irq)
{
        atomic_inc(&irq_err_count);
        printk("unexpected IRQ trap at vector %02x\n", irq);
}

#if defined(CONFIG_PROC_FS)
/*
 * /proc/interrupts printing for arch specific interrupts
 */
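/*
 * Example of the resulting /proc/interrupts lines (field widths come
 * from 'prec'; the counts below are illustrative only):
 *
 *      NMI:          0          0   Non-maskable interrupts
 *      ERR:          0
 */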
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stat[j].__nmi_count);
        seq_printf(p, "  Non-maskable interrupts\n");

        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));

        return 0;
}
#endif

#ifdef CONFIG_IRQSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
        struct thread_info      tinfo;
        u32                     stack[THREAD_SIZE/sizeof(u32)];
};
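/*
 * As with a task's kernel stack, the thread_info sits at the bottom
 * (lowest address) of each context, and the stack grows down towards
 * it from the top of the THREAD_SIZE region.
 */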

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

static inline void handle_one_irq(unsigned int irq)
{
        union irq_ctx *curctx, *irqctx;

        curctx = (union irq_ctx *)current_thread_info();
        irqctx = hardirq_ctx[smp_processor_id()];

        /*
         * This is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the IRQ stack already after all).
         */
        if (curctx != irqctx) {
                u32 *isp;

                isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
                irqctx->tinfo.task = curctx->tinfo.task;
                irqctx->tinfo.previous_sp = current_stack_pointer;

                /*
                 * Copy the softirq bits in preempt_count so that the
                 * softirq checks work in the hardirq context.
                 */
                irqctx->tinfo.preempt_count =
                        (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                        (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

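                /*
                 * On SuperH, jsr executes the instruction in its delay
                 * slot before the branch takes effect, so the
                 * "mov %2, r15" below switches sp onto the IRQ stack
                 * just as generic_handle_irq() is entered; r8 holds the
                 * original stack pointer for restoring afterwards.
                 */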
                __asm__ __volatile__ (
                        "mov    %0, r4          \n"
                        "mov    r15, r8         \n"
                        "jsr    @%1             \n"
                        /* switch to the irq stack */
                        " mov   %2, r15         \n"
                        /* restore the stack (ring zero) */
                        "mov    r8, r15         \n"
                        : /* no outputs */
                        : "r" (irq), "r" (generic_handle_irq), "r" (isp)
                        : "memory", "r0", "r1", "r2", "r3", "r4",
                          "r5", "r6", "r7", "r8", "t", "pr"
                );
        } else
                generic_handle_irq(irq);
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
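/*
 * Note: the hardirq context's preempt_count is preseeded with
 * HARDIRQ_OFFSET so that hardirq checks (in_irq() etc.) still read
 * true while sp points into the IRQ stack, where the interrupted
 * task's own preempt_count is no longer visible.
 */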
void irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (hardirq_ctx[cpu])
                return;

        irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        hardirq_ctx[cpu] = irqctx;

        irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = 0;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        softirq_ctx[cpu] = irqctx;

        printk("CPU %u irqstacks, hard=%p soft=%p\n",
                cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
        hardirq_ctx[cpu] = NULL;
}

asmlinkage void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curctx = current_thread_info();
                irqctx = softirq_ctx[smp_processor_id()];
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_sp = current_stack_pointer;

                /* build the stack frame on the softirq stack */
                isp = (u32 *)((char *)irqctx + sizeof(*irqctx));

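                /*
                 * Same delay-slot trick as in handle_one_irq(): sp is
                 * switched onto the softirq stack in the jsr delay
                 * slot, while the original sp is parked in the
                 * callee-saved r9 for the duration of __do_softirq().
                 */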
                __asm__ __volatile__ (
                        "mov    r15, r9         \n"
                        "jsr    @%0             \n"
                        /* switch to the softirq stack */
                        " mov   %1, r15         \n"
                        /* restore the thread stack */
                        "mov    r9, r15         \n"
                        : /* no outputs */
                        : "r" (__do_softirq), "r" (isp)
                        : "memory", "r0", "r1", "r2", "r3", "r4",
                          "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
                );

                /*
                 * Shouldn't happen, we returned above if in_interrupt():
                 */
                WARN_ON_ONCE(softirq_count());
        }

        local_irq_restore(flags);
}
#else
static inline void handle_one_irq(unsigned int irq)
{
        generic_handle_irq(irq);
}
#endif

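/*
 * Common interrupt entry, called from the low-level exception code
 * with the hardware vector: irq_lookup() translates the vector into a
 * Linux IRQ number, and irq_demux() gives the machvec a chance to
 * demultiplex board-level cascaded interrupts.
 */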
asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        irq_enter();

        irq = irq_demux(irq_lookup(irq));

        if (irq != NO_IRQ_IGNORE) {
                handle_one_irq(irq);
                irq_finish(irq);
        }

        irq_exit();

        set_irq_regs(old_regs);

        return IRQ_HANDLED;
}

void __init init_IRQ(void)
{
        plat_irq_setup();

        /* Perform the machine specific initialisation */
        if (sh_mv.mv_init_irq)
                sh_mv.mv_init_irq();

        intc_finalize();

        irq_ctx_init(smp_processor_id());
}

#ifdef CONFIG_SPARSE_IRQ
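/*
 * Under SPARSE_IRQ the machine vector supplies the IRQ count; the
 * NR_IRQS_LEGACY return value tells the core how many legacy IRQ
 * descriptors to preallocate.
 */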
int __init arch_probe_nr_irqs(void)
{
        nr_irqs = sh_mv.mv_nr_irqs;
        return NR_IRQS_LEGACY;
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_chip *chip = irq_data_get_irq_chip(data);

        printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
               irq, data->node, cpu);

        raw_spin_lock_irq(&desc->lock);
        chip->irq_set_affinity(data, cpumask_of(cpu), false);
        raw_spin_unlock_irq(&desc->lock);
}

/*
 * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
        unsigned int irq, cpu = smp_processor_id();

        for_each_active_irq(irq) {
                struct irq_data *data = irq_get_irq_data(irq);

                if (data->node == cpu) {
                        unsigned int newcpu = cpumask_any_and(data->affinity,
                                                              cpu_online_mask);
                        if (newcpu >= nr_cpu_ids) {
                                if (printk_ratelimit())
                                        printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
                                               irq, cpu);

                                cpumask_setall(data->affinity);
                                newcpu = cpumask_any_and(data->affinity,
                                                         cpu_online_mask);
                        }

                        route_irq(data, irq, newcpu);
                }
        }
}
#endif