/*
 *  sun4m SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/cpu.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "irq.h"
#include "kernel.h"

#define IRQ_CROSS_CALL		15

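/*
 * Atomically exchange *ptr with val using the SPARC "swap" instruction
 * and return the old value.  Used below to signal the master CPU
 * without relying on an ordinary store.
 */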
static inline unsigned long
swap_ulong(volatile unsigned long *ptr, unsigned long val)
{
	__asm__ __volatile__("swap [%1], %0\n\t" :
			     "=&r" (val), "=&r" (ptr) :
			     "0" (val), "1" (ptr));
	return val;
}

static void smp_setup_percpu_timer(void);

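/*
 * Entry point for a freshly started secondary CPU: set up its local
 * timer and delay loop, publish its cpu_info, report in to the master
 * via cpu_callin_map, then spin until the master sets our bit in
 * smp_commenced_mask before going online.
 */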
void __cpuinit smp4m_callin(void)
{
	int cpuid = hard_smp_processor_id();

	local_flush_cache_all();
	local_flush_tlb_all();

	notify_cpu_starting(cpuid);

	/* Get our local ticker going. */
	smp_setup_percpu_timer();

	calibrate_delay();
	smp_store_cpu_info(cpuid);

	local_flush_cache_all();
	local_flush_tlb_all();

	/*
	 * Unblock the master CPU only once this secondary's scheduler
	 * state is fully up to date, so that after SMP initialization
	 * the master can safely enter the scheduler.
	 */
	/* Allow master to continue. */
	swap_ulong(&cpu_callin_map[cpuid], 1);

	/* XXX: What's up with all the flushes? */
	local_flush_cache_all();
	local_flush_tlb_all();

	cpu_probe();

	/* Fix idle thread fields. */
	__asm__ __volatile__("ld [%0], %%g6\n\t"
			     : : "r" (&current_set[cpuid])
			     : "memory" /* paranoid */);

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		mb();

	local_irq_enable();

	set_cpu_online(cpuid, true);
}

/*
 * Prepare the boot CPU for SMP; each secondary processor is started
 * individually via smp4m_boot_one_cpu() below.
 */
void __init smp4m_boot_cpus(void)
{
	smp_setup_percpu_timer();
	local_flush_cache_all();
}

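/*
 * Start one secondary CPU: fork its idle task, point it at its slot in
 * the trampoline, hand the MMU context table to the PROM, then wait up
 * to ~2 seconds (10000 x 200us) for the CPU to check in.
 */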
int __cpuinit smp4m_boot_one_cpu(int i)
{
	unsigned long *entry = &sun4m_cpu_startup;
	struct task_struct *p;
	int timeout;
	int cpu_node;

	cpu_find_by_mid(i, &cpu_node);

	/* Cook up an idler for this guy. */
	p = fork_idle(i);
	current_set[i] = task_thread_info(p);
	/* See trampoline.S for details... */
	entry += ((i - 1) * 3);

	/*
	 * Initialize the contexts table.
	 * Since the call to prom_startcpu() trashes the structure,
	 * we need to re-initialize it for each cpu.
	 */
	smp_penguin_ctable.which_io = 0;
	smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
	smp_penguin_ctable.reg_size = 0;

	/* whirrr, whirrr, whirrrrrrrrr... */
	printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
	local_flush_cache_all();
	prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry);

	/* wheee... it's going... */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_callin_map[i])
			break;
		udelay(200);
	}

	if (!(cpu_callin_map[i])) {
		printk(KERN_ERR "Processor %d is stuck.\n", i);
		return -ENODEV;
	}

	local_flush_cache_all();
	return 0;
}

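/*
 * Link all online CPUs into a ring via cpu_data(i).next;
 * smp4m_irq_rotate() walks this ring to spread IRQ servicing across
 * the processors.
 */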
void __init smp4m_smp_done(void)
{
	int i, first;
	int *prev;

	/* Set up the cpu list for irq rotation. */
	first = 0;
	prev = &first;
	for_each_online_cpu(i) {
		*prev = i;
		prev = &cpu_data(i).next;
	}
	*prev = first;
	local_flush_cache_all();

	/* Ok, they are spinning and ready to go. */
}

/*
 * At each hardware IRQ we get called to forward IRQ reception to the
 * next processor.  The caller must globally disable the IRQ level
 * being serviced so that no double interrupts are received.
 *
 * XXX See sparc64 irq.c.
 */
void smp4m_irq_rotate(int cpu)
{
	int next = cpu_data(cpu).next;

	if (next != cpu)
		set_irq_udt(next);
}

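/*
 * Single-slot mailbox for cross calls.  Only one cross call can be in
 * flight at a time (serialized by cross_call_lock below); each target
 * CPU flags its progress in processors_in/processors_out.
 */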
static struct smp_funcall {
	smpfunc_t func;
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
	unsigned long arg4;
	unsigned long arg5;
	unsigned long processors_in[SUN4M_NCPUS];  /* Set when ipi entered. */
	unsigned long processors_out[SUN4M_NCPUS]; /* Set when ipi exited. */
} ccall_info;

static DEFINE_SPINLOCK(cross_call_lock);

/* Cross calls must be serialized, at least currently. */
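/*
 * Fill in ccall_info, fire IRQ_CROSS_CALL at every CPU in @mask other
 * than ourselves, then spin first until every target has entered the
 * handler and again until every target has finished.
 */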
static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
			     unsigned long arg2, unsigned long arg3,
			     unsigned long arg4)
{
	register int ncpus = SUN4M_NCPUS;
	unsigned long flags;

	spin_lock_irqsave(&cross_call_lock, flags);

	/* Init function glue. */
	ccall_info.func = func;
	ccall_info.arg1 = arg1;
	ccall_info.arg2 = arg2;
	ccall_info.arg3 = arg3;
	ccall_info.arg4 = arg4;
	ccall_info.arg5 = 0;

	/* Init receive/complete mapping, plus fire the IPIs off. */
	{
		register int i;

		cpu_clear(smp_processor_id(), mask);
		cpus_and(mask, cpu_online_map, mask);
		for (i = 0; i < ncpus; i++) {
			if (cpu_isset(i, mask)) {
				ccall_info.processors_in[i] = 0;
				ccall_info.processors_out[i] = 0;
				set_cpu_int(i, IRQ_CROSS_CALL);
			} else {
				ccall_info.processors_in[i] = 1;
				ccall_info.processors_out[i] = 1;
			}
		}
	}

	{
		register int i;

		i = 0;
		do {
			if (!cpu_isset(i, mask))
				continue;
			while (!ccall_info.processors_in[i])
				barrier();
		} while (++i < ncpus);

		i = 0;
		do {
			if (!cpu_isset(i, mask))
				continue;
			while (!ccall_info.processors_out[i])
				barrier();
		} while (++i < ncpus);
	}
	spin_unlock_irqrestore(&cross_call_lock, flags);
}

/* IPI handler: run the pending cross call and acknowledge completion. */
void smp4m_cross_call_irq(void)
{
	int i = smp_processor_id();

	ccall_info.processors_in[i] = 1;
	ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
			ccall_info.arg4, ccall_info.arg5);
	ccall_info.processors_out[i] = 1;
}

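/*
 * Level-14 per-cpu timer tick: clear the profile IRQ, feed the
 * profiler, and run update_process_times() once every
 * prof_multiplier(cpu) ticks.
 */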
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();

	old_regs = set_irq_regs(regs);

	sun4m_clear_profile_irq(cpu);

	profile_tick(CPU_PROFILING);

	if (!--prof_counter(cpu)) {
		int user = user_mode(regs);

		irq_enter();
		update_process_times(user);
		irq_exit();

		prof_counter(cpu) = prof_multiplier(cpu);
	}
	set_irq_regs(old_regs);
}

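/*
 * Arm this CPU's level-14 profile timer; the boot CPU additionally
 * unmasks PIL 14 here.
 */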
static void __cpuinit smp_setup_percpu_timer(void)
{
	int cpu = smp_processor_id();

	prof_counter(cpu) = prof_multiplier(cpu) = 1;
	load_profile_irq(cpu, lvl14_resolution);

	if (cpu == boot_cpu_id)
		enable_pil_irq(14);
}

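/*
 * BTFIXUP blackboxes: patch instruction templates in place.  On sun4m
 * the CPU id lives in bits 13:12 of %tbr, so hard_smp_processor_id()
 * becomes "read %tbr, shift right 12, mask with 3".
 */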
static void __init smp4m_blackbox_id(unsigned *addr)
{
	int rd = *addr & 0x3e000000;
	int rs1 = rd >> 11;

	addr[0] = 0x81580000 | rd;		/* rd %tbr, reg */
	addr[1] = 0x8130200c | rd | rs1;	/* srl reg, 0xc, reg */
	addr[2] = 0x80082003 | rd | rs1;	/* and reg, 3, reg */
}

/*
 * Same trick for load_current: shifting by 10 and masking with 0xc
 * yields cpu_id * 4, a byte offset into the current_set[] pointer
 * table.
 */
static void __init smp4m_blackbox_current(unsigned *addr)
{
	int rd = *addr & 0x3e000000;
	int rs1 = rd >> 11;

	addr[0] = 0x81580000 | rd;		/* rd %tbr, reg */
	addr[2] = 0x8130200a | rd | rs1;	/* srl reg, 0xa, reg */
	addr[4] = 0x8008200c | rd | rs1;	/* and reg, 0xc, reg */
}

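/*
 * Wire the generic sparc32 SMP entry points to the sun4m
 * implementations via the boot-time fixup (BTFIXUP) tables.
 */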
void __init sun4m_init_smp(void)
{
	BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4m_blackbox_id);
	BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
	BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
}