/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
#include <asm/leon.h>

#include "irq.h"

volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
unsigned char boot_cpu_id = 0;
unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */

cpumask_t smp_commenced_mask = CPU_MASK_NONE;

/* The only locking primitive guaranteed to be available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg', which atomically
 * loads the byte at the effective address into dest_reg and then
 * stores 0xff at that address.  A pretty lame locking primitive
 * compared to the Alpha's or Intel's, no?  Most Sparcs have the 'swap'
 * instruction, which is much better...
 */
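
/* Purely for illustration (and compiled out below): a minimal sketch of
 * the test-and-set spin lock one can build on ldstub.  The names
 * example_spin_lock/example_spin_unlock are made up here; this is not
 * the kernel's actual arch_spin_lock implementation.
 */
#if 0
static inline void example_spin_lock(volatile unsigned char *lock)
{
        unsigned char tmp;

        do {
                /* Atomically fetch the lock byte and store 0xff there. */
                __asm__ __volatile__("ldstub   [%1], %0"
                                     : "=r" (tmp)
                                     : "r" (lock)
                                     : "memory");
        } while (tmp != 0);     /* nonzero: another cpu holds the lock */
}

static inline void example_spin_unlock(volatile unsigned char *lock)
{
        __asm__ __volatile__("" : : : "memory");        /* compiler barrier */
        *lock = 0;
}
#endif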

void __cpuinit smp_store_cpu_info(int id)
{
        int cpu_node;
        int mid;

        cpu_data(id).udelay_val = loops_per_jiffy;

        cpu_find_by_mid(id, &cpu_node);
        cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
                                                     "clock-frequency", 0);
        cpu_data(id).prom_node = cpu_node;
        mid = cpu_get_hwmid(cpu_node);

        if (mid < 0) {
                printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08x\n",
                       id, cpu_node);
                mid = 0;
        }
        cpu_data(id).mid = mid;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        extern void smp4m_smp_done(void);
        extern void smp4d_smp_done(void);
        unsigned long bogosum = 0;
        int cpu, num = 0;

        for_each_online_cpu(cpu) {
                num++;
                bogosum += cpu_data(cpu).udelay_val;
        }

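        /* udelay_val is loops_per_jiffy, i.e. udelay_val * HZ delay-loop
         * iterations per second, and one BogoMIPS is 500000 of them:
         * dividing by (500000/HZ) gives the whole part and dividing by
         * (5000/HZ) modulo 100 the two decimals, without floating point.
         */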
        printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
                num, bogosum/(500000/HZ),
                (bogosum/(5000/HZ))%100);

        switch (sparc_cpu_model) {
        case sun4:
                printk("SUN4\n");
                BUG();
                break;
        case sun4c:
                printk("SUN4C\n");
                BUG();
                break;
        case sun4m:
                smp4m_smp_done();
                break;
        case sun4d:
                smp4d_smp_done();
                break;
        case sparc_leon:
                leon_smp_done();
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        }
}

void cpu_panic(void)
{
        printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
        panic("SMP bolixed\n");
}

struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };

void smp_send_reschedule(int cpu)
{
        /* See sparc64 */
}

void smp_send_stop(void)
{
}

void smp_flush_cache_all(void)
{
        xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
        local_flush_cache_all();
}

void smp_flush_tlb_all(void)
{
        xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
        local_flush_tlb_all();
}

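/* The MM flush helpers below all follow the same pattern: snapshot the
 * mm's cpu mask, drop the local cpu from it, cross-call (xcN) whatever
 * cpus remain so they run their local flush, then do the local flush
 * last.
 */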
void smp_flush_cache_mm(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = *mm_cpumask(mm);
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
                local_flush_cache_mm(mm);
        }
}

void smp_flush_tlb_mm(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = *mm_cpumask(mm);
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask)) {
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
                        if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
                                cpumask_copy(mm_cpumask(mm),
                                             cpumask_of(smp_processor_id()));
                }
                local_flush_tlb_mm(mm);
        }
}

void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = *mm_cpumask(mm);
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
                local_flush_cache_range(vma, start, end);
        }
}

void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                         unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = *mm_cpumask(mm);
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
                local_flush_tlb_range(vma, start, end);
        }
}

void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = *mm_cpumask(mm);
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
                local_flush_cache_page(vma, page);
        }
}

void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = *mm_cpumask(mm);
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
                local_flush_tlb_page(vma, page);
        }
}

void smp_reschedule_irq(void)
{
        set_need_resched();
}

void smp_flush_page_to_ram(unsigned long page)
{
        /* Current theory is that those who call this are the ones
         * who have just dirtied their cache with the page's contents
         * in kernel space, therefore we only run this on the local cpu.
         *
         * XXX This experiment failed, research further... -DaveM
         */
#if 1
        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
#endif
        local_flush_page_to_ram(page);
}

void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
        cpumask_t cpu_mask = *mm_cpumask(mm);
        cpu_clear(smp_processor_id(), cpu_mask);
        if (!cpus_empty(cpu_mask))
                xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
        local_flush_sig_insns(mm, insn_addr);
}

extern unsigned int lvl14_resolution;

/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

int setup_profiling_timer(unsigned int multiplier)
{
        int i;
        unsigned long flags;

        /* Prevent level14 ticker IRQ flooding. */
        if ((!multiplier) || (lvl14_resolution / multiplier) < 500)
                return -EINVAL;

        spin_lock_irqsave(&prof_setup_lock, flags);
        for_each_possible_cpu(i) {
                load_profile_irq(i, lvl14_resolution / multiplier);
                prof_multiplier(i) = multiplier;
        }
        spin_unlock_irqrestore(&prof_setup_lock, flags);

        return 0;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        extern void __init smp4m_boot_cpus(void);
        extern void __init smp4d_boot_cpus(void);
        int i, cpuid, extra;

        printk("Entering SMP Mode...\n");

        extra = 0;
        for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
                if (cpuid >= NR_CPUS)
                        extra++;
        }
        /* i = number of cpus */
        if (extra && max_cpus > i - extra)
                printk("Warning: NR_CPUS is too low to start all cpus\n");

        smp_store_cpu_info(boot_cpu_id);

        switch (sparc_cpu_model) {
        case sun4:
                printk("SUN4\n");
                BUG();
                break;
        case sun4c:
                printk("SUN4C\n");
                BUG();
                break;
        case sun4m:
                smp4m_boot_cpus();
                break;
        case sun4d:
                smp4d_boot_cpus();
                break;
        case sparc_leon:
                leon_boot_cpus();
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        }
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
        int instance, mid;

        instance = 0;
        while (!cpu_find_by_instance(instance, NULL, &mid)) {
                if (mid < NR_CPUS) {
                        set_cpu_possible(mid, true);
                        set_cpu_present(mid, true);
                }
                instance++;
        }
}

void __init smp_prepare_boot_cpu(void)
{
        int cpuid = hard_smp_processor_id();

        if (cpuid >= NR_CPUS) {
                prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
                prom_halt();
        }
        if (cpuid != 0)
                printk("boot cpu id != 0, this could work but is untested\n");

        current_thread_info()->cpu = cpuid;
        set_cpu_online(cpuid, true);
        set_cpu_possible(cpuid, true);
}

int __cpuinit __cpu_up(unsigned int cpu)
{
        extern int __cpuinit smp4m_boot_one_cpu(int);
        extern int __cpuinit smp4d_boot_one_cpu(int);
        int ret = 0;

        switch (sparc_cpu_model) {
        case sun4:
                printk("SUN4\n");
                BUG();
                break;
        case sun4c:
                printk("SUN4C\n");
                BUG();
                break;
        case sun4m:
                ret = smp4m_boot_one_cpu(cpu);
                break;
        case sun4d:
                ret = smp4d_boot_one_cpu(cpu);
                break;
        case sparc_leon:
                ret = leon_boot_one_cpu(cpu);
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        }

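        /* Setting the cpu's bit in smp_commenced_mask releases the
         * freshly booted cpu from its holding loop; spin until it has
         * marked itself online before returning.
         */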
        if (!ret) {
                cpu_set(cpu, smp_commenced_mask);
                while (!cpu_online(cpu))
                        mb();
        }
        return ret;
}

void smp_bogo(struct seq_file *m)
{
        int i;

        for_each_online_cpu(i) {
                seq_printf(m,
                           "Cpu%dBogo\t: %lu.%02lu\n",
                           i,
                           cpu_data(i).udelay_val/(500000/HZ),
                           (cpu_data(i).udelay_val/(5000/HZ))%100);
        }
}

void smp_info(struct seq_file *m)
{
        int i;

        seq_printf(m, "State:\n");
        for_each_online_cpu(i)
                seq_printf(m, "CPU%d\t\t: online\n", i);
}