/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>

int smp_num_cpus = 1;
volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,};
unsigned char boot_cpu_id = 0;
unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
int smp_activated = 0;
volatile int __cpu_number_map[NR_CPUS];
volatile int __cpu_logical_map[NR_CPUS];

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
cpumask_t smp_commenced_mask = CPU_MASK_NONE;

/* The only locking primitive guaranteed to be available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg', which atomically
 * loads the byte at the effective address into dest_reg and stores 0xff
 * there afterwards.  A pretty lame locking primitive compared to the
 * Alpha and Intel ones, no?  Most Sparcs have a 'swap' instruction,
 * which is much better...
 */

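/* A minimal sketch, for illustration only, of how a byte lock can be
 * built on ldstub.  The function name is made up for this example and
 * nothing in this file uses it; the kernel's real spinlock code lives
 * elsewhere, and a production lock would spin on a plain load before
 * retrying the ldstub to avoid hammering the bus.  Compiled out.
 */
#if 0
static inline void example_ldstub_lock(volatile unsigned char *lock)
{
	unsigned char val;

	do {
		/* Atomically fetch the old byte and store 0xff there. */
		__asm__ __volatile__("ldstub	[%1], %0"
				     : "=r" (val)
				     : "r" (lock)
				     : "memory");
	} while (val != 0);	/* 0 means the lock was free and is now ours. */
}
#endif
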
/* Used to make bitops atomic */
unsigned char bitops_spinlock = 0;

void __cpuinit smp_store_cpu_info(int id)
{
	int cpu_node;

	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);
	cpu_data(id).prom_node = cpu_node;
	cpu_data(id).mid = cpu_get_hwmid(cpu_node);

	/* This is required to tune the scheduler correctly. */
	/* Is it possible to have CPUs with different cache sizes? */
	if (id == boot_cpu_id) {
		int cache_line, cache_nlines;

		cache_line = 0x20;
		cache_line = prom_getintdefault(cpu_node, "ecache-line-size", cache_line);
		cache_nlines = 0x8000;
		cache_nlines = prom_getintdefault(cpu_node, "ecache-nlines", cache_nlines);
		max_cache_size = cache_line * cache_nlines;
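		/* With the defaults above that is 0x20 * 0x8000, i.e.
		 * 32-byte lines * 32768 lines = 1 MiB of external cache.
		 */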
	}
	if (cpu_data(id).mid < 0)
		panic("No MID found for CPU%d at node 0x%08x", id, cpu_node);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	extern void smp4m_smp_done(void);
	extern void smp4d_smp_done(void);
	unsigned long bogosum = 0;
	int cpu, num;

	for (cpu = 0, num = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu)) {
			num++;
			bogosum += cpu_data(cpu).udelay_val;
		}

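	/* udelay_val is loops_per_jiffy, and BogoMIPS is
	 * loops_per_jiffy * HZ / 500000; the second expression below
	 * recovers the two decimal places.
	 */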
	printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num, bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	switch (sparc_cpu_model) {
	case sun4:
		printk("SUN4\n");
		BUG();
		break;
	case sun4c:
		printk("SUN4C\n");
		BUG();
		break;
	case sun4m:
		smp4m_smp_done();
		break;
	case sun4d:
		smp4d_smp_done();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

struct linux_prom_registers smp_penguin_ctable __initdata = { 0 };

void smp_send_reschedule(int cpu)
{
	/* See sparc64 */
}

void smp_send_stop(void)
{
}

void smp_flush_cache_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
	local_flush_cache_all();
}

void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
	local_flush_tlb_all();
}

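/* The mm/range/page flush routines below all follow the same pattern:
 * snapshot mm->cpu_vm_mask, drop the calling cpu from it, cross-call
 * (xc1/xc2/xc3, named for their argument counts) any remaining cpus to
 * run their local flush, then perform the local flush ourselves.
 */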
void smp_flush_cache_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = mm->cpu_vm_mask;
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
		local_flush_cache_mm(mm);
	}
}

void smp_flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = mm->cpu_vm_mask;
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask)) {
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
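			/* If we are the only user of this mm, no other
			 * cpu can hold stale TLB entries for it, so
			 * shrink cpu_vm_mask back to just ourselves.
			 */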
			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
				mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
		}
		local_flush_tlb_mm(mm);
	}
}

void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = mm->cpu_vm_mask;
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
		local_flush_cache_range(vma, start, end);
	}
}

void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = mm->cpu_vm_mask;
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
		local_flush_tlb_range(vma, start, end);
	}
}

void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = mm->cpu_vm_mask;
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
		local_flush_cache_page(vma, page);
	}
}

void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = mm->cpu_vm_mask;
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
		local_flush_tlb_page(vma, page);
	}
}

void smp_reschedule_irq(void)
{
	set_need_resched();
}

void smp_flush_page_to_ram(unsigned long page)
{
	/* Current theory is that those who call this are the ones
	 * who have just dirtied their cache with the page's contents
	 * in kernel space, therefore we only run this on the local cpu.
	 *
	 * XXX This experiment failed, research further... -DaveM
	 */
#if 1
	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
#endif
	local_flush_page_to_ram(page);
}

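/* Flush on every cpu that may run this mm, so all of them see the
 * freshly written signal trampoline instructions.
 */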
void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	cpumask_t cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);
	if (!cpus_empty(cpu_mask))
		xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
	local_flush_sig_insns(mm, insn_addr);
}

extern unsigned int lvl14_resolution;

/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

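/* The multiplier divides the level-14 timer period, so larger values
 * mean more frequent profiling interrupts; requests that would push
 * the period below 500 counter units are rejected to avoid flooding.
 */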
int setup_profiling_timer(unsigned int multiplier)
{
	int i;
	unsigned long flags;

	/* Prevent level14 ticker IRQ flooding. */
	if (!multiplier || (lvl14_resolution / multiplier) < 500)
		return -EINVAL;

	spin_lock_irqsave(&prof_setup_lock, flags);
	for_each_possible_cpu(i) {
		load_profile_irq(i, lvl14_resolution / multiplier);
		prof_multiplier(i) = multiplier;
	}
	spin_unlock_irqrestore(&prof_setup_lock, flags);

	return 0;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	extern void __init smp4m_boot_cpus(void);
	extern void __init smp4d_boot_cpus(void);
	int i, cpuid, extra;

	printk("Entering SMP Mode...\n");

	extra = 0;
	for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
		if (cpuid >= NR_CPUS)
			extra++;
	}
	/* i = number of cpus */
	if (extra && max_cpus > i - extra)
		printk("Warning: NR_CPUS is too low to start all cpus\n");

	smp_store_cpu_info(boot_cpu_id);

	switch (sparc_cpu_model) {
	case sun4:
		printk("SUN4\n");
		BUG();
		break;
	case sun4c:
		printk("SUN4C\n");
		BUG();
		break;
	case sun4m:
		smp4m_boot_cpus();
		break;
	case sun4d:
		smp4d_boot_cpus();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS) {
			cpu_set(mid, phys_cpu_present_map);
			cpu_set(mid, cpu_present_map);
		}
		instance++;
	}
}

void __init smp_prepare_boot_cpu(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpuid >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}
	if (cpuid != 0)
		printk("boot cpu id != 0, this could work but is untested\n");

	current_thread_info()->cpu = cpuid;
	cpu_set(cpuid, cpu_online_map);
	cpu_set(cpuid, phys_cpu_present_map);
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	extern int __cpuinit smp4m_boot_one_cpu(int);
	extern int __cpuinit smp4d_boot_one_cpu(int);
	int ret = 0;

	switch (sparc_cpu_model) {
	case sun4:
		printk("SUN4\n");
		BUG();
		break;
	case sun4c:
		printk("SUN4C\n");
		BUG();
		break;
	case sun4m:
		ret = smp4m_boot_one_cpu(cpu);
		break;
	case sun4d:
		ret = smp4d_boot_one_cpu(cpu);
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}

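	/* On success, release the new cpu from its holding pattern and
	 * spin until it has marked itself online.
	 */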
	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
	}
	return ret;
}


void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i) {
		seq_printf(m,
			   "Cpu%dBogo\t: %lu.%02lu\n",
			   i,
			   cpu_data(i).udelay_val/(500000/HZ),
			   (cpu_data(i).udelay_val/(5000/HZ))%100);
	}
}

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d\t\t: online\n", i);
}