/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
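/* A freshly booted secondary CPU ticks its slot in cpu_callin_map as
 * soon as it enters the kernel; smp_commenced_mask is the boot CPU's
 * permission for it to continue into the idle loop (see __cpu_up()
 * below).
 */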
volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};

cpumask_t smp_commenced_mask = CPU_MASK_NONE;
/* The only locking primitive guaranteed to be available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg', which atomically
 * places the current byte at the effective address into dest_reg and
 * places 0xff there afterwards.  Pretty lame locking primitive
 * compared to the Alpha and the Intel, no?  Most Sparcs have the 'swap'
 * instruction, which is much better...
 */
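/* For illustration only: a minimal sketch of the test-and-set spinlock
 * that can be built on nothing but 'ldstub'.  It is not part of this
 * file, and example_lock_t, example_lock() and example_unlock() are
 * hypothetical names; the real locks live in the asm/spinlock headers.
 */
#if 0	/* illustrative sketch, never compiled */
typedef struct {
	volatile unsigned char byte;
} example_lock_t;

static inline void example_lock(example_lock_t *lock)
{
	unsigned char val;

	for (;;) {
		/* Atomically fetch the lock byte and store 0xff there. */
		__asm__ __volatile__("ldstub	[%1], %0"
				     : "=r" (val)
				     : "r" (&lock->byte)
				     : "memory");
		if (!val)		/* old value was 0: lock acquired */
			return;
		while (lock->byte)	/* spin with plain loads until free */
			;
	}
}

static inline void example_unlock(example_lock_t *lock)
{
	__asm__ __volatile__("" : : : "memory");	/* compiler barrier */
	lock->byte = 0;
}
#endif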
void __cpuinit smp_store_cpu_info(int id)
{
	int cpu_node, mid;

	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);
	cpu_data(id).prom_node = cpu_node;

	mid = cpu_get_hwmid(cpu_node);
	if (mid < 0) {
		printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08x\n",
		       id, cpu_node);
		mid = 0;
	}
	cpu_data(id).mid = mid;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	extern void smp4m_smp_done(void);
	extern void smp4d_smp_done(void);
	unsigned long bogosum = 0;
	int cpu, num = 0;

	for_each_online_cpu(cpu) {
		num++;
		bogosum += cpu_data(cpu).udelay_val;
	}

	printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
	       num, bogosum / (500000 / HZ),
	       (bogosum / (5000 / HZ)) % 100);
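	/* A note on the arithmetic above: udelay_val is loops_per_jiffy,
	 * so BogoMIPS = loops_per_jiffy * HZ / 500000.  With integer
	 * division, bogosum / (500000 / HZ) is the whole part and
	 * (bogosum / (5000 / HZ)) % 100 supplies the two digits printed
	 * after the decimal point.
	 */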
	switch (sparc_cpu_model) {
	case sun4m:
		smp4m_smp_done();
		break;
	case sun4d:
		smp4d_smp_done();
		break;
	case sparc_leon:
		leon_smp_done();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
	}
}
void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}
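/* smp_penguin_ctable is filled in by the CPU-model specific boot code
 * and hands the MMU context-table descriptor to the trampoline that a
 * freshly started secondary CPU runs.
 */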
struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };
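/* BTFIXUP_CALL() below invokes a function pointer patched in at boot
 * time for the machine we are running on (sun4m, sun4d or LEON); each
 * of those supplies its own IPI and flush implementations.
 */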
void smp_send_reschedule(int cpu)
{
	/*
	 * CPU model dependent way of implementing IPI generation targeting
	 * a single CPU. The trap handler needs only to do trap entry/return
	 * to call schedule().
	 */
	BTFIXUP_CALL(smp_ipi_resched)(cpu);
}
void smp_send_stop(void)
{
}
void arch_send_call_function_single_ipi(int cpu)
{
	/* trigger one IPI single call on one CPU */
	BTFIXUP_CALL(smp_ipi_single)(cpu);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	/* trigger IPI mask call on each CPU */
	for_each_cpu(cpu, mask)
		BTFIXUP_CALL(smp_ipi_mask_one)(cpu);
}
void smp_resched_interrupt(void)
{
	irq_enter();
	scheduler_ipi();
	local_cpu_data().irq_resched_count++;
	irq_exit();
	/* re-schedule routine called by interrupt return code. */
}
void smp_call_function_single_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}
void smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}
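/* Each smp_flush_*() below pushes the corresponding local_flush_*()
 * operation out to the other CPUs via a cross-call; the xcN() helpers
 * take the function plus N unsigned long arguments (xc0 none, xc1 one,
 * and so on).  The caller then performs the same flush locally, so
 * every CPU ends up consistent.
 */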
void smp_flush_cache_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
	local_flush_cache_all();
}
void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
	local_flush_tlb_all();
}
void smp_flush_cache_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
		local_flush_cache_mm(mm);
	}
}
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask)) {
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
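			/* Once the remote TLBs are flushed, a single-user
			 * mm that is only active here can shrink its
			 * cpumask to just this CPU, sparing future
			 * cross-calls.
			 */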
			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
				cpumask_copy(mm_cpumask(mm),
					     cpumask_of(smp_processor_id()));
		}
		local_flush_tlb_mm(mm);
	}
}
void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
		local_flush_cache_range(vma, start, end);
	}
}
void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
		local_flush_tlb_range(vma, start, end);
	}
}
void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
		local_flush_cache_page(vma, page);
	}
}
void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
		local_flush_tlb_page(vma, page);
	}
}
void smp_flush_page_to_ram(unsigned long page)
{
	/* Current theory is that those who call this are the ones
	 * who have just dirtied their cache with the page's contents
	 * in kernel space, therefore we only run this on the local cpu.
	 *
	 * XXX This experiment failed, research further... -DaveM
	 */
#if 1
	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
#endif
	local_flush_page_to_ram(page);
}
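/* A signal trampoline has just been written at insn_addr in user space;
 * push it out of the data caches and into the instruction stream on
 * every CPU that may run this mm.
 */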
void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	cpumask_t cpu_mask;
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
	if (!cpumask_empty(&cpu_mask))
		xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
	local_flush_sig_insns(mm, insn_addr);
}
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	extern void __init smp4m_boot_cpus(void);
	extern void __init smp4d_boot_cpus(void);
	int i, cpuid, extra;

	printk("Entering SMP Mode...\n");

	extra = 0;
	for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
		if (cpuid >= NR_CPUS)
			extra++;
	}
	/* i = number of cpus */
	if (extra && max_cpus > i - extra)
		printk("Warning: NR_CPUS is too low to start all cpus\n");

	smp_store_cpu_info(boot_cpu_id);
	switch (sparc_cpu_model) {
	case sun4m:
		smp4m_boot_cpus();
		break;
	case sun4d:
		smp4d_boot_cpus();
		break;
	case sparc_leon:
		leon_boot_cpus();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
	}
}
/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS) {
			set_cpu_possible(mid, true);
			set_cpu_present(mid, true);
		}
		instance++;
	}
}
void __init smp_prepare_boot_cpu(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpuid >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}
	if (cpuid != 0)
		printk("boot cpu id != 0, this could work but is untested\n");

	current_thread_info()->cpu = cpuid;
	set_cpu_online(cpuid, true);
	set_cpu_possible(cpuid, true);
}
int __cpuinit __cpu_up(unsigned int cpu)
{
	extern int __cpuinit smp4m_boot_one_cpu(int);
	extern int __cpuinit smp4d_boot_one_cpu(int);
	int ret = 0;

	switch (sparc_cpu_model) {
	case sun4m:
		ret = smp4m_boot_one_cpu(cpu);
		break;
	case sun4d:
		ret = smp4d_boot_one_cpu(cpu);
		break;
	case sparc_leon:
		ret = leon_boot_one_cpu(cpu);
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
	}
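	/* On success the new CPU is still spinning in its trampoline,
	 * waiting for its bit in smp_commenced_mask.  Setting the bit
	 * releases it; we then wait for the CPU to mark itself online.
	 */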
	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
	}
	return ret;
}
void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i) {
		seq_printf(m,
			   "Cpu%dBogo\t: %lu.%02lu\n",
			   i,
			   cpu_data(i).udelay_val / (500000 / HZ),
			   (cpu_data(i).udelay_val / (5000 / HZ)) % 100);
	}
}
void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d\t\t: online\n", i);
}
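/* With two CPUs online, the two routines above contribute lines like
 * these to /proc/cpuinfo (illustrative output only):
 *
 *	Cpu0Bogo	: 99.48
 *	Cpu1Bogo	: 99.48
 *	State:
 *	CPU0		: online
 *	CPU1		: online
 */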