/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>

volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
unsigned char boot_cpu_id = 0;
unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
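
/* Bitmask of cpus the master has released into the kernel proper;
 * __cpu_up() sets a cpu's bit here and the platform callin code spins
 * on it before letting the secondary continue.
 */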
cpumask_t smp_commenced_mask = CPU_MASK_NONE;

/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
 * places the current byte at the effective address into dest_reg and
 * places 0xff there afterwards.  Pretty lame locking primitive
 * compared to the Alpha and the Intel no?  Most Sparcs have 'swap'
 * instruction which is much better...
 */
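
/* For illustration only: a minimal sketch of the test-and-set spinlock
 * the comment above describes, built on ldstub.  The kernel's real
 * implementation lives in <asm/spinlock.h>; the names below
 * (example_lock_t, example_spin_lock, example_spin_unlock) are
 * hypothetical and not part of this file's API.
 */
typedef struct {
	unsigned char lock;	/* 0 == free, 0xff == held */
} example_lock_t;

static inline void example_spin_lock(example_lock_t *l)
{
	unsigned char val;

	do {
		/* ldstub atomically fetches the byte and stores 0xff. */
		__asm__ __volatile__("ldstub [%1], %0"
				     : "=&r" (val)
				     : "r" (&l->lock)
				     : "memory");
	} while (val != 0);	/* non-zero: another cpu already holds it */
}

static inline void example_spin_unlock(example_lock_t *l)
{
	/* Store zero (%g0) to the lock byte to release it. */
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (&l->lock) : "memory");
}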

void __cpuinit smp_store_cpu_info(int id)
{
	int cpu_node;
	int mid;

	/* Fill in the per-cpu data from the PROM. */
	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);
	cpu_data(id).prom_node = cpu_node;

	mid = cpu_get_hwmid(cpu_node);
	if (mid < 0) {
		printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08x", id, cpu_node);
		mid = 0;
	}
	cpu_data(id).mid = mid;
}
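
/* Called by the generic SMP code once all cpus have been brought up;
 * reports the aggregate BogoMIPS and lets the platform code finish
 * its own bring-up.
 */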
void __init smp_cpus_done(unsigned int max_cpus)
{
	extern void smp4m_smp_done(void);
	extern void smp4d_smp_done(void);
	unsigned long bogosum = 0;
	int cpu, num = 0;

	for_each_online_cpu(cpu) {
		num++;
		bogosum += cpu_data(cpu).udelay_val;
	}

	/* udelay_val is loops_per_jiffy, so bogosum/(500000/HZ) is the
	 * integer BogoMIPS total and bogosum/(5000/HZ) mod 100 gives
	 * the two decimal places.
	 */
	printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num, bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	switch(sparc_cpu_model) {
	case sun4m:
		smp4m_smp_done();
		break;
	case sun4d:
		smp4d_smp_done();
		break;
	case sparc_leon:
		leon_smp_done();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}
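
/* PROM-format register block describing the MMU context table; the
 * secondary-cpu trampoline code consults this while turning on its
 * MMU during bring-up.
 */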
struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };

void smp_send_reschedule(int cpu)
{
	/* See sparc64 */
}

void smp_send_stop(void)
{
}

void smp_flush_cache_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
	local_flush_cache_all();
}

void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
	local_flush_tlb_all();
}
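
/* The mm/range/page flushers below all follow the same pattern: take
 * a copy of mm_cpumask(mm), clear our own cpu from it, cross-call
 * (xc1/xc2/xc3) the remaining cpus with the BTFIXUP'd local flush
 * routine, then run the local flush ourselves.
 */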

void smp_flush_cache_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
		local_flush_cache_mm(mm);
	}
}

void smp_flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask)) {
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
			/* If we are the mm's last user and it is only
			 * active here, shrink its cpumask to just this
			 * cpu so future flushes stay local.
			 */
			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
				cpumask_copy(mm_cpumask(mm),
					     cpumask_of(smp_processor_id()));
		}
		local_flush_tlb_mm(mm);
	}
}

void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
		local_flush_cache_range(vma, start, end);
	}
}

void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
		local_flush_tlb_range(vma, start, end);
	}
}

void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
		local_flush_cache_page(vma, page);
	}
}

void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
		local_flush_tlb_page(vma, page);
	}
}

void smp_reschedule_irq(void)
{
	set_need_resched();
}

void smp_flush_page_to_ram(unsigned long page)
{
	/* Current theory is that those who call this are the ones who
	 * have just dirtied their cache with the page's contents in
	 * kernel space, so only the local cpu would need flushing.
	 *
	 * XXX That experiment failed, so for now the flush is
	 * XXX cross-called to every cpu.  Research further... -DaveM
	 */
	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
	local_flush_page_to_ram(page);
}
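
/* Make instructions the kernel just wrote into userspace (e.g. a
 * signal trampoline) visible to the instruction caches of all cpus
 * running this mm.
 */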
void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	cpumask_t cpu_mask = *mm_cpumask(mm);
	cpu_clear(smp_processor_id(), cpu_mask);
	if (!cpus_empty(cpu_mask))
		xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
	local_flush_sig_insns(mm, insn_addr);
}

extern unsigned int lvl14_resolution;

/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

int setup_profiling_timer(unsigned int multiplier)
{
	int i;
	unsigned long flags;

	/* Prevent level14 ticker IRQ flooding. */
	if ((!multiplier) || (lvl14_resolution / multiplier) < 500)
		return -EINVAL;

	spin_lock_irqsave(&prof_setup_lock, flags);
	for_each_possible_cpu(i) {
		load_profile_irq(i, lvl14_resolution / multiplier);
		prof_multiplier(i) = multiplier;
	}
	spin_unlock_irqrestore(&prof_setup_lock, flags);

	return 0;
}
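
/* Worked example of the guard above: if lvl14_resolution were 40000,
 * a multiplier of 80 gives a reload value of exactly 500 and is
 * accepted, while anything above 80 is rejected with -EINVAL.
 * (40000 is an illustrative value, not the actual resolution.)
 */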

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	extern void __init smp4m_boot_cpus(void);
	extern void __init smp4d_boot_cpus(void);
	int i, cpuid, extra;

	printk("Entering SMP Mode...\n");

	extra = 0;
	for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
		if (cpuid >= NR_CPUS)
			extra++;
	}
	/* i = number of cpus */
	if (extra && max_cpus > i - extra)
		printk("Warning: NR_CPUS is too low to start all cpus\n");

	smp_store_cpu_info(boot_cpu_id);

	switch(sparc_cpu_model) {
	case sun4m:
		smp4m_boot_cpus();
		break;
	case sun4d:
		smp4d_boot_cpus();
		break;
	case sparc_leon:
		leon_boot_cpus();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS) {
			set_cpu_possible(mid, true);
			set_cpu_present(mid, true);
		}
		instance++;
	}
}

void __init smp_prepare_boot_cpu(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpuid >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}
	if (cpuid != 0)
		printk("boot cpu id != 0, this could work but is untested\n");

	current_thread_info()->cpu = cpuid;
	set_cpu_online(cpuid, true);
	set_cpu_possible(cpuid, true);
}
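
/* Bring one cpu online: hand off to the platform boot routine, then
 * release the cpu into smp_commenced_mask and wait for it to show up
 * in the online mask.
 */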
int __cpuinit __cpu_up(unsigned int cpu)
{
	extern int __cpuinit smp4m_boot_one_cpu(int);
	extern int __cpuinit smp4d_boot_one_cpu(int);
	int ret = 0;

	switch(sparc_cpu_model) {
	case sun4m:
		ret = smp4m_boot_one_cpu(cpu);
		break;
	case sun4d:
		ret = smp4d_boot_one_cpu(cpu);
		break;
	case sparc_leon:
		ret = leon_boot_one_cpu(cpu);
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
	}
	return ret;
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i) {
		seq_printf(m,
			   "Cpu%dBogo\t: %lu.%02lu\n",
			   i,
			   cpu_data(i).udelay_val/(500000/HZ),
			   (cpu_data(i).udelay_val/(5000/HZ))%100);
	}
}

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d\t\t: online\n", i);
}