/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/irq_regs.h>

#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
#include <asm/sections.h>
#include <asm/mdesc.h>

extern void calibrate_delay(void);

int sparc64_multi_core __read_mostly;

/* Please don't make this stuff initdata!!!  --DaveM */
unsigned char boot_cpu_id;

cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

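/* udelay_val is just loops_per_jiffy, so udelay_val * HZ / 500000 is
 * the familiar BogoMIPS figure; the two divisions below print its
 * integer and fractional parts.  E.g. with HZ == 100 and udelay_val
 * == 248512, 248512/5000 == 49 and (248512/50) % 100 == 70, i.e.
 * "49.70" BogoMIPS.
 */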
void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dBogo\t: %lu.%02lu\n"
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).udelay_val / (500000/HZ),
			   (cpu_data(i).udelay_val / (5000/HZ)) % 100,
			   i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void __devinit smp_callin(void)
{
	int cpuid = hard_smp_processor_id();
	struct trap_per_cpu *tb = &trap_block[cpuid];

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	calibrate_delay();
	cpu_data(cpuid).udelay_val = loops_per_jiffy;
	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	if (tb->hdesc) {
		kfree(tb->hdesc);
		tb->hdesc = NULL;
	}

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	cpu_set(cpuid, cpu_online_map);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

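/* The two slots of go[] implement the handshake: go[MASTER] is raised
 * by the client to request a sample and cleared by the master as the
 * acknowledgment, then go[SLAVE] carries the master's %tick value
 * back.  SLAVE is sized so that the two words sit in different cache
 * lines, keeping the polling loops on either side from bouncing the
 * same line back and forth.
 */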
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_storeload();
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;
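
	/* A worked example of the overflow-safe average below: with
	 * best_t0 == 7 and best_t1 == 9, 7/2 + 9/2 == 7 and both
	 * halvings dropped a remainder, so the both-odd test adds the
	 * lost 1 back, giving (7 + 9)/2 == 8 without ever forming the
	 * overflow-prone sum t0 + t1.
	 */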
	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_storeload();

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_storeload();
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	extern int bigkernel;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;

	hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR PFX "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}

	hdesc->cpu = cpu;
	hdesc->num_mappings = (bigkernel ? 2 : 1);

	tb = &trap_block[cpu];
	tb->hdesc = hdesc;

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	hdesc->maps[0].vaddr = tte_vaddr;
	hdesc->maps[0].tte   = tte_data;
	if (bigkernel) {
		tte_vaddr += 0x400000;
		tte_data  += 0x400000;
		hdesc->maps[1].vaddr = tte_vaddr;
		hdesc->maps[1].tte   = tte_data;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
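/* In other words, OBP is only ever handed the address of
 * cpu_new_thread itself, which is a kernel-image symbol and so fits
 * in 32 bits; the startup trampoline then loads through that address
 * to recover the full 64-bit thread_info pointer.
 */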
static struct thread_info *cpu_new_thread = NULL;

static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret;

	p = fork_idle(cpu);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);
	cpu_set(cpu, cpu_callout_map);

	if (tlb_type == hypervisor) {
		/* Alloc the mondo queues, cpu will load them.  */
		sun4v_init_mondo_queues(0, cpu, 1, 0);

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->node, entry, cookie);
	}

	/* Wait for it to come up. */
	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		cpu_clear(cpu, cpu_callout_map);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	return ret;
}

static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
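	/* The block below, roughly: %0 starts at 0x40 and walks the
	 * three interrupt dispatch data registers (0x40/0x50/0x60,
	 * %8 == 0x10 apart) storing data0/data1/data2 via ASI_INTR_W,
	 * fires the dispatch with the store to `target', and slips the
	 * errata dummy read from ASI 0x7f in before the final
	 * membar #Sync.
	 */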
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}

/* Cheetah now allows sending the whole 64 bytes of data in the
 * interrupt packet, but we have no use for that.  However we do take
 * advantage of the new pipelining feature (ie. dispatch to multiple
 * cpus simultaneously).
 */
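/* A note on the 0x5555555555555555UL constants further down: each
 * dispatched target owns a two-bit busy/nack pair in the dispatch
 * status register, with busy in the low (even) bit.  ANDing with
 * 0x5555...5 therefore asks "is anyone still busy?", while ANDing
 * with its complement asks "did anyone NACK?".
 */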
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate, ver;
	int nack_busy_id, is_jbus, need_more;

	if (cpus_empty(mask))
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	{
		int i;

		for_each_cpu_mask(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (!is_jbus)
				target |= (nack_busy_id << 24);
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat;
		long stuck;

		stuck = 100000 * nack_busy_id;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (dispatch_stat == 0UL) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, cnt = 0;
					for_each_cpu_mask(i, mask) {
						cpu_clear(i, mask);
						cnt++;
						if (cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & 0x5555555555555555UL);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask(i, mask) {
				u64 check_mask;

				if (is_jbus)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}

/* Multi-cpu list version.  */
static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	struct trap_per_cpu *tb;
	u16 *cpu_list;
	u64 *mondo;
	cpumask_t error_mask;
	unsigned long flags, status;
	int cnt, retries, this_cpu, prev_sent, i;

	if (cpus_empty(mask))
		return;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list.  */
	cnt = 0;
	for_each_cpu_mask(i, mask)
		cpu_list[cnt++] = i;

	cpus_clear(error_mask);
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done.  */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err >= 0 &&
				    err == HV_CPU_STATE_ERROR) {
					cpu_list[i] = 0xffff;
					cpu_set(cpu, error_mask);
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	local_irq_restore(flags);

	if (unlikely(!cpus_empty(error_mask)))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "were in error state\n",
	       this_cpu);
	printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
	for_each_cpu_mask(i, error_mask)
		printk("%d ", i);
	printk("]\n");
	return;

fatal_mondo_timeout:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       "progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}

/* Send cross call to all processors mentioned in MASK
 * except self.
 */
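/* data0 packs the cross call: the MMU context (if any) rides in the
 * upper 32 bits and the low 32 bits of the xcall handler's text
 * address sit in the lower half.  (As I read the trap-table side, the
 * full handler address is rebuilt from those low bits, which is why
 * only half of the pointer needs to travel.)
 */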
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_xcall_deliver(data0, data1, data2, mask);
	else
		hypervisor_xcall_deliver(data0, data1, data2, mask);
	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.  Does not return
 * until remote CPUs are ready to execute <<func>> or have already
 * executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
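/* Usage sketch (with a made-up handler): smp_call_function(drain_fn,
 * NULL, 0, 1) runs drain_fn(NULL) on every other online cpu and spins
 * until each one has bumped data.finished; drain_fn must be fast and
 * must not block.
 */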
static int smp_call_function_mask(void (*func)(void *info), void *info,
				  int nonatomic, int wait, cpumask_t mask)
{
	struct call_data_struct data;
	int cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);

	cpu_clear(smp_processor_id(), mask);
	cpus = cpus_weight(mask);
	if (!cpus)
		goto out_unlock;

	call_data = &data;
	mb();

	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);

	/* Wait for response */
	while (atomic_read(&data.finished) != cpus)
		cpu_relax();

out_unlock:
	spin_unlock(&call_lock);

	return 0;
}

int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	return smp_call_function_mask(func, info, nonatomic, wait,
				      cpu_online_map);
}

void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

static __inline__ void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0;

		if (tlb_type == spitfire) {
			data0 =
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(pg_addr),
					       (u64) pg_addr,
					       mask);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(pg_addr),
					      0, mask);
#endif
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr = page_address(page);
	cpumask_t mask = cpu_online_map;
	u64 data0;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
				       __pa(pg_addr),
				       (u64) pg_addr,
				       mask);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
				      __pa(pg_addr),
				      0, mask);
#endif
	}
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
#endif
 flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}

static void __smp_receive_signal_mask(cpumask_t mask)
{
	smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
}

void smp_receive_signal(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (cpu_online(cpu))
		__smp_receive_signal_mask(mask);
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
}

void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus on which to run the TLB
 *    flush.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */
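/* Concretely: a single-threaded task that has only ever run on cpu 2
 * has cpu_vm_mask == { 2 }.  If it enters smp_flush_tlb_mm() below on
 * cpu 2 while mm_users == 1, the mask is collapsed to the calling cpu
 * and only a local flush happens; a threaded mm instead cross calls
 * every cpu remaining in cpu_vm_mask.
 */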
/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm->cpu_vm_mask);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

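/* The capture protocol in brief: the first smp_capture() caller raises
 * penguins_are_doing_time and cross calls xcall_capture, then spins
 * until smp_capture_registry reaches num_online_cpus(); each captured
 * cpu bumps the registry from smp_penguin_jailcell() and sits there
 * until smp_release() drops the flag.
 */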
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar_storestore_loadstore();
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_storeload_storestore();
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == 15, but PSTATE_IE is set, so
 * they can service tlb flush xcalls...
 */
extern void prom_world(int);

void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_storeload_storestore();
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

void __init smp_tick_init(void)
{
	boot_cpu_id = hard_smp_processor_id();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	cpu_data(boot_cpu_id).udelay_val = loops_per_jiffy;
}

void __devinit smp_prepare_boot_cpu(void)
{
}

void __devinit smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		unsigned int j;

		if (cpu_data(i).core_id == 0) {
			cpu_set(i, cpu_core_map[i]);
			continue;
		}

		for_each_possible_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpu_set(j, cpu_core_map[i]);
		}
	}

	for_each_possible_cpu(i) {
		unsigned int j;

		if (cpu_data(i).proc_id == -1) {
			cpu_set(i, cpu_sibling_map[i]);
			continue;
		}

		for_each_possible_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpu_set(j, cpu_sibling_map[i]);
		}
	}
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	printk(KERN_ERR "SMP: __cpu_disable() on cpu %d\n",
	       smp_processor_id());
	return -ENODEV;
}

void __cpu_die(unsigned int cpu)
{
	printk(KERN_ERR "SMP: __cpu_die(%u)\n", cpu);
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int i;

	for_each_online_cpu(i)
		bogosum += cpu_data(i).udelay_val;
	printk("Total of %ld processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       (long) num_online_cpus(),
	       bogosum/(500000/HZ),
	       (bogosum/(5000/HZ))%100);
}

void smp_send_reschedule(int cpu)
{
	smp_receive_signal(cpu);
}

/* This is a nop because we capture all other cpus
 * anyway when making the PROM active.
 */
void smp_send_stop(void)
{
}

unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);

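/* __per_cpu_offset(cpu) works out to __per_cpu_base +
 * ((unsigned long) cpu << __per_cpu_shift), so with, say, a 64KB
 * per-cpu image the sizing loop below leaves __per_cpu_shift at 16
 * and cpu 3's copy sits 3 * 64KB into the bootmem block (example
 * sizes only; the real figure comes from PERCPU_ENOUGH_ROOM).
 */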
void __init real_setup_per_cpu_areas(void)
{
	unsigned long goal, size, i;
	char *ptr;

	/* Copy section for each CPU (we discard the original) */
	goal = PERCPU_ENOUGH_ROOM;

	__per_cpu_shift = PAGE_SHIFT;
	for (size = PAGE_SIZE; size < goal; size <<= 1UL)
		__per_cpu_shift++;

	ptr = alloc_bootmem_pages(size * NR_CPUS);

	__per_cpu_base = ptr - __per_cpu_start;

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

	/* Setup %g5 for the boot cpu.  */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
}