[SPARC64]: Fix build regressions added by dr-cpu changes.
[pandora-kernel.git] arch/sparc64/kernel/smp.c
1 /* smp.c: Sparc64 SMP support.
2  *
3  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4  */
5
6 #include <linux/module.h>
7 #include <linux/kernel.h>
8 #include <linux/sched.h>
9 #include <linux/mm.h>
10 #include <linux/pagemap.h>
11 #include <linux/threads.h>
12 #include <linux/smp.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel_stat.h>
15 #include <linux/delay.h>
16 #include <linux/init.h>
17 #include <linux/spinlock.h>
18 #include <linux/fs.h>
19 #include <linux/seq_file.h>
20 #include <linux/cache.h>
21 #include <linux/jiffies.h>
22 #include <linux/profile.h>
23 #include <linux/bootmem.h>
24
25 #include <asm/head.h>
26 #include <asm/ptrace.h>
27 #include <asm/atomic.h>
28 #include <asm/tlbflush.h>
29 #include <asm/mmu_context.h>
30 #include <asm/cpudata.h>
31
32 #include <asm/irq.h>
33 #include <asm/irq_regs.h>
34 #include <asm/page.h>
35 #include <asm/pgtable.h>
36 #include <asm/oplib.h>
37 #include <asm/uaccess.h>
38 #include <asm/timer.h>
39 #include <asm/starfire.h>
40 #include <asm/tlb.h>
41 #include <asm/sections.h>
42 #include <asm/prom.h>
43 #include <asm/mdesc.h>
44 #include <asm/ldc.h>
45
46 extern void calibrate_delay(void);
47
48 int sparc64_multi_core __read_mostly;
49
50 /* Please don't make this stuff initdata!!!  --DaveM */
51 unsigned char boot_cpu_id;
52
53 cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
54 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
55 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
56         { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
57 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
58         { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
59
60 EXPORT_SYMBOL(cpu_possible_map);
61 EXPORT_SYMBOL(cpu_online_map);
62 EXPORT_SYMBOL(cpu_sibling_map);
63 EXPORT_SYMBOL(cpu_core_map);
64
65 static cpumask_t smp_commenced_mask;
66 static cpumask_t cpu_callout_map;
67
68 void smp_info(struct seq_file *m)
69 {
70         int i;
71         
72         seq_printf(m, "State:\n");
73         for_each_online_cpu(i)
74                 seq_printf(m, "CPU%d:\t\tonline\n", i);
75 }
76
77 void smp_bogo(struct seq_file *m)
78 {
79         int i;
80         
81         for_each_online_cpu(i)
82                 seq_printf(m,
83                            "Cpu%dBogo\t: %lu.%02lu\n"
84                            "Cpu%dClkTck\t: %016lx\n",
85                            i, cpu_data(i).udelay_val / (500000/HZ),
86                            (cpu_data(i).udelay_val / (5000/HZ)) % 100,
87                            i, cpu_data(i).clock_tick);
88 }
89
90 extern void setup_sparc64_timer(void);
91
92 static volatile unsigned long callin_flag = 0;
93
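/* Entry point for a secondary cpu, called from the startup trampoline
 * once its trap table and MMU are set up.  We finish per-cpu setup
 * (per-cpu offset, timers, delay calibration), spin until the boot cpu
 * adds us to smp_commenced_mask, and then mark ourselves online.
 */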
94 void __devinit smp_callin(void)
95 {
96         int cpuid = hard_smp_processor_id();
97         struct trap_per_cpu *tb = &trap_block[cpuid];
98
99         __local_per_cpu_offset = __per_cpu_offset(cpuid);
100
101         if (tlb_type == hypervisor)
102                 sun4v_ktsb_register();
103
104         __flush_tlb_all();
105
106         setup_sparc64_timer();
107
108         if (cheetah_pcache_forced_on)
109                 cheetah_enable_pcache();
110
111         local_irq_enable();
112
113         calibrate_delay();
114         cpu_data(cpuid).udelay_val = loops_per_jiffy;
115         callin_flag = 1;
116         __asm__ __volatile__("membar #Sync\n\t"
117                              "flush  %%g6" : : : "memory");
118
119         /* Clear this or we will die instantly when we
120          * schedule back to this idler...
121          */
122         current_thread_info()->new_child = 0;
123
124         /* Attach to the address space of init_task. */
125         atomic_inc(&init_mm.mm_count);
126         current->active_mm = &init_mm;
127
128         if (tb->hdesc) {
129                 kfree(tb->hdesc);
130                 tb->hdesc = NULL;
131         }
132
133         while (!cpu_isset(cpuid, smp_commenced_mask))
134                 rmb();
135
136         cpu_set(cpuid, cpu_online_map);
137
138         /* idle thread is expected to have preempt disabled */
139         preempt_disable();
140 }
141
142 void cpu_panic(void)
143 {
144         printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
145         panic("SMP bolixed\n");
146 }
147
148 /* This tick register synchronization scheme is taken entirely from
149  * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
150  *
151  * The only change I've made is to rework it so that the master
152  * initiates the synchronization instead of the slave. -DaveM
153  */
154
155 #define MASTER  0
156 #define SLAVE   (SMP_CACHE_BYTES/sizeof(unsigned long))
157
158 #define NUM_ROUNDS      64      /* magic value */
159 #define NUM_ITERS       5       /* likewise */
160
161 static DEFINE_SPINLOCK(itc_sync_lock);
162 static unsigned long go[SLAVE + 1];
163
164 #define DEBUG_TICK_SYNC 0
165
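/* Slave-side measurement for the tick sync handshake: over NUM_ITERS
 * round trips we read our own tick, signal the master via go[MASTER],
 * and record the master's tick reply from go[SLAVE].  The iteration
 * with the smallest round trip wins; the return value is the midpoint
 * of our t0/t1 window minus the master's timestamp, which the caller
 * negates to form the tick adjustment.
 */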
166 static inline long get_delta (long *rt, long *master)
167 {
168         unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
169         unsigned long tcenter, t0, t1, tm;
170         unsigned long i;
171
172         for (i = 0; i < NUM_ITERS; i++) {
173                 t0 = tick_ops->get_tick();
174                 go[MASTER] = 1;
175                 membar_storeload();
176                 while (!(tm = go[SLAVE]))
177                         rmb();
178                 go[SLAVE] = 0;
179                 wmb();
180                 t1 = tick_ops->get_tick();
181
182                 if (t1 - t0 < best_t1 - best_t0)
183                         best_t0 = t0, best_t1 = t1, best_tm = tm;
184         }
185
186         *rt = best_t1 - best_t0;
187         *master = best_tm - best_t0;
188
189         /* average best_t0 and best_t1 without overflow: */
190         tcenter = (best_t0/2 + best_t1/2);
191         if (best_t0 % 2 + best_t1 % 2 == 2)
192                 tcenter++;
193         return tcenter - best_tm;
194 }
195
196 void smp_synchronize_tick_client(void)
197 {
198         long i, delta, adj, adjust_latency = 0, done = 0;
199         unsigned long flags, rt, master_time_stamp, bound;
200 #if DEBUG_TICK_SYNC
201         struct {
202                 long rt;        /* roundtrip time */
203                 long master;    /* master's timestamp */
204                 long diff;      /* difference between midpoint and master's timestamp */
205                 long lat;       /* estimate of itc adjustment latency */
206         } t[NUM_ROUNDS];
207 #endif
208
209         go[MASTER] = 1;
210
211         while (go[MASTER])
212                 rmb();
213
214         local_irq_save(flags);
215         {
216                 for (i = 0; i < NUM_ROUNDS; i++) {
217                         delta = get_delta(&rt, &master_time_stamp);
218                         if (delta == 0) {
219                                 done = 1;       /* let's lock on to this... */
220                                 bound = rt;
221                         }
222
223                         if (!done) {
224                                 if (i > 0) {
225                                         adjust_latency += -delta;
226                                         adj = -delta + adjust_latency/4;
227                                 } else
228                                         adj = -delta;
229
230                                 tick_ops->add_tick(adj);
231                         }
232 #if DEBUG_TICK_SYNC
233                         t[i].rt = rt;
234                         t[i].master = master_time_stamp;
235                         t[i].diff = delta;
236                         t[i].lat = adjust_latency/4;
237 #endif
238                 }
239         }
240         local_irq_restore(flags);
241
242 #if DEBUG_TICK_SYNC
243         for (i = 0; i < NUM_ROUNDS; i++)
244                 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
245                        t[i].rt, t[i].master, t[i].diff, t[i].lat);
246 #endif
247
248         printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles, "
249                "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
250 }
251
252 static void smp_start_sync_tick_client(int cpu);
253
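/* Master side of the tick synchronization: kick the target cpu into
 * smp_synchronize_tick_client() and then feed it NUM_ROUNDS*NUM_ITERS
 * tick samples through the go[] handshake words, serialized by
 * itc_sync_lock.
 */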
254 static void smp_synchronize_one_tick(int cpu)
255 {
256         unsigned long flags, i;
257
258         go[MASTER] = 0;
259
260         smp_start_sync_tick_client(cpu);
261
262         /* wait for client to be ready */
263         while (!go[MASTER])
264                 rmb();
265
266         /* now let the client proceed into his loop */
267         go[MASTER] = 0;
268         membar_storeload();
269
270         spin_lock_irqsave(&itc_sync_lock, flags);
271         {
272                 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
273                         while (!go[MASTER])
274                                 rmb();
275                         go[MASTER] = 0;
276                         wmb();
277                         go[SLAVE] = tick_ops->get_tick();
278                         membar_storeload();
279                 }
280         }
281         spin_unlock_irqrestore(&itc_sync_lock, flags);
282 }
283
284 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
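/* When LDOM domaining is enabled we cannot start cpus through OBP.
 * Instead, build an hvtramp_descr describing the kernel's locked TTE
 * mapping(s) and the new cpu's thread pointer, then ask the hypervisor
 * to start the cpu at the hvtramp startup code with that descriptor.
 */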
285 static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
286 {
287         extern unsigned long sparc64_ttable_tl0;
288         extern unsigned long kern_locked_tte_data;
289         extern int bigkernel;
290         struct hvtramp_descr *hdesc;
291         unsigned long trampoline_ra;
292         struct trap_per_cpu *tb;
293         u64 tte_vaddr, tte_data;
294         unsigned long hv_err;
295
296         hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
297         if (!hdesc) {
298                 printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
299                        "hvtramp_descr.\n");
300                 return;
301         }
302
303         hdesc->cpu = cpu;
304         hdesc->num_mappings = (bigkernel ? 2 : 1);
305
306         tb = &trap_block[cpu];
307         tb->hdesc = hdesc;
308
309         hdesc->fault_info_va = (unsigned long) &tb->fault_info;
310         hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
311
312         hdesc->thread_reg = thread_reg;
313
314         tte_vaddr = (unsigned long) KERNBASE;
315         tte_data = kern_locked_tte_data;
316
317         hdesc->maps[0].vaddr = tte_vaddr;
318         hdesc->maps[0].tte   = tte_data;
319         if (bigkernel) {
320                 tte_vaddr += 0x400000;
321                 tte_data  += 0x400000;
322                 hdesc->maps[1].vaddr = tte_vaddr;
323                 hdesc->maps[1].tte   = tte_data;
324         }
325
326         trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
327
328         hv_err = sun4v_cpu_start(cpu, trampoline_ra,
329                                  kimage_addr_to_ra(&sparc64_ttable_tl0),
330                                  __pa(hdesc));
            if (hv_err)
                    printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
                           "gives error %lu\n", hv_err);
331 }
332 #endif
333
334 extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);
335
336 extern unsigned long sparc64_cpu_startup;
337
338 /* The OBP cpu startup callback truncates the 3rd arg cookie to
339  * 32-bits (I think) so to be safe we have it read the pointer
340  * contained here so we work on >4GB machines. -DaveM
341  */
342 static struct thread_info *cpu_new_thread = NULL;
343
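/* Boot one cpu: fork its idle thread, publish it via cpu_new_thread,
 * fire the platform-specific startup method (OBP or the hypervisor
 * LDOM path above), and poll callin_flag for up to about five seconds
 * waiting for the new cpu to call in.
 */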
344 static int __devinit smp_boot_one_cpu(unsigned int cpu)
345 {
346         unsigned long entry =
347                 (unsigned long)(&sparc64_cpu_startup);
348         unsigned long cookie =
349                 (unsigned long)(&cpu_new_thread);
350         struct task_struct *p;
351         int timeout, ret;
352
353         p = fork_idle(cpu);
354         callin_flag = 0;
355         cpu_new_thread = task_thread_info(p);
356         cpu_set(cpu, cpu_callout_map);
357
358         if (tlb_type == hypervisor) {
359                 /* Alloc the mondo queues, cpu will load them.  */
360                 sun4v_init_mondo_queues(0, cpu, 1, 0);
361
362 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
363                 if (ldom_domaining_enabled)
364                         ldom_startcpu_cpuid(cpu,
365                                             (unsigned long) cpu_new_thread);
366                 else
367 #endif
368                         prom_startcpu_cpuid(cpu, entry, cookie);
369         } else {
370                 struct device_node *dp = of_find_node_by_cpuid(cpu);
371
372                 prom_startcpu(dp->node, entry, cookie);
373         }
374
375         for (timeout = 0; timeout < 50000; timeout++) {
376                 if (callin_flag)
377                         break;
378                 udelay(100);
379         }
380
381         if (callin_flag) {
382                 ret = 0;
383         } else {
384                 printk("Processor %d is stuck.\n", cpu);
385                 cpu_clear(cpu, cpu_callout_map);
386                 ret = -ENODEV;
387         }
388         cpu_new_thread = NULL;
389
390         return ret;
391 }
392
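/* Deliver a single 3-word cross-call mondo to one cpu through the
 * Spitfire UDB interrupt dispatch registers.  We spin while the
 * dispatch status shows busy, and if the target NACKs us we back off
 * briefly and resend.
 */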
393 static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
394 {
395         u64 result, target;
396         int stuck, tmp;
397
398         if (this_is_starfire) {
399                 /* map to real upaid */
400                 cpu = (((cpu & 0x3c) << 1) |
401                         ((cpu & 0x40) >> 4) |
402                         (cpu & 0x3));
403         }
404
405         target = (cpu << 14) | 0x70;
406 again:
407         /* Ok, this is the real Spitfire Errata #54.
408          * One must read back from a UDB internal register
409          * after writes to the UDB interrupt dispatch, but
410          * before the membar Sync for that write.
411          * So we use the high UDB control register (ASI 0x7f,
412          * ADDR 0x20) for the dummy read. -DaveM
413          */
414         tmp = 0x40;
415         __asm__ __volatile__(
416         "wrpr   %1, %2, %%pstate\n\t"
417         "stxa   %4, [%0] %3\n\t"
418         "stxa   %5, [%0+%8] %3\n\t"
419         "add    %0, %8, %0\n\t"
420         "stxa   %6, [%0+%8] %3\n\t"
421         "membar #Sync\n\t"
422         "stxa   %%g0, [%7] %3\n\t"
423         "membar #Sync\n\t"
424         "mov    0x20, %%g1\n\t"
425         "ldxa   [%%g1] 0x7f, %%g0\n\t"
426         "membar #Sync"
427         : "=r" (tmp)
428         : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
429           "r" (data0), "r" (data1), "r" (data2), "r" (target),
430           "r" (0x10), "0" (tmp)
431         : "g1");
432
433         /* NOTE: PSTATE_IE is still clear. */
434         stuck = 100000;
435         do {
436                 __asm__ __volatile__("ldxa [%%g0] %1, %0"
437                         : "=r" (result)
438                         : "i" (ASI_INTR_DISPATCH_STAT));
439                 if (result == 0) {
440                         __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
441                                              : : "r" (pstate));
442                         return;
443                 }
444                 stuck -= 1;
445                 if (stuck == 0)
446                         break;
447         } while (result & 0x1);
448         __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
449                              : : "r" (pstate));
450         if (stuck == 0) {
451                 printk("CPU[%d]: mondo stuckage result[%016lx]\n",
452                        smp_processor_id(), result);
453         } else {
454                 udelay(2);
455                 goto again;
456         }
457 }
458
459 static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
460 {
461         u64 pstate;
462         int i;
463
464         __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
465         for_each_cpu_mask(i, mask)
466                 spitfire_xcall_helper(data0, data1, data2, pstate, i);
467 }
468
469 /* Cheetah now allows us to send the whole 64 bytes of data in the interrupt
470  * packet, but we have no use for that.  However we do take advantage of
471  * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
472  */
473 static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
474 {
475         u64 pstate, ver;
476         int nack_busy_id, is_jbus, need_more;
477
478         if (cpus_empty(mask))
479                 return;
480
481         /* Unfortunately, someone at Sun had the brilliant idea to make the
482          * busy/nack fields hard-coded by ITID number for this Ultra-III
483          * derivative processor.
484          */
485         __asm__ ("rdpr %%ver, %0" : "=r" (ver));
486         is_jbus = ((ver >> 32) == __JALAPENO_ID ||
487                    (ver >> 32) == __SERRANO_ID);
488
489         __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
490
491 retry:
492         need_more = 0;
493         __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
494                              : : "r" (pstate), "i" (PSTATE_IE));
495
496         /* Setup the dispatch data registers. */
497         __asm__ __volatile__("stxa      %0, [%3] %6\n\t"
498                              "stxa      %1, [%4] %6\n\t"
499                              "stxa      %2, [%5] %6\n\t"
500                              "membar    #Sync\n\t"
501                              : /* no outputs */
502                              : "r" (data0), "r" (data1), "r" (data2),
503                                "r" (0x40), "r" (0x50), "r" (0x60),
504                                "i" (ASI_INTR_W));
505
506         nack_busy_id = 0;
507         {
508                 int i;
509
510                 for_each_cpu_mask(i, mask) {
511                         u64 target = (i << 14) | 0x70;
512
513                         if (!is_jbus)
514                                 target |= (nack_busy_id << 24);
515                         __asm__ __volatile__(
516                                 "stxa   %%g0, [%0] %1\n\t"
517                                 "membar #Sync\n\t"
518                                 : /* no outputs */
519                                 : "r" (target), "i" (ASI_INTR_W));
520                         nack_busy_id++;
521                         if (nack_busy_id == 32) {
522                                 need_more = 1;
523                                 break;
524                         }
525                 }
526         }
527
528         /* Now, poll for completion. */
529         {
530                 u64 dispatch_stat;
531                 long stuck;
532
533                 stuck = 100000 * nack_busy_id;
534                 do {
535                         __asm__ __volatile__("ldxa      [%%g0] %1, %0"
536                                              : "=r" (dispatch_stat)
537                                              : "i" (ASI_INTR_DISPATCH_STAT));
538                         if (dispatch_stat == 0UL) {
539                                 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
540                                                      : : "r" (pstate));
541                                 if (unlikely(need_more)) {
542                                         int i, cnt = 0;
543                                         for_each_cpu_mask(i, mask) {
544                                                 cpu_clear(i, mask);
545                                                 cnt++;
546                                                 if (cnt == 32)
547                                                         break;
548                                         }
549                                         goto retry;
550                                 }
551                                 return;
552                         }
553                         if (!--stuck)
554                                 break;
555                 } while (dispatch_stat & 0x5555555555555555UL);
556
557                 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
558                                      : : "r" (pstate));
559
560                 if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
561                         /* Busy bits will not clear, continue instead
562                          * of freezing up on this cpu.
563                          */
564                         printk("CPU[%d]: mondo stuckage result[%016lx]\n",
565                                smp_processor_id(), dispatch_stat);
566                 } else {
567                         int i, this_busy_nack = 0;
568
569                         /* Delay some random time with interrupts enabled
570                          * to prevent deadlock.
571                          */
572                         udelay(2 * nack_busy_id);
573
574                         /* Clear out the mask bits for cpus which did not
575                          * NACK us.
576                          */
577                         for_each_cpu_mask(i, mask) {
578                                 u64 check_mask;
579
580                                 if (is_jbus)
581                                         check_mask = (0x2UL << (2*i));
582                                 else
583                                         check_mask = (0x2UL <<
584                                                       this_busy_nack);
585                                 if ((dispatch_stat & check_mask) == 0)
586                                         cpu_clear(i, mask);
587                                 this_busy_nack += 2;
588                                 if (this_busy_nack == 64)
589                                         break;
590                         }
591
592                         goto retry;
593                 }
594         }
595 }
596
597 /* Multi-cpu list version.  */
598 static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
599 {
600         struct trap_per_cpu *tb;
601         u16 *cpu_list;
602         u64 *mondo;
603         cpumask_t error_mask;
604         unsigned long flags, status;
605         int cnt, retries, this_cpu, prev_sent, i;
606
607         if (cpus_empty(mask))
608                 return;
609
610         /* We have to do this whole thing with interrupts fully disabled.
611          * Otherwise if we send an xcall from interrupt context it will
612          * corrupt both our mondo block and cpu list state.
613          *
614          * One consequence of this is that we cannot use timeout mechanisms
615          * that depend upon interrupts being delivered locally.  So, for
616          * example, we cannot sample jiffies and expect it to advance.
617          *
618          * Fortunately, udelay() uses %stick/%tick so we can use that.
619          */
620         local_irq_save(flags);
621
622         this_cpu = smp_processor_id();
623         tb = &trap_block[this_cpu];
624
625         mondo = __va(tb->cpu_mondo_block_pa);
626         mondo[0] = data0;
627         mondo[1] = data1;
628         mondo[2] = data2;
629         wmb();
630
631         cpu_list = __va(tb->cpu_list_pa);
632
633         /* Setup the initial cpu list.  */
634         cnt = 0;
635         for_each_cpu_mask(i, mask)
636                 cpu_list[cnt++] = i;
637
638         cpus_clear(error_mask);
639         retries = 0;
640         prev_sent = 0;
641         do {
642                 int forward_progress, n_sent;
643
644                 status = sun4v_cpu_mondo_send(cnt,
645                                               tb->cpu_list_pa,
646                                               tb->cpu_mondo_block_pa);
647
648                 /* HV_EOK means all cpus received the xcall, we're done.  */
649                 if (likely(status == HV_EOK))
650                         break;
651
652                 /* First, see if we made any forward progress.
653                  *
654                  * The hypervisor indicates successful sends by setting
655                  * cpu list entries to the value 0xffff.
656                  */
657                 n_sent = 0;
658                 for (i = 0; i < cnt; i++) {
659                         if (likely(cpu_list[i] == 0xffff))
660                                 n_sent++;
661                 }
662
663                 forward_progress = 0;
664                 if (n_sent > prev_sent)
665                         forward_progress = 1;
666
667                 prev_sent = n_sent;
668
669                 /* If we get a HV_ECPUERROR, then one or more of the cpus
670                  * in the list are in error state.  Use the cpu_state()
671                  * hypervisor call to find out which cpus are in error state.
672                  */
673                 if (unlikely(status == HV_ECPUERROR)) {
674                         for (i = 0; i < cnt; i++) {
675                                 long err;
676                                 u16 cpu;
677
678                                 cpu = cpu_list[i];
679                                 if (cpu == 0xffff)
680                                         continue;
681
682                                 err = sun4v_cpu_state(cpu);
683                                 if (err >= 0 &&
684                                     err == HV_CPU_STATE_ERROR) {
685                                         cpu_list[i] = 0xffff;
686                                         cpu_set(cpu, error_mask);
687                                 }
688                         }
689                 } else if (unlikely(status != HV_EWOULDBLOCK))
690                         goto fatal_mondo_error;
691
692                 /* Don't bother rewriting the CPU list, just leave the
693                  * 0xffff and non-0xffff entries in there and the
694                  * hypervisor will do the right thing.
695                  *
696                  * Only advance timeout state if we didn't make any
697                  * forward progress.
698                  */
699                 if (unlikely(!forward_progress)) {
700                         if (unlikely(++retries > 10000))
701                                 goto fatal_mondo_timeout;
702
703                         /* Delay a little bit to let other cpus catch up
704                          * on their cpu mondo queue work.
705                          */
706                         udelay(2 * cnt);
707                 }
708         } while (1);
709
710         local_irq_restore(flags);
711
712         if (unlikely(!cpus_empty(error_mask)))
713                 goto fatal_mondo_cpu_error;
714
715         return;
716
717 fatal_mondo_cpu_error:
718         printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
719                "were in error state\n",
720                this_cpu);
721         printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
722         for_each_cpu_mask(i, error_mask)
723                 printk("%d ", i);
724         printk("]\n");
725         return;
726
727 fatal_mondo_timeout:
728         local_irq_restore(flags);
729         printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
730                "progress after %d retries.\n",
731                this_cpu, retries);
732         goto dump_cpu_list_and_out;
733
734 fatal_mondo_error:
735         local_irq_restore(flags);
736         printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
737                this_cpu, status);
738         printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
739                "mondo_block_pa(%lx)\n",
740                this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
741
742 dump_cpu_list_and_out:
743         printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
744         for (i = 0; i < cnt; i++)
745                 printk("%u ", cpu_list[i]);
746         printk("]\n");
747 }
748
749 /* Send cross call to all processors mentioned in MASK
750  * except self.
751  */
752 static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
753 {
754         u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
755         int this_cpu = get_cpu();
756
757         cpus_and(mask, mask, cpu_online_map);
758         cpu_clear(this_cpu, mask);
759
760         if (tlb_type == spitfire)
761                 spitfire_xcall_deliver(data0, data1, data2, mask);
762         else if (tlb_type == cheetah || tlb_type == cheetah_plus)
763                 cheetah_xcall_deliver(data0, data1, data2, mask);
764         else
765                 hypervisor_xcall_deliver(data0, data1, data2, mask);
766         /* NOTE: Caller runs local copy on master. */
767
768         put_cpu();
769 }
770
771 extern unsigned long xcall_sync_tick;
772
773 static void smp_start_sync_tick_client(int cpu)
774 {
775         cpumask_t mask = cpumask_of_cpu(cpu);
776
777         smp_cross_call_masked(&xcall_sync_tick,
778                               0, 0, 0, mask);
779 }
780
781 /* Send cross call to all processors except self. */
782 #define smp_cross_call(func, ctx, data1, data2) \
783         smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
784
785 struct call_data_struct {
786         void (*func) (void *info);
787         void *info;
788         atomic_t finished;
789         int wait;
790 };
791
792 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
793 static struct call_data_struct *call_data;
794
795 extern unsigned long xcall_call_function;
796
797 /**
798  * smp_call_function(): Run a function on all other CPUs.
799  * @func: The function to run. This must be fast and non-blocking.
800  * @info: An arbitrary pointer to pass to the function.
801  * @nonatomic: currently unused.
802  * @wait: If true, wait (atomically) until function has completed on other CPUs.
803  *
804  * Returns 0 on success, else a negative status code. Does not return until
805  * remote CPUs are nearly ready to execute <<func>> or have already executed it.
806  *
807  * You must not call this function with disabled interrupts or from a
808  * hardware interrupt handler or from a bottom half handler.
809  */
810 static int smp_call_function_mask(void (*func)(void *info), void *info,
811                                   int nonatomic, int wait, cpumask_t mask)
812 {
813         struct call_data_struct data;
814         int cpus;
815
816         /* Can deadlock when called with interrupts disabled */
817         WARN_ON(irqs_disabled());
818
819         data.func = func;
820         data.info = info;
821         atomic_set(&data.finished, 0);
822         data.wait = wait;
823
824         spin_lock(&call_lock);
825
826         cpu_clear(smp_processor_id(), mask);
827         cpus = cpus_weight(mask);
828         if (!cpus)
829                 goto out_unlock;
830
831         call_data = &data;
832         mb();
833
834         smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
835
836         /* Wait for response */
837         while (atomic_read(&data.finished) != cpus)
838                 cpu_relax();
839
840 out_unlock:
841         spin_unlock(&call_lock);
842
843         return 0;
844 }
845
846 int smp_call_function(void (*func)(void *info), void *info,
847                       int nonatomic, int wait)
848 {
849         return smp_call_function_mask(func, info, nonatomic, wait,
850                                       cpu_online_map);
851 }
852
853 void smp_call_function_client(int irq, struct pt_regs *regs)
854 {
855         void (*func) (void *info) = call_data->func;
856         void *info = call_data->info;
857
858         clear_softint(1 << irq);
859         if (call_data->wait) {
860                 /* let initiator proceed only after completion */
861                 func(info);
862                 atomic_inc(&call_data->finished);
863         } else {
864                 /* let initiator proceed after getting data */
865                 atomic_inc(&call_data->finished);
866                 func(info);
867         }
868 }
869
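/* Cross-call handler for smp_tsb_sync(): if this cpu is currently
 * running the given mm (judged by the PGD physical address cached in
 * our trap block), reload the TSB registers via tsb_context_switch().
 */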
870 static void tsb_sync(void *info)
871 {
872         struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
873         struct mm_struct *mm = info;
874
875         /* It is not valid to test "current->active_mm == mm" here.
876          *
877          * The value of "current" is not changed atomically with
878          * switch_mm().  But that's OK, we just need to check the
879          * current cpu's trap block PGD physical address.
880          */
881         if (tp->pgd_paddr == __pa(mm->pgd))
882                 tsb_context_switch(mm);
883 }
884
885 void smp_tsb_sync(struct mm_struct *mm)
886 {
887         smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
888 }
889
890 extern unsigned long xcall_flush_tlb_mm;
891 extern unsigned long xcall_flush_tlb_pending;
892 extern unsigned long xcall_flush_tlb_kernel_range;
893 extern unsigned long xcall_report_regs;
894 extern unsigned long xcall_receive_signal;
895 extern unsigned long xcall_new_mmu_context_version;
896
897 #ifdef DCACHE_ALIASING_POSSIBLE
898 extern unsigned long xcall_flush_dcache_page_cheetah;
899 #endif
900 extern unsigned long xcall_flush_dcache_page_spitfire;
901
902 #ifdef CONFIG_DEBUG_DCFLUSH
903 extern atomic_t dcpage_flushes;
904 extern atomic_t dcpage_flushes_xcall;
905 #endif
906
907 static __inline__ void __local_flush_dcache_page(struct page *page)
908 {
909 #ifdef DCACHE_ALIASING_POSSIBLE
910         __flush_dcache_page(page_address(page),
911                             ((tlb_type == spitfire) &&
912                              page_mapping(page) != NULL));
913 #else
914         if (page_mapping(page) != NULL &&
915             tlb_type == spitfire)
916                 __flush_icache_page(__pa(page_address(page)));
917 #endif
918 }
919
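/* Flush the D-cache lines of a page on behalf of one particular cpu.
 * Nothing needs to be done on sun4v; otherwise flush locally if that
 * cpu is us, or cross-call the owning cpu with the appropriate
 * spitfire/cheetah flush routine.
 */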
920 void smp_flush_dcache_page_impl(struct page *page, int cpu)
921 {
922         cpumask_t mask = cpumask_of_cpu(cpu);
923         int this_cpu;
924
925         if (tlb_type == hypervisor)
926                 return;
927
928 #ifdef CONFIG_DEBUG_DCFLUSH
929         atomic_inc(&dcpage_flushes);
930 #endif
931
932         this_cpu = get_cpu();
933
934         if (cpu == this_cpu) {
935                 __local_flush_dcache_page(page);
936         } else if (cpu_online(cpu)) {
937                 void *pg_addr = page_address(page);
938                 u64 data0;
939
940                 if (tlb_type == spitfire) {
941                         data0 =
942                                 ((u64)&xcall_flush_dcache_page_spitfire);
943                         if (page_mapping(page) != NULL)
944                                 data0 |= ((u64)1 << 32);
945                         spitfire_xcall_deliver(data0,
946                                                __pa(pg_addr),
947                                                (u64) pg_addr,
948                                                mask);
949                 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
950 #ifdef DCACHE_ALIASING_POSSIBLE
951                         data0 =
952                                 ((u64)&xcall_flush_dcache_page_cheetah);
953                         cheetah_xcall_deliver(data0,
954                                               __pa(pg_addr),
955                                               0, mask);
956 #endif
957                 }
958 #ifdef CONFIG_DEBUG_DCFLUSH
959                 atomic_inc(&dcpage_flushes_xcall);
960 #endif
961         }
962
963         put_cpu();
964 }
965
966 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
967 {
968         void *pg_addr = page_address(page);
969         cpumask_t mask = cpu_online_map;
970         u64 data0;
971         int this_cpu;
972
973         if (tlb_type == hypervisor)
974                 return;
975
976         this_cpu = get_cpu();
977
978         cpu_clear(this_cpu, mask);
979
980 #ifdef CONFIG_DEBUG_DCFLUSH
981         atomic_inc(&dcpage_flushes);
982 #endif
983         if (cpus_empty(mask))
984                 goto flush_self;
985         if (tlb_type == spitfire) {
986                 data0 = ((u64)&xcall_flush_dcache_page_spitfire);
987                 if (page_mapping(page) != NULL)
988                         data0 |= ((u64)1 << 32);
989                 spitfire_xcall_deliver(data0,
990                                        __pa(pg_addr),
991                                        (u64) pg_addr,
992                                        mask);
993         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
994 #ifdef DCACHE_ALIASING_POSSIBLE
995                 data0 = ((u64)&xcall_flush_dcache_page_cheetah);
996                 cheetah_xcall_deliver(data0,
997                                       __pa(pg_addr),
998                                       0, mask);
999 #endif
1000         }
1001 #ifdef CONFIG_DEBUG_DCFLUSH
1002         atomic_inc(&dcpage_flushes_xcall);
1003 #endif
1004  flush_self:
1005         __local_flush_dcache_page(page);
1006
1007         put_cpu();
1008 }
1009
1010 static void __smp_receive_signal_mask(cpumask_t mask)
1011 {
1012         smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
1013 }
1014
1015 void smp_receive_signal(int cpu)
1016 {
1017         cpumask_t mask = cpumask_of_cpu(cpu);
1018
1019         if (cpu_online(cpu))
1020                 __smp_receive_signal_mask(mask);
1021 }
1022
1023 void smp_receive_signal_client(int irq, struct pt_regs *regs)
1024 {
1025         clear_softint(1 << irq);
1026 }
1027
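/* Cross-call handler: the global MMU context version has been bumped.
 * If our active_mm's context is now stale, allocate a fresh one and
 * reload the secondary context register, flushing the old translations.
 */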
1028 void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
1029 {
1030         struct mm_struct *mm;
1031         unsigned long flags;
1032
1033         clear_softint(1 << irq);
1034
1035         /* See if we need to allocate a new TLB context because
1036          * the version of the one we are using is now out of date.
1037          */
1038         mm = current->active_mm;
1039         if (unlikely(!mm || (mm == &init_mm)))
1040                 return;
1041
1042         spin_lock_irqsave(&mm->context.lock, flags);
1043
1044         if (unlikely(!CTX_VALID(mm->context)))
1045                 get_new_mmu_context(mm);
1046
1047         spin_unlock_irqrestore(&mm->context.lock, flags);
1048
1049         load_secondary_context(mm);
1050         __flush_tlb_mm(CTX_HWBITS(mm->context),
1051                        SECONDARY_CONTEXT);
1052 }
1053
1054 void smp_new_mmu_context_version(void)
1055 {
1056         smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
1057 }
1058
1059 void smp_report_regs(void)
1060 {
1061         smp_cross_call(&xcall_report_regs, 0, 0, 0);
1062 }
1063
1064 /* We know that the window frames of the user have been flushed
1065  * to the stack before we get here because all callers of us
1066  * are flush_tlb_*() routines, and these run after flush_cache_*()
1067  * which performs the flushw.
1068  *
1069  * The SMP TLB coherency scheme we use works as follows:
1070  *
1071  * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
1072  *    space has (potentially) executed on, this is the heuristic
1073  *    we use to avoid doing cross calls.
1074  *
1075  *    Also, for flushing from kswapd and also for clones, we
1076  *    use cpu_vm_mask as the list of cpus to make run the TLB.
1077  *
1078  * 2) TLB context numbers are shared globally across all processors
1079  *    in the system, this allows us to play several games to avoid
1080  *    cross calls.
1081  *
1082  *    One invariant is that when a cpu switches to a process, and
1083  *    that process's tsk->active_mm->cpu_vm_mask does not have the
1084  *    current cpu's bit set, that tlb context is flushed locally.
1085  *
1086  *    If the address space is non-shared (i.e. mm->mm_users == 1) we avoid
1087  *    cross calls when we want to flush the currently running process's
1088  *    tlb state.  This is done by clearing all cpu bits except the current
1089  *    processor's in current->active_mm->cpu_vm_mask and performing the
1090  *    flush locally only.  This will force any subsequent cpus which run
1091  *    this task to flush the context from the local tlb if the process
1092  *    migrates to another cpu (again).
1093  *
1094  * 3) For shared address spaces (threads) and swapping we bite the
1095  *    bullet for most cases and perform the cross call (but only to
1096  *    the cpus listed in cpu_vm_mask).
1097  *
1098  *    The performance gain from "optimizing" away the cross call for threads is
1099  *    questionable (in theory the big win for threads is the massive sharing of
1100  *    address space state across processors).
1101  */
1102
1103 /* This currently is only used by the hugetlb arch pre-fault
1104  * hook on UltraSPARC-III+ and later when changing the pagesize
1105  * bits of the context register for an address space.
1106  */
1107 void smp_flush_tlb_mm(struct mm_struct *mm)
1108 {
1109         u32 ctx = CTX_HWBITS(mm->context);
1110         int cpu = get_cpu();
1111
1112         if (atomic_read(&mm->mm_users) == 1) {
1113                 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
1114                 goto local_flush_and_out;
1115         }
1116
1117         smp_cross_call_masked(&xcall_flush_tlb_mm,
1118                               ctx, 0, 0,
1119                               mm->cpu_vm_mask);
1120
1121 local_flush_and_out:
1122         __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
1123
1124         put_cpu();
1125 }
1126
1127 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
1128 {
1129         u32 ctx = CTX_HWBITS(mm->context);
1130         int cpu = get_cpu();
1131
1132         if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
1133                 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
1134         else
1135                 smp_cross_call_masked(&xcall_flush_tlb_pending,
1136                                       ctx, nr, (unsigned long) vaddrs,
1137                                       mm->cpu_vm_mask);
1138
1139         __flush_tlb_pending(ctx, nr, vaddrs);
1140
1141         put_cpu();
1142 }
1143
1144 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
1145 {
1146         start &= PAGE_MASK;
1147         end    = PAGE_ALIGN(end);
1148         if (start != end) {
1149                 smp_cross_call(&xcall_flush_tlb_kernel_range,
1150                                0, start, end);
1151
1152                 __flush_tlb_kernel_range(start, end);
1153         }
1154 }
1155
1156 /* CPU capture. */
1157 /* #define CAPTURE_DEBUG */
1158 extern unsigned long xcall_capture;
1159
1160 static atomic_t smp_capture_depth = ATOMIC_INIT(0);
1161 static atomic_t smp_capture_registry = ATOMIC_INIT(0);
1162 static unsigned long penguins_are_doing_time;
1163
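/* smp_capture()/smp_release() corral every other online cpu into the
 * smp_penguin_jailcell() spin loop below and let them go again
 * afterwards.  Capture calls may nest; only the outermost capture and
 * release actually do the work.
 */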
1164 void smp_capture(void)
1165 {
1166         int result = atomic_add_ret(1, &smp_capture_depth);
1167
1168         if (result == 1) {
1169                 int ncpus = num_online_cpus();
1170
1171 #ifdef CAPTURE_DEBUG
1172                 printk("CPU[%d]: Sending penguins to jail...",
1173                        smp_processor_id());
1174 #endif
1175                 penguins_are_doing_time = 1;
1176                 membar_storestore_loadstore();
1177                 atomic_inc(&smp_capture_registry);
1178                 smp_cross_call(&xcall_capture, 0, 0, 0);
1179                 while (atomic_read(&smp_capture_registry) != ncpus)
1180                         rmb();
1181 #ifdef CAPTURE_DEBUG
1182                 printk("done\n");
1183 #endif
1184         }
1185 }
1186
1187 void smp_release(void)
1188 {
1189         if (atomic_dec_and_test(&smp_capture_depth)) {
1190 #ifdef CAPTURE_DEBUG
1191                 printk("CPU[%d]: Giving pardon to "
1192                        "imprisoned penguins\n",
1193                        smp_processor_id());
1194 #endif
1195                 penguins_are_doing_time = 0;
1196                 membar_storeload_storestore();
1197                 atomic_dec(&smp_capture_registry);
1198         }
1199 }
1200
1201 /* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
1202  * can service tlb flush xcalls...
1203  */
1204 extern void prom_world(int);
1205
1206 void smp_penguin_jailcell(int irq, struct pt_regs *regs)
1207 {
1208         clear_softint(1 << irq);
1209
1210         preempt_disable();
1211
1212         __asm__ __volatile__("flushw");
1213         prom_world(1);
1214         atomic_inc(&smp_capture_registry);
1215         membar_storeload_storestore();
1216         while (penguins_are_doing_time)
1217                 rmb();
1218         atomic_dec(&smp_capture_registry);
1219         prom_world(0);
1220
1221         preempt_enable();
1222 }
1223
1224 void __init smp_tick_init(void)
1225 {
1226         boot_cpu_id = hard_smp_processor_id();
1227 }
1228
1229 /* /proc/profile writes can call this, don't __init it please. */
1230 int setup_profiling_timer(unsigned int multiplier)
1231 {
1232         return -EINVAL;
1233 }
1234
1235 void __init smp_prepare_cpus(unsigned int max_cpus)
1236 {
1237         cpu_data(boot_cpu_id).udelay_val = loops_per_jiffy;
1238 }
1239
1240 void __devinit smp_prepare_boot_cpu(void)
1241 {
1242 }
1243
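/* Build cpu_core_map[] and cpu_sibling_map[]: cpus that report the
 * same core_id land in each other's core map, and cpus that report the
 * same proc_id land in each other's sibling map.  Cpus with no such
 * information (core_id 0 or proc_id -1) only map to themselves.
 */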
1244 void __devinit smp_fill_in_sib_core_maps(void)
1245 {
1246         unsigned int i;
1247
1248         for_each_possible_cpu(i) {
1249                 unsigned int j;
1250
1251                 if (cpu_data(i).core_id == 0) {
1252                         cpu_set(i, cpu_core_map[i]);
1253                         continue;
1254                 }
1255
1256                 for_each_possible_cpu(j) {
1257                         if (cpu_data(i).core_id ==
1258                             cpu_data(j).core_id)
1259                                 cpu_set(j, cpu_core_map[i]);
1260                 }
1261         }
1262
1263         for_each_possible_cpu(i) {
1264                 unsigned int j;
1265
1266                 if (cpu_data(i).proc_id == -1) {
1267                         cpu_set(i, cpu_sibling_map[i]);
1268                         continue;
1269                 }
1270
1271                 for_each_possible_cpu(j) {
1272                         if (cpu_data(i).proc_id ==
1273                             cpu_data(j).proc_id)
1274                                 cpu_set(j, cpu_sibling_map[i]);
1275                 }
1276         }
1277 }
1278
1279 int __cpuinit __cpu_up(unsigned int cpu)
1280 {
1281         int ret = smp_boot_one_cpu(cpu);
1282
1283         if (!ret) {
1284                 cpu_set(cpu, smp_commenced_mask);
1285                 while (!cpu_isset(cpu, cpu_online_map))
1286                         mb();
1287                 if (!cpu_isset(cpu, cpu_online_map)) {
1288                         ret = -ENODEV;
1289                 } else {
1290                         /* On SUN4V, writes to %tick and %stick are
1291                          * not allowed.
1292                          */
1293                         if (tlb_type != hypervisor)
1294                                 smp_synchronize_one_tick(cpu);
1295                 }
1296         }
1297         return ret;
1298 }
1299
1300 #ifdef CONFIG_HOTPLUG_CPU
1301 int __cpu_disable(void)
1302 {
1303         printk(KERN_ERR "SMP: __cpu_disable() on cpu %d\n",
1304                smp_processor_id());
1305         return -ENODEV;
1306 }
1307
1308 void __cpu_die(unsigned int cpu)
1309 {
1310         printk(KERN_ERR "SMP: __cpu_die(%u)\n", cpu);
1311 }
1312 #endif
1313
1314 void __init smp_cpus_done(unsigned int max_cpus)
1315 {
1316         unsigned long bogosum = 0;
1317         int i;
1318
1319         for_each_online_cpu(i)
1320                 bogosum += cpu_data(i).udelay_val;
1321         printk("Total of %ld processors activated "
1322                "(%lu.%02lu BogoMIPS).\n",
1323                (long) num_online_cpus(),
1324                bogosum/(500000/HZ),
1325                (bogosum/(5000/HZ))%100);
1326 }
1327
1328 void smp_send_reschedule(int cpu)
1329 {
1330         smp_receive_signal(cpu);
1331 }
1332
1333 /* This is a nop because we capture all other cpus
1334  * anyways when making the PROM active.
1335  */
1336 void smp_send_stop(void)
1337 {
1338 }
1339
1340 unsigned long __per_cpu_base __read_mostly;
1341 unsigned long __per_cpu_shift __read_mostly;
1342
1343 EXPORT_SYMBOL(__per_cpu_base);
1344 EXPORT_SYMBOL(__per_cpu_shift);
1345
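/* Carve out one copy of the per-cpu data section for every cpu from
 * bootmem.  The copy size is rounded up to a power of two so that a
 * cpu's area can be located with a simple shift by __per_cpu_shift,
 * and the boot cpu's %g5 per-cpu base is set up at the end.
 */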
1346 void __init real_setup_per_cpu_areas(void)
1347 {
1348         unsigned long goal, size, i;
1349         char *ptr;
1350
1351         /* Copy section for each CPU (we discard the original) */
1352         goal = PERCPU_ENOUGH_ROOM;
1353
1354         __per_cpu_shift = PAGE_SHIFT;
1355         for (size = PAGE_SIZE; size < goal; size <<= 1UL)
1356                 __per_cpu_shift++;
1357
1358         ptr = alloc_bootmem_pages(size * NR_CPUS);
1359
1360         __per_cpu_base = ptr - __per_cpu_start;
1361
1362         for (i = 0; i < NR_CPUS; i++, ptr += size)
1363                 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
1364
1365         /* Setup %g5 for the boot cpu.  */
1366         __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
1367 }