arch/ppc/platforms/pmac_smp.c
/*
 * SMP support for power macintosh.
 *
 * We support both the old "powersurge" SMP architecture
 * and the current Core99 (G4 PowerMac) machines.
 *
 * Note that we don't support the very first rev. of the
 * Apple/DayStar dual-CPU board, the one with the funky
 * watchdog. Hopefully none of these are out there except
 * maybe internally at Apple. I should probably still add some
 * code to detect this card and disable SMP. --BenH.
 *
 * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
 * and Ben Herrenschmidt <benh@kernel.crashing.org>.
 *
 * Support for DayStar quad CPU cards
 * Copyright (C) XLR8, Inc. 1994-2000
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/hardirq.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/residual.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/time.h>
#include <asm/open_pic.h>
#include <asm/cacheflush.h>
#include <asm/keylargo.h>

/*
 * Powersurge (old powermac SMP) support.
 */

extern void __secondary_start_psurge(void);
extern void __secondary_start_psurge2(void);    /* Temporary horrible hack */
extern void __secondary_start_psurge3(void);    /* Temporary horrible hack */

/* Addresses for powersurge registers */
#define HAMMERHEAD_BASE         0xf8000000
#define HHEAD_CONFIG            0x90
#define HHEAD_SEC_INTR          0xc0

/* register for interrupting the primary processor on the powersurge */
/* N.B. this is actually the ethernet ROM! */
#define PSURGE_PRI_INTR         0xf3019000

/* register for storing the start address for the secondary processor */
/* N.B. this is the PCI config space address register for the 1st bridge */
#define PSURGE_START            0xf2800000

/* Daystar/XLR8 4-CPU card */
#define PSURGE_QUAD_REG_ADDR    0xf8800000

#define PSURGE_QUAD_IRQ_SET     0
#define PSURGE_QUAD_IRQ_CLR     1
#define PSURGE_QUAD_IRQ_PRIMARY 2
#define PSURGE_QUAD_CKSTOP_CTL  3
#define PSURGE_QUAD_PRIMARY_ARB 4
#define PSURGE_QUAD_BOARD_ID    6
#define PSURGE_QUAD_WHICH_CPU   7
#define PSURGE_QUAD_CKSTOP_RDBK 8
#define PSURGE_QUAD_RESET_CTL   11

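/*
 * Quad-card registers are spaced 16 bytes apart; only the low nibble of the
 * byte at offset 4 within each slot carries data, hence the shift and mask
 * in the accessors below.
 */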
#define PSURGE_QUAD_OUT(r, v)   (out_8(quad_base + ((r) << 4) + 4, (v)))
#define PSURGE_QUAD_IN(r)       (in_8(quad_base + ((r) << 4) + 4) & 0x0f)
#define PSURGE_QUAD_BIS(r, v)   (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
#define PSURGE_QUAD_BIC(r, v)   (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))

/* virtual addresses for the above */
static volatile u8 __iomem *hhead_base;
static volatile u8 __iomem *quad_base;
static volatile u32 __iomem *psurge_pri_intr;
static volatile u8 __iomem *psurge_sec_intr;
static volatile u32 __iomem *psurge_start;

/* values for psurge_type */
#define PSURGE_NONE             -1
#define PSURGE_DUAL             0
#define PSURGE_QUAD_OKEE        1
#define PSURGE_QUAD_COTTON      2
#define PSURGE_QUAD_ICEGRASS    3

/* what sort of powersurge board we have */
static int psurge_type = PSURGE_NONE;

/* L2 and L3 cache settings to pass from CPU0 to CPU1 */
static volatile long core99_l2_cache;
static volatile long core99_l3_cache;

/* Timebase freeze GPIO */
static unsigned int core99_tb_gpio;

/* Sync flag for HW tb sync */
static volatile int sec_tb_reset = 0;
static unsigned int pri_tb_hi, pri_tb_lo;
static unsigned int pri_tb_stamp;

static void __init core99_init_caches(int cpu)
{
        if (!cpu_has_feature(CPU_FTR_L2CR))
                return;

        if (cpu == 0) {
                core99_l2_cache = _get_L2CR();
                printk("CPU0: L2CR is %lx\n", core99_l2_cache);
        } else {
                printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
                _set_L2CR(0);
                _set_L2CR(core99_l2_cache);
                printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
        }

        if (!cpu_has_feature(CPU_FTR_L3CR))
                return;

        if (cpu == 0) {
                core99_l3_cache = _get_L3CR();
                printk("CPU0: L3CR is %lx\n", core99_l3_cache);
        } else {
                printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
                _set_L3CR(0);
                _set_L3CR(core99_l3_cache);
                printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
        }
}

/*
 * Set and clear IPIs for powersurge.
 */
static inline void psurge_set_ipi(int cpu)
{
        if (psurge_type == PSURGE_NONE)
                return;
        if (cpu == 0)
                in_be32(psurge_pri_intr);
        else if (psurge_type == PSURGE_DUAL)
                out_8(psurge_sec_intr, 0);
        else
                PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
}

static inline void psurge_clr_ipi(int cpu)
{
        if (cpu > 0) {
                switch(psurge_type) {
                case PSURGE_DUAL:
                        out_8(psurge_sec_intr, ~0);
                        /* fall through */
                case PSURGE_NONE:
                        break;
                default:
                        PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
                }
        }
}

/*
 * On powersurge (old SMP powermac architecture) we don't have
 * separate IPIs for separate messages like openpic does.  Instead
 * we have a bitmap for each processor, where a 1 bit means that
 * the corresponding message is pending for that processor.
 * Ideally each cpu's entry would be in a different cache line.
 *  -- paulus.
 */
static unsigned long psurge_smp_message[NR_CPUS];

void __pmac psurge_smp_message_recv(struct pt_regs *regs)
{
        int cpu = smp_processor_id();
        int msg;

        /* clear interrupt */
        psurge_clr_ipi(cpu);

        if (num_online_cpus() < 2)
                return;

        /* make sure there is a message there */
        for (msg = 0; msg < 4; msg++)
                if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
                        smp_message_recv(msg, regs);
}

irqreturn_t __pmac psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
{
        psurge_smp_message_recv(regs);
        return IRQ_HANDLED;
}

static void __pmac smp_psurge_message_pass(int target, int msg, unsigned long data,
                                           int wait)
{
        int i;

        if (num_online_cpus() < 2)
                return;

        for (i = 0; i < NR_CPUS; i++) {
                if (!cpu_online(i))
                        continue;
                if (target == MSG_ALL
                    || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
                    || target == i) {
                        set_bit(msg, &psurge_smp_message[i]);
                        psurge_set_ipi(i);
                }
        }
}

/*
 * Determine whether a quad card is present. We read the board ID
 * register, force the data bus to change to something else, and read
 * it again. If it's stable, then the register probably exists (ugh!)
 */
static int __init psurge_quad_probe(void)
{
        int type;
        unsigned int i;

        type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
        if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
            || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
                return PSURGE_DUAL;

        /* looks OK, try a slightly more rigorous test */
        /* bogus is not necessarily cacheline-aligned,
           though I don't suppose that really matters.  -- paulus */
        for (i = 0; i < 100; i++) {
                volatile u32 bogus[8];
                bogus[(0+i)%8] = 0x00000000;
                bogus[(1+i)%8] = 0x55555555;
                bogus[(2+i)%8] = 0xFFFFFFFF;
                bogus[(3+i)%8] = 0xAAAAAAAA;
                bogus[(4+i)%8] = 0x33333333;
                bogus[(5+i)%8] = 0xCCCCCCCC;
                bogus[(6+i)%8] = 0xCCCCCCCC;
                bogus[(7+i)%8] = 0x33333333;
                wmb();
                asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
                mb();
                if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
                        return PSURGE_DUAL;
        }
        return type;
}

static void __init psurge_quad_init(void)
{
        int procbits;

        if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
        procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
        if (psurge_type == PSURGE_QUAD_ICEGRASS)
                PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
        else
                PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
        mdelay(33);
        out_8(psurge_sec_intr, ~0);
        PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
        PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
        if (psurge_type != PSURGE_QUAD_ICEGRASS)
                PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
        PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
        mdelay(33);
        PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
        mdelay(33);
        PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
        mdelay(33);
}

static int __init smp_psurge_probe(void)
{
        int i, ncpus;

        /* We don't do SMP on the PPC601 -- paulus */
        if (PVR_VER(mfspr(SPRN_PVR)) == 1)
                return 1;

        /*
         * The powersurge cpu board can be used in the generation
         * of powermacs that have a socket for an upgradeable cpu card,
         * including the 7500, 8500, 9500, 9600.
         * The device tree doesn't tell you if you have 2 cpus because
         * OF doesn't know anything about the 2nd processor.
         * Instead we look for magic bits in magic registers,
         * in the hammerhead memory controller in the case of the
         * dual-cpu powersurge board.  -- paulus.
         */
        if (find_devices("hammerhead") == NULL)
                return 1;

        hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
        quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
        psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;

        psurge_type = psurge_quad_probe();
        if (psurge_type != PSURGE_DUAL) {
                psurge_quad_init();
                /* All released cards using this HW design have 4 CPUs */
                ncpus = 4;
        } else {
                iounmap(quad_base);
                if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
                        /* not a dual-cpu card */
                        iounmap(hhead_base);
                        psurge_type = PSURGE_NONE;
                        return 1;
                }
                ncpus = 2;
        }

        psurge_start = ioremap(PSURGE_START, 4);
        psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);

        /* this is not actually strictly necessary -- paulus. */
        for (i = 1; i < ncpus; ++i)
                smp_hw_index[i] = i;

        if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);

        return ncpus;
}

static void __init smp_psurge_kick_cpu(int nr)
{
        void (*start)(void) = __secondary_start_psurge;
        unsigned long a;

        /* may need to flush here if secondary bats aren't setup */
        for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
                asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
        asm volatile("sync");

        if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);

        /* setup entry point of secondary processor */
        switch (nr) {
        case 2:
                start = __secondary_start_psurge2;
                break;
        case 3:
                start = __secondary_start_psurge3;
                break;
        }

        out_be32(psurge_start, __pa(start));
        mb();

        psurge_set_ipi(nr);
        udelay(10);
        psurge_clr_ipi(nr);

        if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
}

/*
 * With the dual-cpu powersurge board, the decrementers and timebases
 * of both cpus are frozen after the secondary cpu is started up,
 * until we give the secondary cpu another interrupt.  This routine
 * uses this to get the timebases synchronized.
 *  -- paulus.
 */
static void __init psurge_dual_sync_tb(int cpu_nr)
{
        int t;

        set_dec(tb_ticks_per_jiffy);
        set_tb(0, 0);
        last_jiffy_stamp(cpu_nr) = 0;

        if (cpu_nr > 0) {
                mb();
                sec_tb_reset = 1;
                return;
        }

        /* wait for the secondary to have reset its TB before proceeding */
        for (t = 10000000; t > 0 && !sec_tb_reset; --t)
                ;

        /* now interrupt the secondary, starting both TBs */
        psurge_set_ipi(1);

        smp_tb_synchronized = 1;
}

static struct irqaction psurge_irqaction = {
        .handler = psurge_primary_intr,
        .flags = SA_INTERRUPT,
        .mask = CPU_MASK_NONE,
        .name = "primary IPI",
};

static void __init smp_psurge_setup_cpu(int cpu_nr)
{

        if (cpu_nr == 0) {
                /* If we failed to start the second CPU, we should still
                 * send it an IPI to start the timebase & DEC or we might
                 * have them stuck.
                 */
                if (num_online_cpus() < 2) {
                        if (psurge_type == PSURGE_DUAL)
                                psurge_set_ipi(1);
                        return;
                }
                /* reset the entry point so if we get another intr we won't
                 * try to startup again */
                out_be32(psurge_start, 0x100);
                if (setup_irq(30, &psurge_irqaction))
                        printk(KERN_ERR "Couldn't get primary IPI interrupt\n");
        }

        if (psurge_type == PSURGE_DUAL)
                psurge_dual_sync_tb(cpu_nr);
}

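/*
 * Timebase sync on the dual powersurge is handled in psurge_dual_sync_tb()
 * above (called from setup_cpu), so these generic hooks have nothing to do.
 */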
void __init smp_psurge_take_timebase(void)
{
        /* Dummy implementation */
}

void __init smp_psurge_give_timebase(void)
{
        /* Dummy implementation */
}

static int __init smp_core99_probe(void)
{
#ifdef CONFIG_6xx
        extern int powersave_nap;
#endif
        struct device_node *cpus, *firstcpu;
        int i, ncpus = 0, boot_cpu = -1;
        u32 *tbprop = NULL;

        if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
        cpus = firstcpu = find_type_devices("cpu");
        while (cpus != NULL) {
                u32 *regprop = (u32 *)get_property(cpus, "reg", NULL);
                char *stateprop = (char *)get_property(cpus, "state", NULL);
                if (regprop != NULL && stateprop != NULL &&
                    !strncmp(stateprop, "running", 7))
                        boot_cpu = *regprop;
                ++ncpus;
                cpus = cpus->next;
        }
        if (boot_cpu == -1)
                printk(KERN_WARNING "Couldn't detect boot CPU !\n");
        if (boot_cpu != 0)
                printk(KERN_WARNING "Boot CPU is %d, unsupported setup !\n", boot_cpu);

        if (machine_is_compatible("MacRISC4")) {
                extern struct smp_ops_t core99_smp_ops;

                core99_smp_ops.take_timebase = smp_generic_take_timebase;
                core99_smp_ops.give_timebase = smp_generic_give_timebase;
        } else {
                if (firstcpu != NULL)
                        tbprop = (u32 *)get_property(firstcpu, "timebase-enable", NULL);
                if (tbprop)
                        core99_tb_gpio = *tbprop;
                else
                        core99_tb_gpio = KL_GPIO_TB_ENABLE;
        }

        if (ncpus > 1) {
                openpic_request_IPIs();
                for (i = 1; i < ncpus; ++i)
                        smp_hw_index[i] = i;
#ifdef CONFIG_6xx
                powersave_nap = 0;
#endif
                core99_init_caches(0);
        }

        return ncpus;
}

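/*
 * Kick a Core99 secondary: temporarily patch the reset vector at 0x100 with
 * a branch to the secondary entry point, pulse the CPU's reset line through
 * the platform feature call, then restore the original vector.
 */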
static void __init smp_core99_kick_cpu(int nr)
{
        unsigned long save_vector, new_vector;
        unsigned long flags;

        volatile unsigned long *vector
                 = ((volatile unsigned long *)(KERNELBASE+0x100));
        if (nr < 1 || nr > 3)
                return;
        if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);

        local_irq_save(flags);
        local_irq_disable();

        /* Save reset vector */
        save_vector = *vector;

        /* Setup a fake reset vector: 0x48000002 is an absolute branch
         * ("ba") to the physical address of the secondary entry point,
         * i.e. __secondary_start_psurge - KERNELBASE.
         */
        switch(nr) {
                case 1:
                        new_vector = (unsigned long)__secondary_start_psurge;
                        break;
                case 2:
                        new_vector = (unsigned long)__secondary_start_psurge2;
                        break;
                case 3:
                        new_vector = (unsigned long)__secondary_start_psurge3;
                        break;
        }
        *vector = 0x48000002 + new_vector - KERNELBASE;

        /* flush data cache and inval instruction cache */
        flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);

        /* Put some life in our friend */
        pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);

        /* FIXME: We wait a bit for the CPU to take the exception, I should
         * instead wait for the entry code to set something for me. Well,
         * ideally, all that crap will be done in prom.c and the CPU left
         * in a RAM-based wait loop like CHRP.
         */
        mdelay(1);

        /* Restore our exception vector */
        *vector = save_vector;
        flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);

        local_irq_restore(flags);
        if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
}

static void __init smp_core99_setup_cpu(int cpu_nr)
{
        /* Setup L2/L3 */
        if (cpu_nr != 0)
                core99_init_caches(cpu_nr);

        /* Setup openpic */
        do_openpic_setup_cpu();

        if (cpu_nr == 0) {
#ifdef CONFIG_POWER4
                extern void g5_phy_disable_cpu1(void);

                /* If we didn't start the second CPU, we must take
                 * it off the bus
                 */
                if (machine_is_compatible("MacRISC4") &&
                    num_online_cpus() < 2)
                        g5_phy_disable_cpu1();
#endif /* CONFIG_POWER4 */
                if (ppc_md.progress) ppc_md.progress("core99_setup_cpu 0 done", 0x349);
        }
}

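/*
 * Core99 timebase hand-off, driven by sec_tb_reset:
 *   0 - idle
 *   1 - the secondary has entered take_timebase and is waiting
 *   2 - the primary has frozen the TB and published pri_tb_hi/lo/stamp
 * The secondary copies the values, drops sec_tb_reset back to 0, and the
 * primary then restarts the timebase.
 */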
/* not __init, called in sleep/wakeup code */
void smp_core99_take_timebase(void)
{
        unsigned long flags;

        /* tell the primary we're here */
        sec_tb_reset = 1;
        mb();

        /* wait for the primary to set pri_tb_hi/lo */
        while (sec_tb_reset < 2)
                mb();

        /* set our stuff the same as the primary */
        local_irq_save(flags);
        set_dec(1);
        set_tb(pri_tb_hi, pri_tb_lo);
        last_jiffy_stamp(smp_processor_id()) = pri_tb_stamp;
        mb();

        /* tell the primary we're done */
        sec_tb_reset = 0;
        mb();
        local_irq_restore(flags);
}

/* not __init, called in sleep/wakeup code */
void smp_core99_give_timebase(void)
{
        unsigned long flags;
        unsigned int t;

        /* wait for the secondary to be in take_timebase */
        for (t = 100000; t > 0 && !sec_tb_reset; --t)
                udelay(10);
        if (!sec_tb_reset) {
                printk(KERN_WARNING "Timeout waiting sync on second CPU\n");
                return;
        }

        /* freeze the timebase and read it */
        /* disable interrupts so the timebase is disabled for the
           shortest possible time */
        local_irq_save(flags);
        pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
        pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
        mb();
        pri_tb_hi = get_tbu();
        pri_tb_lo = get_tbl();
        pri_tb_stamp = last_jiffy_stamp(smp_processor_id());
        mb();

        /* tell the secondary we're ready */
        sec_tb_reset = 2;
        mb();

        /* wait for the secondary to have taken it */
        for (t = 100000; t > 0 && sec_tb_reset; --t)
                udelay(10);
        if (sec_tb_reset)
                printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
        else
                smp_tb_synchronized = 1;

        /* Now restart the timebase by releasing the GPIO to open-collector (not driven) */
        pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
        pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
        local_irq_restore(flags);
}


/* PowerSurge-style Macs */
struct smp_ops_t psurge_smp_ops __pmacdata = {
        .message_pass   = smp_psurge_message_pass,
        .probe          = smp_psurge_probe,
        .kick_cpu       = smp_psurge_kick_cpu,
        .setup_cpu      = smp_psurge_setup_cpu,
        .give_timebase  = smp_psurge_give_timebase,
        .take_timebase  = smp_psurge_take_timebase,
};

/* Core99 Macs (dual G4s) */
struct smp_ops_t core99_smp_ops __pmacdata = {
        .message_pass   = smp_openpic_message_pass,
        .probe          = smp_core99_probe,
        .kick_cpu       = smp_core99_kick_cpu,
        .setup_cpu      = smp_core99_setup_cpu,
        .give_timebase  = smp_core99_give_timebase,
        .take_timebase  = smp_core99_take_timebase,
};
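
/*
 * How these ops tables get used is outside this file.  A minimal sketch,
 * assuming the PowerMac platform setup code picks one of them at boot via a
 * global smp_ops pointer; the device-node names below are illustrative:
 *
 *	if (find_devices("uni-n") || find_devices("u3"))
 *		smp_ops = &core99_smp_ops;
 *	else
 *		smp_ops = &psurge_smp_ops;
 */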