/*
 * arch/powerpc/platforms/pseries/xics.c
 *
 * Copyright 2000 IBM Corporation.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/signal.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/cpu.h>

#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/i8259.h>

#include "xics.h"

#define XICS_IPI                2
#define XICS_IRQ_SPURIOUS       0

/* Want a priority other than 0.  Various HW issues require this. */
#define DEFAULT_PRIORITY        5

/*
 * Mark IPIs as higher priority so we can take them inside interrupts
 * that aren't marked IRQF_DISABLED.
 */
#define IPI_PRIORITY            4

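/*
 * Per-CPU XICS presentation registers: the poll form of the XIRR at
 * offset 0, the XIRR proper at offset 4 (byte 0 of the big-endian
 * word is the CPPR), a reserved word at offset 8, and the QIRR/MFRR
 * byte at offset 12.
 */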
struct xics_ipl {
        union {
                u32 word;
                u8 bytes[4];
        } xirr_poll;
        union {
                u32 word;
                u8 bytes[4];
        } xirr;
        u32 dummy;
        union {
                u32 word;
                u8 bytes[4];
        } qirr;
};

static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];

static unsigned int default_server = 0xFF;
static unsigned int default_distrib_server = 0;
static unsigned int interrupt_server_size = 8;

static struct irq_host *xics_host;

/*
 * XICS only has a single IPI, so encode the messages per CPU
 */
struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;

/* RTAS service tokens */
static int ibm_get_xive;
static int ibm_set_xive;
static int ibm_int_on;
static int ibm_int_off;


/* Direct HW low level accessors */

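/*
 * A load from the XIRR accepts the highest-priority pending interrupt
 * and raises the CPPR to its priority; storing the value back is the
 * EOI.  A single-byte store to byte 0 of the XIRR updates just the
 * CPPR, and a single-byte store to the QIRR sets the MFRR, which
 * triggers an IPI whenever it is more favored than the current CPPR.
 */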
static inline unsigned int direct_xirr_info_get(int n_cpu)
{
        return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
}

static inline void direct_xirr_info_set(int n_cpu, int value)
{
        out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
}

static inline void direct_cppr_info(int n_cpu, u8 value)
{
        out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
}

static inline void direct_qirr_info(int n_cpu, u8 value)
{
        out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
}


/* LPAR low level accessors */

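/*
 * On LPAR the presentation registers are not directly mapped; the
 * hypervisor mediates access through the H_XIRR, H_CPPR, H_EOI and
 * H_IPI hcalls instead.
 */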
static inline long plpar_eoi(unsigned long xirr)
{
        return plpar_hcall_norets(H_EOI, xirr);
}

static inline long plpar_cppr(unsigned long cppr)
{
        return plpar_hcall_norets(H_CPPR, cppr);
}

static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
{
        return plpar_hcall_norets(H_IPI, servernum, mfrr);
}

static inline long plpar_xirr(unsigned long *xirr_ret)
{
        unsigned long dummy;
        return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy);
}

static inline unsigned int lpar_xirr_info_get(int n_cpu)
{
        unsigned long lpar_rc;
        unsigned long return_value;

        lpar_rc = plpar_xirr(&return_value);
        if (lpar_rc != H_SUCCESS)
                panic("bad return code xirr - rc = %lx\n", lpar_rc);
        return (unsigned int)return_value;
}

static inline void lpar_xirr_info_set(int n_cpu, int value)
{
        unsigned long lpar_rc;
        unsigned long val64 = value & 0xffffffff;

        lpar_rc = plpar_eoi(val64);
        if (lpar_rc != H_SUCCESS)
                panic("bad return code EOI - rc = %lx, value=%lx\n", lpar_rc,
                      val64);
}

static inline void lpar_cppr_info(int n_cpu, u8 value)
{
        unsigned long lpar_rc;

        lpar_rc = plpar_cppr(value);
        if (lpar_rc != H_SUCCESS)
                panic("bad return code cppr - rc = %lx\n", lpar_rc);
}

static inline void lpar_qirr_info(int n_cpu, u8 value)
{
        unsigned long lpar_rc;

        lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
        if (lpar_rc != H_SUCCESS)
                panic("bad return code qirr - rc = %lx\n", lpar_rc);
}


/* High level handlers and init code */


#ifdef CONFIG_SMP
static int get_irq_server(unsigned int virq)
{
        unsigned int server;
        /* For the moment only implement delivery to all cpus or one cpu */
        cpumask_t cpumask = irq_desc[virq].affinity;
        cpumask_t tmp = CPU_MASK_NONE;

        if (!distribute_irqs)
                return default_server;

        if (cpus_equal(cpumask, CPU_MASK_ALL)) {
                server = default_distrib_server;
        } else {
                cpus_and(tmp, cpu_online_map, cpumask);

                if (cpus_empty(tmp))
                        server = default_distrib_server;
                else
                        server = get_hard_smp_processor_id(first_cpu(tmp));
        }

        return server;
}
#else
static int get_irq_server(unsigned int virq)
{
        return default_server;
}
#endif

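/*
 * ibm,set-xive programs an interrupt source with a (server, priority)
 * pair, while ibm,int-on/ibm,int-off gate the source itself; a
 * priority of 0xff leaves the source effectively masked.
 */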
static void xics_unmask_irq(unsigned int virq)
{
        unsigned int irq;
        int call_status;
        unsigned int server;

        pr_debug("xics: unmask virq %d\n", virq);

        irq = (unsigned int)irq_map[virq].hwirq;
        pr_debug(" -> map to hwirq 0x%x\n", irq);
        if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
                return;

        server = get_irq_server(virq);

        call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
                                DEFAULT_PRIORITY);
        if (call_status != 0) {
                printk(KERN_ERR "xics_unmask_irq: irq=%u: ibm_set_xive "
                       "returned %d\n", irq, call_status);
                printk(KERN_ERR "set_xive %x, server %x\n", ibm_set_xive,
                       server);
                return;
        }

        /* Now unmask the interrupt (often a no-op) */
        call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
        if (call_status != 0) {
                printk(KERN_ERR "xics_unmask_irq: irq=%u: ibm_int_on "
                       "returned %d\n", irq, call_status);
                return;
        }
}

static void xics_mask_real_irq(unsigned int irq)
{
        int call_status;
        unsigned int server;

        if (irq == XICS_IPI)
                return;

        call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
        if (call_status != 0) {
                printk(KERN_ERR "xics_mask_real_irq: irq=%u: "
                       "ibm_int_off returned %d\n", irq, call_status);
                return;
        }

        server = get_irq_server(irq);
        /* Have to set XIVE to 0xff to be able to remove a slot */
        call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
        if (call_status != 0) {
                printk(KERN_ERR "xics_mask_real_irq: irq=%u: ibm_set_xive(0xff)"
                       " returned %d\n", irq, call_status);
                return;
        }
}

static void xics_mask_irq(unsigned int virq)
{
        unsigned int irq;

        pr_debug("xics: mask virq %d\n", virq);

        irq = (unsigned int)irq_map[virq].hwirq;
        if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
                return;
        xics_mask_real_irq(irq);
}

static unsigned int xics_startup(unsigned int virq)
{
        unsigned int irq;

        /* force a reverse mapping of the interrupt so it gets in the cache */
        irq = (unsigned int)irq_map[virq].hwirq;
        irq_radix_revmap(xics_host, irq);

        /* unmask it */
        xics_unmask_irq(virq);
        return 0;
}

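/*
 * EOI is performed by storing the XIRR back with the source number in
 * the low 24 bits and 0xff (the least favored priority) in the top
 * byte, signalling end-of-interrupt and restoring the CPPR in one go.
 */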
static void xics_eoi_direct(unsigned int virq)
{
        int cpu = smp_processor_id();
        unsigned int irq = (unsigned int)irq_map[virq].hwirq;

        iosync();
        direct_xirr_info_set(cpu, (0xff << 24) | irq);
}

static void xics_eoi_lpar(unsigned int virq)
{
        int cpu = smp_processor_id();
        unsigned int irq = (unsigned int)irq_map[virq].hwirq;

        iosync();
        lpar_xirr_info_set(cpu, (0xff << 24) | irq);
}

static inline unsigned int xics_remap_irq(unsigned int vec)
{
        unsigned int irq;

        vec &= 0x00ffffff;

        if (vec == XICS_IRQ_SPURIOUS)
                return NO_IRQ;
        irq = irq_radix_revmap(xics_host, vec);
        if (likely(irq != NO_IRQ))
                return irq;

        printk(KERN_ERR "Interrupt %u (real) is invalid,"
               " disabling it.\n", vec);
        xics_mask_real_irq(vec);
        return NO_IRQ;
}

static unsigned int xics_get_irq_direct(struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();

        return xics_remap_irq(direct_xirr_info_get(cpu));
}

static unsigned int xics_get_irq_lpar(struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();

        return xics_remap_irq(lpar_xirr_info_get(cpu));
}

#ifdef CONFIG_SMP

static irqreturn_t xics_ipi_dispatch(int cpu, struct pt_regs *regs)
{
        WARN_ON(cpu_is_offline(cpu));

        while (xics_ipi_message[cpu].value) {
                if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
                }
                if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_RESCHEDULE, regs);
                }
#if 0
                if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
                }
#endif
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
                if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
                }
#endif
        }
        return IRQ_HANDLED;
}

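/*
 * Both IPI actions first write 0xff to the QIRR, resetting the MFRR
 * to the least favored level (no IPI pending) before the message bits
 * are dispatched, so an IPI that arrives during dispatch will raise a
 * fresh interrupt.
 */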
static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id, struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        direct_qirr_info(cpu, 0xff);

        return xics_ipi_dispatch(cpu, regs);
}

static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id, struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        lpar_qirr_info(cpu, 0xff);

        return xics_ipi_dispatch(cpu, regs);
}

void xics_cause_IPI(int cpu)
{
        if (firmware_has_feature(FW_FEATURE_LPAR))
                lpar_qirr_info(cpu, IPI_PRIORITY);
        else
                direct_qirr_info(cpu, IPI_PRIORITY);
}

#endif /* CONFIG_SMP */

static void xics_set_cpu_priority(int cpu, unsigned char cppr)
{
        if (firmware_has_feature(FW_FEATURE_LPAR))
                lpar_cppr_info(cpu, cppr);
        else
                direct_cppr_info(cpu, cppr);
        iosync();
}

static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
{
        unsigned int irq;
        int status;
        int xics_status[2];
        unsigned long newmask;
        cpumask_t tmp = CPU_MASK_NONE;

        irq = (unsigned int)irq_map[virq].hwirq;
        if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
                return;

        status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);

        if (status) {
                printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
                       "returns %d\n", irq, status);
                return;
        }

        /* For the moment only implement delivery to all cpus or one cpu */
        if (cpus_equal(cpumask, CPU_MASK_ALL)) {
                newmask = default_distrib_server;
        } else {
                cpus_and(tmp, cpu_online_map, cpumask);
                if (cpus_empty(tmp))
                        return;
                newmask = get_hard_smp_processor_id(first_cpu(tmp));
        }

        status = rtas_call(ibm_set_xive, 3, 1, NULL,
                           irq, newmask, xics_status[1]);

        if (status) {
                printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
                       "returns %d\n", irq, status);
                return;
        }
}

void xics_setup_cpu(void)
{
        int cpu = smp_processor_id();

        xics_set_cpu_priority(cpu, 0xff);

        /*
         * Put the calling processor into the GIQ.  This is really only
         * necessary from a secondary thread as the OF start-cpu interface
         * performs this function for us on primary threads.
         *
         * XXX: undo of teardown on kexec needs this too, as may hotplug
         */
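        /*
         * The indicator index for GIQ membership is encoded as
         * (1 << interrupt-server#-size) - 1 - gserver#, i.e. the
         * distribution server number subtracted from an all-ones mask.
         */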
        rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
                (1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
}

static struct irq_chip xics_pic_direct = {
        .typename = " XICS     ",
        .startup = xics_startup,
        .mask = xics_mask_irq,
        .unmask = xics_unmask_irq,
        .eoi = xics_eoi_direct,
        .set_affinity = xics_set_affinity
};

static struct irq_chip xics_pic_lpar = {
        .typename = " XICS     ",
        .startup = xics_startup,
        .mask = xics_mask_irq,
        .unmask = xics_unmask_irq,
        .eoi = xics_eoi_lpar,
        .set_affinity = xics_set_affinity
};

static int xics_host_match(struct irq_host *h, struct device_node *node)
{
        /* IBM machines have interrupt parents of various funky types for
         * things like vdevices, events, etc.  The trick we use here is to
         * match everything except the legacy 8259, which is compatible
         * with "chrp,iic".
         */
        return !device_is_compatible(node, "chrp,iic");
}

static int xics_host_map_direct(struct irq_host *h, unsigned int virq,
                                irq_hw_number_t hw)
{
        pr_debug("xics: map_direct virq %d, hwirq 0x%lx\n", virq, hw);

        get_irq_desc(virq)->status |= IRQ_LEVEL;
        set_irq_chip_and_handler(virq, &xics_pic_direct, handle_fasteoi_irq);
        return 0;
}

static int xics_host_map_lpar(struct irq_host *h, unsigned int virq,
                              irq_hw_number_t hw)
{
        pr_debug("xics: map_lpar virq %d, hwirq 0x%lx\n", virq, hw);

        get_irq_desc(virq)->status |= IRQ_LEVEL;
        set_irq_chip_and_handler(virq, &xics_pic_lpar, handle_fasteoi_irq);
        return 0;
}

static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
                           u32 *intspec, unsigned int intsize,
                           irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
        /* Current xics implementation translates everything
         * to level. It is not technically right for MSIs but this
         * is irrelevant at this point. We might get smarter in the future
         */
        *out_hwirq = intspec[0];
        *out_flags = IRQ_TYPE_LEVEL_LOW;

        return 0;
}

static struct irq_host_ops xics_host_direct_ops = {
        .match = xics_host_match,
        .map = xics_host_map_direct,
        .xlate = xics_host_xlate,
};

static struct irq_host_ops xics_host_lpar_ops = {
        .match = xics_host_match,
        .map = xics_host_map_lpar,
        .xlate = xics_host_xlate,
};

static void __init xics_init_host(void)
{
        struct irq_host_ops *ops;

        if (firmware_has_feature(FW_FEATURE_LPAR))
                ops = &xics_host_lpar_ops;
        else
                ops = &xics_host_direct_ops;
        xics_host = irq_alloc_host(IRQ_HOST_MAP_TREE, 0, ops,
                                   XICS_IRQ_SPURIOUS);
        BUG_ON(xics_host == NULL);
        irq_set_default_host(xics_host);
}

static void __init xics_map_one_cpu(int hw_id, unsigned long addr,
                                    unsigned long size)
{
#ifdef CONFIG_SMP
        int i;

        /* This may look gross but it's good enough for now; we don't
         * have a reverse hard -> linux processor id mapping.
         */
        for_each_possible_cpu(i) {
                if (!cpu_present(i))
                        continue;
                if (hw_id == get_hard_smp_processor_id(i)) {
                        xics_per_cpu[i] = ioremap(addr, size);
                        return;
                }
        }
#else
        if (hw_id != 0)
                return;
        xics_per_cpu[0] = ioremap(addr, size);
#endif /* CONFIG_SMP */
}

static void __init xics_init_one_node(struct device_node *np,
                                      unsigned int *indx)
{
        unsigned int ilen;
        u32 *ireg;

        /* This code makes the theoretically broken assumption that the
         * interrupt server numbers are the same as the hard CPU numbers.
         * This happens to be the case so far but we are playing with fire...
         * should be fixed one of these days. -BenH.
         */
        ireg = (u32 *)get_property(np, "ibm,interrupt-server-ranges", NULL);

        /* Does that ever happen? We'll know soon enough... but even good
         * old f80 does have that property...
         */
        WARN_ON(ireg == NULL);
        if (ireg) {
                /* set node starting index for this node */
                *indx = *ireg;
        }
        ireg = (u32 *)get_property(np, "reg", &ilen);
        if (!ireg)
                panic("xics_init_IRQ: can't find interrupt reg property");

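        /*
         * Each "reg" entry is an (address, size) pair of 64-bit values,
         * i.e. four u32 cells, describing one presentation area.
         */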
        while (ilen >= (4 * sizeof(u32))) {
                unsigned long addr, size;

                /* XXX Use proper OF parsing code here !!! */
                addr = (unsigned long)*ireg++ << 32;
                ilen -= sizeof(u32);
                addr |= *ireg++;
                ilen -= sizeof(u32);
                size = (unsigned long)*ireg++ << 32;
                ilen -= sizeof(u32);
                size |= *ireg++;
                ilen -= sizeof(u32);
                xics_map_one_cpu(*indx, addr, size);
                (*indx)++;
        }
}

static void __init xics_setup_8259_cascade(void)
{
        struct device_node *np, *old, *found = NULL;
        int cascade, naddr;
        u32 *addrp;
        unsigned long intack = 0;

        for_each_node_by_type(np, "interrupt-controller")
                if (device_is_compatible(np, "chrp,iic")) {
                        found = np;
                        break;
                }
        if (found == NULL) {
                printk(KERN_DEBUG "xics: no ISA interrupt controller\n");
                return;
        }
        cascade = irq_of_parse_and_map(found, 0);
        if (cascade == NO_IRQ) {
                printk(KERN_ERR "xics: failed to map cascade interrupt\n");
                return;
        }
        pr_debug("xics: cascade mapped to irq %d\n", cascade);

        for (old = of_node_get(found); old != NULL; old = np) {
                np = of_get_parent(old);
                of_node_put(old);
                if (np == NULL)
                        break;
                if (strcmp(np->name, "pci") != 0)
                        continue;
                addrp = (u32 *)get_property(np, "8259-interrupt-acknowledge", NULL);
                if (addrp == NULL)
                        continue;
                naddr = prom_n_addr_cells(np);
                intack = addrp[naddr-1];
                if (naddr > 1)
                        intack |= ((unsigned long)addrp[naddr-2]) << 32;
        }
        if (intack)
                printk(KERN_DEBUG "xics: PCI 8259 intack at 0x%016lx\n", intack);
        i8259_init(found, intack);
        of_node_put(found);
        set_irq_chained_handler(cascade, pseries_8259_cascade);
}

void __init xics_init_IRQ(void)
{
        int i;
        struct device_node *np;
        u32 *ireg, ilen, indx = 0;
        int found = 0;

        ppc64_boot_msg(0x20, "XICS Init");

        ibm_get_xive = rtas_token("ibm,get-xive");
        ibm_set_xive = rtas_token("ibm,set-xive");
        ibm_int_on  = rtas_token("ibm,int-on");
        ibm_int_off = rtas_token("ibm,int-off");

        for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") {
                found = 1;
                if (firmware_has_feature(FW_FEATURE_LPAR))
                        break;
                xics_init_one_node(np, &indx);
        }
        if (found == 0)
                return;

        xics_init_host();

        /* Find the server numbers for the boot cpu. */
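        /*
         * In ibm,ppc-interrupt-gserver#s the first entry is the boot
         * cpu's own interrupt server number and the last is taken to
         * be the global distribution server.
         */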
        for (np = of_find_node_by_type(NULL, "cpu");
             np;
             np = of_find_node_by_type(np, "cpu")) {
                ireg = (u32 *)get_property(np, "reg", &ilen);
                if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) {
                        ireg = (u32 *)get_property(np,
                                        "ibm,ppc-interrupt-gserver#s", &ilen);
                        i = ilen / sizeof(int);
                        if (ireg && i > 0) {
                                default_server = ireg[0];
                                /* take last element */
                                default_distrib_server = ireg[i-1];
                        }
                        ireg = (u32 *)get_property(np,
                                        "ibm,interrupt-server#-size", NULL);
                        if (ireg)
                                interrupt_server_size = *ireg;
                        break;
                }
        }
        of_node_put(np);

        if (firmware_has_feature(FW_FEATURE_LPAR))
                ppc_md.get_irq = xics_get_irq_lpar;
        else
                ppc_md.get_irq = xics_get_irq_direct;

        xics_setup_cpu();

        xics_setup_8259_cascade();

        ppc64_boot_msg(0x21, "XICS Done");
}

#ifdef CONFIG_SMP
void xics_request_IPIs(void)
{
        unsigned int ipi;

        ipi = irq_create_mapping(xics_host, XICS_IPI);
        BUG_ON(ipi == NO_IRQ);

        /*
         * IPIs are marked IRQF_DISABLED as they must run with irqs
         * disabled
         */
        set_irq_handler(ipi, handle_percpu_irq);
        if (firmware_has_feature(FW_FEATURE_LPAR))
                request_irq(ipi, xics_ipi_action_lpar, IRQF_DISABLED,
                            "IPI", NULL);
        else
                request_irq(ipi, xics_ipi_action_direct, IRQF_DISABLED,
                            "IPI", NULL);
}
#endif /* CONFIG_SMP */

void xics_teardown_cpu(int secondary)
{
        int cpu = smp_processor_id();
        unsigned int ipi;
        struct irq_desc *desc;

        xics_set_cpu_priority(cpu, 0);

        /*
         * Clear IPI
         */
        if (firmware_has_feature(FW_FEATURE_LPAR))
                lpar_qirr_info(cpu, 0xff);
        else
                direct_qirr_info(cpu, 0xff);

        /*
         * We need to EOI the IPI if we got here from a kexec "down" IPI.
         *
         * We probably need to check all the other interrupts too;
         * should we be flagging the idle loop instead, or creating
         * some task to be scheduled?
         */

        ipi = irq_find_mapping(xics_host, XICS_IPI);
        if (ipi == XICS_IRQ_SPURIOUS)
                return;
        desc = get_irq_desc(ipi);
        if (desc->chip && desc->chip->eoi)
                desc->chip->eoi(ipi);

        /*
         * Some machines need to have at least one cpu in the GIQ,
         * so leave the master cpu in the group.
         */
        if (secondary)
                rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
                                   (1UL << interrupt_server_size) - 1 -
                                   default_distrib_server, 0);
}

#ifdef CONFIG_HOTPLUG_CPU

/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
        int status;
        unsigned int irq, virq, cpu = smp_processor_id();

        /* Reject any interrupt that was queued to us... */
        xics_set_cpu_priority(cpu, 0);

        /* remove ourselves from the global interrupt queue */
        status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
                (1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
        WARN_ON(status < 0);

        /* Allow IPIs again... */
        xics_set_cpu_priority(cpu, DEFAULT_PRIORITY);

        for_each_irq(virq) {
                struct irq_desc *desc;
                int xics_status[2];
                unsigned long flags;

                /* We can't set affinity on ISA interrupts */
                if (virq < NUM_ISA_INTERRUPTS)
                        continue;
                if (irq_map[virq].host != xics_host)
                        continue;
                irq = (unsigned int)irq_map[virq].hwirq;
                /* We still need to receive IPIs. */
                if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
                        continue;
                desc = get_irq_desc(virq);

                /* We only need to migrate enabled IRQs */
                if (desc == NULL || desc->chip == NULL
                    || desc->action == NULL
                    || desc->chip->set_affinity == NULL)
                        continue;

                spin_lock_irqsave(&desc->lock, flags);

                status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
                if (status) {
                        printk(KERN_ERR "migrate_irqs_away: virq=%u "
                                        "ibm,get-xive returns %d\n",
                                        virq, status);
                        goto unlock;
                }

                /*
                 * We only support delivery to all cpus or to one cpu.
                 * The irq has to be migrated only in the single cpu
                 * case.
                 */
                if (xics_status[0] != get_hard_smp_processor_id(cpu))
                        goto unlock;

                printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
                       virq, cpu);

                /* Reset affinity to all cpus */
                desc->chip->set_affinity(virq, CPU_MASK_ALL);
                irq_desc[virq].affinity = CPU_MASK_ALL;
unlock:
                spin_unlock_irqrestore(&desc->lock, flags);
        }
}
#endif