2 * Intel IO-APIC support for multi-Pentium hosts.
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
24 #include <linux/interrupt.h>
25 #include <linux/init.h>
26 #include <linux/delay.h>
27 #include <linux/sched.h>
28 #include <linux/pci.h>
29 #include <linux/mc146818rtc.h>
30 #include <linux/compiler.h>
31 #include <linux/acpi.h>
32 #include <linux/module.h>
33 #include <linux/sysdev.h>
34 #include <linux/msi.h>
35 #include <linux/htirq.h>
36 #include <linux/freezer.h>
37 #include <linux/kthread.h>
38 #include <linux/jiffies.h> /* time_after() */
40 #include <acpi/acpi_bus.h>
42 #include <linux/bootmem.h>
43 #include <linux/dmar.h>
44 #include <linux/hpet.h>
50 #include <asm/proto.h>
53 #include <asm/timer.h>
54 #include <asm/i8259.h>
56 #include <asm/msidef.h>
57 #include <asm/hypertransport.h>
58 #include <asm/setup.h>
59 #include <asm/irq_remapping.h>
63 #include <mach_apic.h>
64 #include <mach_apicdef.h>
66 #define __apicdebuginit(type) static type __init
69 * Is the SiS APIC rmw bug present?
70 * -1 = don't know, 0 = no, 1 = yes
72 int sis_apic_bug = -1;
74 static DEFINE_SPINLOCK(ioapic_lock);
75 static DEFINE_SPINLOCK(vector_lock);
78 * # of IRQ routing registers
80 int nr_ioapic_registers[MAX_IO_APICS];
82 /* I/O APIC entries */
83 struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
86 /* MP IRQ source entries */
87 struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
89 /* # of MP IRQ source entries */
92 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
93 int mp_bus_id_to_type[MAX_MP_BUSSES];
96 DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
98 int skip_ioapic_setup;
100 static int __init parse_noapic(char *str)
102 /* disable IO-APIC */
103 disable_ioapic_setup();
106 early_param("noapic", parse_noapic);
112 #ifdef CONFIG_HAVE_SPARSE_IRQ
113 struct irq_cfg *next;
115 struct irq_pin_list *irq_2_pin;
117 cpumask_t old_domain;
118 unsigned move_cleanup_count;
120 u8 move_in_progress : 1;
123 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
124 static struct irq_cfg irq_cfg_legacy[] __initdata = {
125 [0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
126 [1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
127 [2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
128 [3] = { .irq = 3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, },
129 [4] = { .irq = 4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, },
130 [5] = { .irq = 5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, },
131 [6] = { .irq = 6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, },
132 [7] = { .irq = 7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, },
133 [8] = { .irq = 8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, },
134 [9] = { .irq = 9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, },
135 [10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
136 [11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
137 [12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
138 [13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
139 [14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
140 [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
143 static struct irq_cfg irq_cfg_init = { .irq = -1U, };
145 static void init_one_irq_cfg(struct irq_cfg *cfg)
147 memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg));
150 static struct irq_cfg *irq_cfgx;
152 #ifdef CONFIG_HAVE_SPARSE_IRQ
154 * Protect the irq_cfgx_free freelist:
156 static DEFINE_SPINLOCK(irq_cfg_lock);
158 static struct irq_cfg *irq_cfgx_free;
161 static void __init init_work(void *data)
163 struct dyn_array *da = data;
170 memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy));
172 legacy_count = ARRAY_SIZE(irq_cfg_legacy);
173 for (i = legacy_count; i < *da->nr; i++)
174 init_one_irq_cfg(&cfg[i]);
176 #ifdef CONFIG_HAVE_SPARSE_IRQ
177 for (i = 1; i < *da->nr; i++)
178 cfg[i-1].next = &cfg[i];
180 irq_cfgx_free = &irq_cfgx[legacy_count];
181 irq_cfgx[legacy_count - 1].next = NULL;
185 #ifdef CONFIG_HAVE_SPARSE_IRQ
186 /* needs to be bigger than the size of irq_cfg_legacy */
187 static int nr_irq_cfg = 32;
189 static int __init parse_nr_irq_cfg(char *arg)
192 nr_irq_cfg = simple_strtoul(arg, NULL, 0);
199 early_param("nr_irq_cfg", parse_nr_irq_cfg);
201 #define for_each_irq_cfg(irqX, cfg) \
202 for (cfg = irq_cfgx, irqX = cfg->irq; cfg; cfg = cfg->next, irqX = cfg ? cfg->irq : -1U)
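/*
 * Illustrative use of the iterator above (hypothetical snippet, not
 * called anywhere): walk every allocated irq_cfg and report its vector.
 *
 *	unsigned int i;
 *	struct irq_cfg *cfg;
 *
 *	for_each_irq_cfg(i, cfg)
 *		printk(KERN_DEBUG "irq %u -> vector 0x%x\n", i, cfg->vector);
 */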
205 DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irq_cfg, PAGE_SIZE, init_work);
207 static struct irq_cfg *irq_cfg(unsigned int irq)
222 static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
224 struct irq_cfg *cfg, *cfg_pri;
229 cfg_pri = cfg = irq_cfgx;
239 spin_lock_irqsave(&irq_cfg_lock, flags);
240 if (!irq_cfgx_free) {
242 unsigned long total_bytes;
244 * we ran out of pre-allocated ones, allocate more
246 printk(KERN_DEBUG "try to get more irq_cfg %d\n", nr_irq_cfg);
248 total_bytes = sizeof(struct irq_cfg) * nr_irq_cfg;
250 cfg = kzalloc(total_bytes, GFP_ATOMIC);
252 cfg = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
255 panic("please boot with nr_irq_cfg= %d\n", count * 2);
258 printk(KERN_DEBUG "irq_cfg ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
260 for (i = 0; i < nr_irq_cfg; i++)
261 init_one_irq_cfg(&cfg[i]);
263 for (i = 1; i < nr_irq_cfg; i++)
264 cfg[i-1].next = &cfg[i];
270 irq_cfgx_free = irq_cfgx_free->next;
278 spin_unlock_irqrestore(&irq_cfg_lock, flags);
280 printk(KERN_DEBUG "found new irq_cfg for irq %d\n", cfg->irq);
281 #ifdef CONFIG_HAVE_SPARSE_IRQ_DEBUG
283 /* dump the results */
286 unsigned long bytes = sizeof(struct irq_cfg);
288 printk(KERN_DEBUG "=========================== %d\n", irq);
289 printk(KERN_DEBUG "irq_cfg dump after get that for %d\n", irq);
290 for_each_irq_cfg(cfg) {
292 printk(KERN_DEBUG "irq_cfg %d ==> [%#lx - %#lx]\n", cfg->irq, phys, phys + bytes);
294 printk(KERN_DEBUG "===========================\n");
301 #define for_each_irq_cfg(irq, cfg) \
302 for (irq = 0, cfg = &irq_cfgx[irq]; irq < nr_irqs; irq++, cfg = &irq_cfgx[irq])
304 DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irqs, PAGE_SIZE, init_work);
306 struct irq_cfg *irq_cfg(unsigned int irq)
309 return &irq_cfgx[irq];
313 struct irq_cfg *irq_cfg_alloc(unsigned int irq)
320 * This is performance-critical; we want to do it in O(1)
322 * the indexing order of this array favors 1:1 mappings
323 * between pins and IRQs.
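/*
 * Concretely: each irq_cfg carries a singly linked list of (apic, pin)
 * nodes; the common 1:1 case uses a single node, and a shared ISA-space
 * irq simply collects one node per pin (see add_pin_to_irq() below).
 */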
326 struct irq_pin_list {
328 struct irq_pin_list *next;
331 static struct irq_pin_list *irq_2_pin_head;
332 /* fill one page ? */
333 static int nr_irq_2_pin = 0x100;
334 static struct irq_pin_list *irq_2_pin_ptr;
335 static void __init irq_2_pin_init_work(void *data)
337 struct dyn_array *da = data;
338 struct irq_pin_list *pin;
343 for (i = 1; i < *da->nr; i++)
344 pin[i-1].next = &pin[i];
346 irq_2_pin_ptr = &pin[0];
348 DEFINE_DYN_ARRAY(irq_2_pin_head, sizeof(struct irq_pin_list), nr_irq_2_pin, PAGE_SIZE, irq_2_pin_init_work);
350 static struct irq_pin_list *get_one_free_irq_2_pin(void)
352 struct irq_pin_list *pin;
358 irq_2_pin_ptr = pin->next;
364 * we ran out of pre-allocated ones, allocate more
366 printk(KERN_DEBUG "try to get more irq_2_pin %d\n", nr_irq_2_pin);
369 pin = kzalloc(sizeof(struct irq_pin_list)*nr_irq_2_pin,
372 pin = __alloc_bootmem_nopanic(sizeof(struct irq_pin_list) *
373 nr_irq_2_pin, PAGE_SIZE, 0);
376 panic("cannot get more irq_2_pin\n");
378 for (i = 1; i < nr_irq_2_pin; i++)
379 pin[i-1].next = &pin[i];
381 irq_2_pin_ptr = pin->next;
389 unsigned int unused[3];
393 static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
395 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
396 + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
399 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
401 struct io_apic __iomem *io_apic = io_apic_base(apic);
402 writel(reg, &io_apic->index);
403 return readl(&io_apic->data);
406 static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
408 struct io_apic __iomem *io_apic = io_apic_base(apic);
409 writel(reg, &io_apic->index);
410 writel(value, &io_apic->data);
414 * Re-write a value: to be used for read-modify-write
415 * cycles where the read already set up the index register.
417 * The older SiS APICs require that we rewrite the index register
419 static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
421 struct io_apic __iomem *io_apic = io_apic_base(apic);
423 writel(reg, &io_apic->index);
424 writel(value, &io_apic->data);
427 static bool io_apic_level_ack_pending(unsigned int irq)
429 struct irq_pin_list *entry;
431 struct irq_cfg *cfg = irq_cfg(irq);
433 spin_lock_irqsave(&ioapic_lock, flags);
434 entry = cfg->irq_2_pin;
442 reg = io_apic_read(entry->apic, 0x10 + pin*2);
443 /* Is the remote IRR bit set? */
444 if (reg & IO_APIC_REDIR_REMOTE_IRR) {
445 spin_unlock_irqrestore(&ioapic_lock, flags);
452 spin_unlock_irqrestore(&ioapic_lock, flags);
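/*
 * entry_union lets one routing entry be accessed either as two 32-bit
 * words (w1/w2), matching the pair of 32-bit IO-APIC registers that
 * hold each RTE, or as a structured IO_APIC_route_entry.
 */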
458 struct { u32 w1, w2; };
459 struct IO_APIC_route_entry entry;
462 static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
464 union entry_union eu;
466 spin_lock_irqsave(&ioapic_lock, flags);
467 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
468 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
469 spin_unlock_irqrestore(&ioapic_lock, flags);
474 * When we write a new IO APIC routing entry, we need to write the high
475 * word first! If the mask bit in the low word is clear, we will enable
476 * the interrupt, and we need to make sure the entry is fully populated
477 * before that happens.
480 __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
482 union entry_union eu;
484 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
485 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
488 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
491 spin_lock_irqsave(&ioapic_lock, flags);
492 __ioapic_write_entry(apic, pin, e);
493 spin_unlock_irqrestore(&ioapic_lock, flags);
497 * When we mask an IO APIC routing entry, we need to write the low
498 * word first, in order to set the mask bit before we change the
501 static void ioapic_mask_entry(int apic, int pin)
504 union entry_union eu = { .entry.mask = 1 };
506 spin_lock_irqsave(&ioapic_lock, flags);
507 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
508 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
509 spin_unlock_irqrestore(&ioapic_lock, flags);
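/*
 * Note the symmetry with __ioapic_write_entry() above: writes that may
 * unmask an entry store the high word first, while masking here stores
 * the (masked) low word first, so a half-written entry can never fire.
 */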
513 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
517 struct irq_pin_list *entry;
520 entry = cfg->irq_2_pin;
529 #ifdef CONFIG_INTR_REMAP
531 * With interrupt-remapping, destination information comes
532 * from interrupt-remapping table entry.
534 if (!irq_remapped(irq))
535 io_apic_write(apic, 0x11 + pin*2, dest);
537 io_apic_write(apic, 0x11 + pin*2, dest);
539 reg = io_apic_read(apic, 0x10 + pin*2);
540 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
542 io_apic_modify(apic, 0x10 + pin*2, reg);
549 static int assign_irq_vector(int irq, cpumask_t mask);
551 static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
557 struct irq_desc *desc;
559 cpus_and(tmp, mask, cpu_online_map);
564 if (assign_irq_vector(irq, mask))
567 cpus_and(tmp, cfg->domain, mask);
568 dest = cpu_mask_to_apicid(tmp);
570 * Only the high 8 bits are valid.
572 dest = SET_APIC_LOGICAL_ID(dest);
574 desc = irq_to_desc(irq);
575 spin_lock_irqsave(&ioapic_lock, flags);
576 __target_IO_APIC_irq(irq, dest, cfg->vector);
577 desc->affinity = mask;
578 spin_unlock_irqrestore(&ioapic_lock, flags);
580 #endif /* CONFIG_SMP */
583 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
584 * shared ISA-space IRQs, so we have to support them. We are super
585 * fast in the common case, and fast for shared ISA-space IRQs.
587 static void add_pin_to_irq(unsigned int irq, int apic, int pin)
590 struct irq_pin_list *entry;
592 /* first reference to this irq's irq_cfg, so allocate a new one */
593 cfg = irq_cfg_alloc(irq);
594 entry = cfg->irq_2_pin;
596 entry = get_one_free_irq_2_pin();
597 cfg->irq_2_pin = entry;
600 printk(KERN_DEBUG " 0 add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
604 while (entry->next) {
605 /* not again, please */
606 if (entry->apic == apic && entry->pin == pin)
612 entry->next = get_one_free_irq_2_pin();
616 printk(KERN_DEBUG " x add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
620 * Reroute an IRQ to a different pin.
622 static void __init replace_pin_at_irq(unsigned int irq,
623 int oldapic, int oldpin,
624 int newapic, int newpin)
626 struct irq_cfg *cfg = irq_cfg(irq);
627 struct irq_pin_list *entry = cfg->irq_2_pin;
631 if (entry->apic == oldapic && entry->pin == oldpin) {
632 entry->apic = newapic;
635 /* every one is different, right? */
641 /* why? call replace before add? */
643 add_pin_to_irq(irq, newapic, newpin);
646 #define __DO_ACTION(R, ACTION_ENABLE, ACTION_DISABLE, FINAL) \
650 struct irq_cfg *cfg; \
651 struct irq_pin_list *entry; \
653 cfg = irq_cfg(irq); \
654 entry = cfg->irq_2_pin; \
660 reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
661 reg ACTION_DISABLE; \
663 io_apic_modify(entry->apic, 0x10 + R + pin*2, reg); \
667 entry = entry->next; \
671 #define DO_ACTION(name,R, ACTION_ENABLE, ACTION_DISABLE, FINAL) \
673 static void name##_IO_APIC_irq (unsigned int irq) \
674 __DO_ACTION(R, ACTION_ENABLE, ACTION_DISABLE, FINAL)
677 DO_ACTION(__unmask, 0, |= 0, &= ~IO_APIC_REDIR_MASKED, )
681 * Synchronize the IO-APIC and the CPU by doing
682 * a dummy read from the IO-APIC
684 static inline void io_apic_sync(unsigned int apic)
686 struct io_apic __iomem *io_apic = io_apic_base(apic);
687 readl(&io_apic->data);
691 DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED, &= ~0, io_apic_sync(entry->apic))
696 DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED, &= ~0, )
698 /* mask = 1, trigger = 0 */
699 DO_ACTION(__mask_and_edge, 0, |= IO_APIC_REDIR_MASKED, &= ~IO_APIC_REDIR_LEVEL_TRIGGER, )
701 /* mask = 0, trigger = 1 */
702 DO_ACTION(__unmask_and_level, 0, |= IO_APIC_REDIR_LEVEL_TRIGGER, &= ~IO_APIC_REDIR_MASKED, )
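/*
 * The DO_ACTION() instantiations above expand into helpers such as
 * __unmask_IO_APIC_irq() and __mask_IO_APIC_irq(): each walks the
 * irq's irq_2_pin list and applies the given bit operations to the
 * redirection register at 0x10 + R + pin*2 (R is 0 for all variants here).
 */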
706 static void mask_IO_APIC_irq (unsigned int irq)
710 spin_lock_irqsave(&ioapic_lock, flags);
711 __mask_IO_APIC_irq(irq);
712 spin_unlock_irqrestore(&ioapic_lock, flags);
715 static void unmask_IO_APIC_irq (unsigned int irq)
719 spin_lock_irqsave(&ioapic_lock, flags);
720 __unmask_IO_APIC_irq(irq);
721 spin_unlock_irqrestore(&ioapic_lock, flags);
724 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
726 struct IO_APIC_route_entry entry;
728 /* Check delivery_mode to be sure we're not clearing an SMI pin */
729 entry = ioapic_read_entry(apic, pin);
730 if (entry.delivery_mode == dest_SMI)
733 * Disable it in the IO-APIC irq-routing table:
735 ioapic_mask_entry(apic, pin);
738 static void clear_IO_APIC (void)
742 for (apic = 0; apic < nr_ioapics; apic++)
743 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
744 clear_IO_APIC_pin(apic, pin);
747 #if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
748 void send_IPI_self(int vector)
755 apic_wait_icr_idle();
756 cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
758 * Send the IPI. The write to APIC_ICR fires this off.
760 apic_write(APIC_ICR, cfg);
762 #endif /* !CONFIG_SMP && CONFIG_X86_32*/
766 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
767 * specific CPU-side IRQs.
771 static int pirq_entries [MAX_PIRQS];
772 static int pirqs_enabled;
774 static int __init ioapic_pirq_setup(char *str)
777 int ints[MAX_PIRQS+1];
779 get_options(str, ARRAY_SIZE(ints), ints);
781 for (i = 0; i < MAX_PIRQS; i++)
782 pirq_entries[i] = -1;
785 apic_printk(APIC_VERBOSE, KERN_INFO
786 "PIRQ redirection, working around broken MP-BIOS.\n");
788 if (ints[0] < MAX_PIRQS)
791 for (i = 0; i < max; i++) {
792 apic_printk(APIC_VERBOSE, KERN_DEBUG
793 "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
795 * PIRQs are mapped upside down, usually.
797 pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
802 __setup("pirq=", ioapic_pirq_setup);
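/*
 * Illustrative example (hypothetical values): booting with "pirq=5,11"
 * records IRQ 5 for the highest-numbered PIRQ pin and IRQ 11 for the
 * next one down, matching the upside-down mapping noted above;
 * pin_2_irq() then consults pirq_entries[] for IO-APIC pins 16-23.
 */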
803 #endif /* CONFIG_X86_32 */
805 #ifdef CONFIG_INTR_REMAP
806 /* I/O APIC RTE contents at the OS boot up */
807 static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];
810 * Saves and masks all the unmasked IO-APIC RTE's
812 int save_mask_IO_APIC_setup(void)
814 union IO_APIC_reg_01 reg_01;
819 * The number of IO-APIC IRQ registers (== #pins):
821 for (apic = 0; apic < nr_ioapics; apic++) {
822 spin_lock_irqsave(&ioapic_lock, flags);
823 reg_01.raw = io_apic_read(apic, 1);
824 spin_unlock_irqrestore(&ioapic_lock, flags);
825 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
828 for (apic = 0; apic < nr_ioapics; apic++) {
829 early_ioapic_entries[apic] =
830 kzalloc(sizeof(struct IO_APIC_route_entry) *
831 nr_ioapic_registers[apic], GFP_KERNEL);
832 if (!early_ioapic_entries[apic])
836 for (apic = 0; apic < nr_ioapics; apic++)
837 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
838 struct IO_APIC_route_entry entry;
840 entry = early_ioapic_entries[apic][pin] =
841 ioapic_read_entry(apic, pin);
844 ioapic_write_entry(apic, pin, entry);
850 void restore_IO_APIC_setup(void)
854 for (apic = 0; apic < nr_ioapics; apic++)
855 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
856 ioapic_write_entry(apic, pin,
857 early_ioapic_entries[apic][pin]);
860 void reinit_intr_remapped_IO_APIC(int intr_remapping)
863 * For now, a plain restore of previous settings.
864 * TBD: In the case of OS enabling interrupt-remapping,
865 * IO-APIC RTEs need to be set up to point to interrupt-remapping
866 * table entries. For now, do a plain restore, and wait for
867 * the setup_IO_APIC_irqs() to do proper initialization.
869 restore_IO_APIC_setup();
874 * Find the IRQ entry number of a certain pin.
876 static int find_irq_entry(int apic, int pin, int type)
880 for (i = 0; i < mp_irq_entries; i++)
881 if (mp_irqs[i].mp_irqtype == type &&
882 (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
883 mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
884 mp_irqs[i].mp_dstirq == pin)
891 * Find the pin to which IRQ[irq] (ISA) is connected
893 static int __init find_isa_irq_pin(int irq, int type)
897 for (i = 0; i < mp_irq_entries; i++) {
898 int lbus = mp_irqs[i].mp_srcbus;
900 if (test_bit(lbus, mp_bus_not_pci) &&
901 (mp_irqs[i].mp_irqtype == type) &&
902 (mp_irqs[i].mp_srcbusirq == irq))
904 return mp_irqs[i].mp_dstirq;
909 static int __init find_isa_irq_apic(int irq, int type)
913 for (i = 0; i < mp_irq_entries; i++) {
914 int lbus = mp_irqs[i].mp_srcbus;
916 if (test_bit(lbus, mp_bus_not_pci) &&
917 (mp_irqs[i].mp_irqtype == type) &&
918 (mp_irqs[i].mp_srcbusirq == irq))
921 if (i < mp_irq_entries) {
923 for(apic = 0; apic < nr_ioapics; apic++) {
924 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
933 * Find a specific PCI IRQ entry.
934 * Not an __init, possibly needed by modules
936 static int pin_2_irq(int idx, int apic, int pin);
938 int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
940 int apic, i, best_guess = -1;
942 apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
944 if (test_bit(bus, mp_bus_not_pci)) {
945 apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
948 for (i = 0; i < mp_irq_entries; i++) {
949 int lbus = mp_irqs[i].mp_srcbus;
951 for (apic = 0; apic < nr_ioapics; apic++)
952 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
953 mp_irqs[i].mp_dstapic == MP_APIC_ALL)
956 if (!test_bit(lbus, mp_bus_not_pci) &&
957 !mp_irqs[i].mp_irqtype &&
959 (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
960 int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq);
962 if (!(apic || IO_APIC_IRQ(irq)))
965 if (pin == (mp_irqs[i].mp_srcbusirq & 3))
968 * Use the first all-but-pin matching entry as a
969 * best-guess fuzzy result for broken mptables.
978 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
980 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
982 * EISA Edge/Level control register, ELCR
984 static int EISA_ELCR(unsigned int irq)
987 unsigned int port = 0x4d0 + (irq >> 3);
988 return (inb(port) >> (irq & 7)) & 1;
990 apic_printk(APIC_VERBOSE, KERN_INFO
991 "Broken MPtable reports ISA irq %d\n", irq);
997 /* ISA interrupts are always polarity zero edge triggered,
998 * when listed as conforming in the MP table. */
1000 #define default_ISA_trigger(idx) (0)
1001 #define default_ISA_polarity(idx) (0)
1003 /* EISA interrupts are always polarity zero and can be edge or level
1004 * trigger depending on the ELCR value. If an interrupt is listed as
1005 * EISA conforming in the MP table, that means its trigger type must
1006 * be read in from the ELCR */
1008 #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
1009 #define default_EISA_polarity(idx) default_ISA_polarity(idx)
1011 /* PCI interrupts are always polarity one level triggered,
1012 * when listed as conforming in the MP table. */
1014 #define default_PCI_trigger(idx) (1)
1015 #define default_PCI_polarity(idx) (1)
1017 /* MCA interrupts are always polarity zero level triggered,
1018 * when listed as conforming in the MP table. */
1020 #define default_MCA_trigger(idx) (1)
1021 #define default_MCA_polarity(idx) default_ISA_polarity(idx)
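/*
 * Summary of the defaults above (polarity: 0 = active high, 1 = active
 * low; trigger: 0 = edge, 1 = level):
 *
 *	ISA  : polarity 0, trigger 0
 *	EISA : polarity 0, trigger read from the ELCR
 *	PCI  : polarity 1, trigger 1
 *	MCA  : polarity 0, trigger 1
 */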
1023 static int MPBIOS_polarity(int idx)
1025 int bus = mp_irqs[idx].mp_srcbus;
1029 * Determine IRQ line polarity (high active or low active):
1031 switch (mp_irqs[idx].mp_irqflag & 3)
1033 case 0: /* conforms, ie. bus-type dependent polarity */
1034 if (test_bit(bus, mp_bus_not_pci))
1035 polarity = default_ISA_polarity(idx);
1037 polarity = default_PCI_polarity(idx);
1039 case 1: /* high active */
1044 case 2: /* reserved */
1046 printk(KERN_WARNING "broken BIOS!!\n");
1050 case 3: /* low active */
1055 default: /* invalid */
1057 printk(KERN_WARNING "broken BIOS!!\n");
1065 static int MPBIOS_trigger(int idx)
1067 int bus = mp_irqs[idx].mp_srcbus;
1071 * Determine IRQ trigger mode (edge or level sensitive):
1073 switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
1075 case 0: /* conforms, ie. bus-type dependent */
1076 if (test_bit(bus, mp_bus_not_pci))
1077 trigger = default_ISA_trigger(idx);
1079 trigger = default_PCI_trigger(idx);
1080 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
1081 switch (mp_bus_id_to_type[bus]) {
1082 case MP_BUS_ISA: /* ISA pin */
1084 /* set before the switch */
1087 case MP_BUS_EISA: /* EISA pin */
1089 trigger = default_EISA_trigger(idx);
1092 case MP_BUS_PCI: /* PCI pin */
1094 /* set before the switch */
1097 case MP_BUS_MCA: /* MCA pin */
1099 trigger = default_MCA_trigger(idx);
1104 printk(KERN_WARNING "broken BIOS!!\n");
1116 case 2: /* reserved */
1118 printk(KERN_WARNING "broken BIOS!!\n");
1127 default: /* invalid */
1129 printk(KERN_WARNING "broken BIOS!!\n");
1137 static inline int irq_polarity(int idx)
1139 return MPBIOS_polarity(idx);
1142 static inline int irq_trigger(int idx)
1144 return MPBIOS_trigger(idx);
1147 int (*ioapic_renumber_irq)(int ioapic, int irq);
1148 static int pin_2_irq(int idx, int apic, int pin)
1151 int bus = mp_irqs[idx].mp_srcbus;
1154 * Debugging check, we are in big trouble if this message pops up!
1156 if (mp_irqs[idx].mp_dstirq != pin)
1157 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
1159 if (test_bit(bus, mp_bus_not_pci)) {
1160 irq = mp_irqs[idx].mp_srcbusirq;
1163 * PCI IRQs are mapped in order
1167 irq += nr_ioapic_registers[i++];
1170 * For MPS mode, so far only needed by ES7000 platform
1172 if (ioapic_renumber_irq)
1173 irq = ioapic_renumber_irq(apic, irq);
1176 #ifdef CONFIG_X86_32
1178 * PCI IRQ command line redirection. Yes, limits are hardcoded.
1180 if ((pin >= 16) && (pin <= 23)) {
1181 if (pirq_entries[pin-16] != -1) {
1182 if (!pirq_entries[pin-16]) {
1183 apic_printk(APIC_VERBOSE, KERN_DEBUG
1184 "disabling PIRQ%d\n", pin-16);
1186 irq = pirq_entries[pin-16];
1187 apic_printk(APIC_VERBOSE, KERN_DEBUG
1188 "using PIRQ%d -> IRQ %d\n",
1198 void lock_vector_lock(void)
1200 /* Used to ensure that the online set of cpus does not change
1201 * during assign_irq_vector.
1203 spin_lock(&vector_lock);
1206 void unlock_vector_lock(void)
1208 spin_unlock(&vector_lock);
1211 static int __assign_irq_vector(int irq, cpumask_t mask)
1214 * NOTE! The local APIC isn't very good at handling
1215 * multiple interrupts at the same interrupt level.
1216 * As the interrupt level is determined by taking the
1217 * vector number and shifting that right by 4, we
1218 * want to spread these out a bit so that they don't
1219 * all fall in the same interrupt level.
1221 * Also, we've got to be careful not to trash gate
1222 * 0x80, because int 0x80 is hm, kind of importantish. ;)
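/*
 * The priority class of a vector is (vector >> 4): e.g. vectors 0x31
 * and 0x3e share class 3 while 0x41 sits in class 4, which is why the
 * search below steps through vectors and rotates current_offset rather
 * than handing vectors out strictly sequentially.
 */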
1224 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
1225 unsigned int old_vector;
1227 struct irq_cfg *cfg;
1231 /* Only try and allocate irqs on cpus that are present */
1232 cpus_and(mask, mask, cpu_online_map);
1234 if ((cfg->move_in_progress) || cfg->move_cleanup_count)
1237 old_vector = cfg->vector;
1240 cpus_and(tmp, cfg->domain, mask);
1241 if (!cpus_empty(tmp))
1245 for_each_cpu_mask_nr(cpu, mask) {
1246 cpumask_t domain, new_mask;
1250 domain = vector_allocation_domain(cpu);
1251 cpus_and(new_mask, domain, cpu_online_map);
1253 vector = current_vector;
1254 offset = current_offset;
1257 if (vector >= first_system_vector) {
1258 /* If we run out of vectors on large boxen, must share them. */
1259 offset = (offset + 1) % 8;
1260 vector = FIRST_DEVICE_VECTOR + offset;
1262 if (unlikely(current_vector == vector))
1264 #ifdef CONFIG_X86_64
1265 if (vector == IA32_SYSCALL_VECTOR)
1268 if (vector == SYSCALL_VECTOR)
1271 for_each_cpu_mask_nr(new_cpu, new_mask)
1272 if (per_cpu(vector_irq, new_cpu)[vector] != -1)
1275 current_vector = vector;
1276 current_offset = offset;
1278 cfg->move_in_progress = 1;
1279 cfg->old_domain = cfg->domain;
1281 for_each_cpu_mask_nr(new_cpu, new_mask)
1282 per_cpu(vector_irq, new_cpu)[vector] = irq;
1283 cfg->vector = vector;
1284 cfg->domain = domain;
1290 static int assign_irq_vector(int irq, cpumask_t mask)
1293 unsigned long flags;
1295 spin_lock_irqsave(&vector_lock, flags);
1296 err = __assign_irq_vector(irq, mask);
1297 spin_unlock_irqrestore(&vector_lock, flags);
1301 static void __clear_irq_vector(int irq)
1303 struct irq_cfg *cfg;
1308 BUG_ON(!cfg->vector);
1310 vector = cfg->vector;
1311 cpus_and(mask, cfg->domain, cpu_online_map);
1312 for_each_cpu_mask_nr(cpu, mask)
1313 per_cpu(vector_irq, cpu)[vector] = -1;
1316 cpus_clear(cfg->domain);
1319 void __setup_vector_irq(int cpu)
1321 /* Initialize vector_irq on a new cpu */
1322 /* This function must be called with vector_lock held */
1324 struct irq_cfg *cfg;
1326 /* Mark the inuse vectors */
1327 for_each_irq_cfg(irq, cfg) {
1328 if (!cpu_isset(cpu, cfg->domain))
1330 vector = cfg->vector;
1331 per_cpu(vector_irq, cpu)[vector] = irq;
1333 /* Mark the free vectors */
1334 for (vector = 0; vector < NR_VECTORS; ++vector) {
1335 irq = per_cpu(vector_irq, cpu)[vector];
1340 if (!cpu_isset(cpu, cfg->domain))
1341 per_cpu(vector_irq, cpu)[vector] = -1;
1345 static struct irq_chip ioapic_chip;
1346 #ifdef CONFIG_INTR_REMAP
1347 static struct irq_chip ir_ioapic_chip;
1350 #define IOAPIC_AUTO -1
1351 #define IOAPIC_EDGE 0
1352 #define IOAPIC_LEVEL 1
1354 #ifdef CONFIG_X86_32
1355 static inline int IO_APIC_irq_trigger(int irq)
1359 for (apic = 0; apic < nr_ioapics; apic++) {
1360 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1361 idx = find_irq_entry(apic, pin, mp_INT);
1362 if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
1363 return irq_trigger(idx);
1367 * nonexistent IRQs are edge default
1372 static inline int IO_APIC_irq_trigger(int irq)
1378 static void ioapic_register_intr(int irq, unsigned long trigger)
1380 struct irq_desc *desc;
1382 /* first time to use this irq_desc */
1384 desc = irq_to_desc(irq);
1386 desc = irq_to_desc_alloc(irq);
1388 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1389 trigger == IOAPIC_LEVEL)
1390 desc->status |= IRQ_LEVEL;
1392 desc->status &= ~IRQ_LEVEL;
1394 #ifdef CONFIG_INTR_REMAP
1395 if (irq_remapped(irq)) {
1396 desc->status |= IRQ_MOVE_PCNTXT;
1398 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1402 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1403 handle_edge_irq, "edge");
1407 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1408 trigger == IOAPIC_LEVEL)
1409 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1413 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1414 handle_edge_irq, "edge");
1417 static int setup_ioapic_entry(int apic, int irq,
1418 struct IO_APIC_route_entry *entry,
1419 unsigned int destination, int trigger,
1420 int polarity, int vector)
1423 * add it to the IO-APIC irq-routing table:
1425 memset(entry,0,sizeof(*entry));
1427 #ifdef CONFIG_INTR_REMAP
1428 if (intr_remapping_enabled) {
1429 struct intel_iommu *iommu = map_ioapic_to_ir(apic);
1431 struct IR_IO_APIC_route_entry *ir_entry =
1432 (struct IR_IO_APIC_route_entry *) entry;
1436 panic("No mapping iommu for ioapic %d\n", apic);
1438 index = alloc_irte(iommu, irq, 1);
1440 panic("Failed to allocate IRTE for ioapic %d\n", apic);
1442 memset(&irte, 0, sizeof(irte));
1445 irte.dst_mode = INT_DEST_MODE;
1446 irte.trigger_mode = trigger;
1447 irte.dlvry_mode = INT_DELIVERY_MODE;
1448 irte.vector = vector;
1449 irte.dest_id = IRTE_DEST(destination);
1451 modify_irte(irq, &irte);
1453 ir_entry->index2 = (index >> 15) & 0x1;
1455 ir_entry->format = 1;
1456 ir_entry->index = (index & 0x7fff);
1460 entry->delivery_mode = INT_DELIVERY_MODE;
1461 entry->dest_mode = INT_DEST_MODE;
1462 entry->dest = destination;
1465 entry->mask = 0; /* enable IRQ */
1466 entry->trigger = trigger;
1467 entry->polarity = polarity;
1468 entry->vector = vector;
1470 /* Mask level triggered irqs.
1471 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
1478 static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
1479 int trigger, int polarity)
1481 struct irq_cfg *cfg;
1482 struct IO_APIC_route_entry entry;
1485 if (!IO_APIC_IRQ(irq))
1491 if (assign_irq_vector(irq, mask))
1494 cpus_and(mask, cfg->domain, mask);
1496 apic_printk(APIC_VERBOSE,KERN_DEBUG
1497 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
1498 "IRQ %d Mode:%i Active:%i)\n",
1499 apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
1500 irq, trigger, polarity);
1503 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
1504 cpu_mask_to_apicid(mask), trigger, polarity,
1506 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1507 mp_ioapics[apic].mp_apicid, pin);
1508 __clear_irq_vector(irq);
1512 ioapic_register_intr(irq, trigger);
1514 disable_8259A_irq(irq);
1516 ioapic_write_entry(apic, pin, entry);
1519 static void __init setup_IO_APIC_irqs(void)
1521 int apic, pin, idx, irq;
1524 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1526 for (apic = 0; apic < nr_ioapics; apic++) {
1527 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1529 idx = find_irq_entry(apic, pin, mp_INT);
1531 apic_printk(APIC_VERBOSE,
1532 KERN_DEBUG " %d-%d",
1533 mp_ioapics[apic].mp_apicid, pin);
1539 irq = pin_2_irq(idx, apic, pin);
1540 #ifdef CONFIG_X86_32
1541 if (multi_timer_check(apic, irq))
1544 add_pin_to_irq(irq, apic, pin);
1546 setup_IO_APIC_irq(apic, pin, irq,
1547 irq_trigger(idx), irq_polarity(idx));
1550 apic_printk(APIC_VERBOSE,
1551 KERN_DEBUG " (apicid-pin) not connected\n");
1557 apic_printk(APIC_VERBOSE,
1558 KERN_DEBUG " (apicid-pin) not connected\n");
1562 * Set up the timer pin, possibly with the 8259A-master behind.
1564 static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
1567 struct IO_APIC_route_entry entry;
1569 #ifdef CONFIG_INTR_REMAP
1570 if (intr_remapping_enabled)
1574 memset(&entry, 0, sizeof(entry));
1577 * We use logical delivery to get the timer IRQ
1580 entry.dest_mode = INT_DEST_MODE;
1581 entry.mask = 1; /* mask IRQ now */
1582 entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
1583 entry.delivery_mode = INT_DELIVERY_MODE;
1586 entry.vector = vector;
1589 * The timer IRQ doesn't have to know that behind the
1590 * scenes we may have an 8259A-master in AEOI mode ...
1592 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
1595 * Add it to the IO-APIC irq-routing table:
1597 ioapic_write_entry(apic, pin, entry);
1601 __apicdebuginit(void) print_IO_APIC(void)
1604 union IO_APIC_reg_00 reg_00;
1605 union IO_APIC_reg_01 reg_01;
1606 union IO_APIC_reg_02 reg_02;
1607 union IO_APIC_reg_03 reg_03;
1608 unsigned long flags;
1609 struct irq_cfg *cfg;
1612 if (apic_verbosity == APIC_QUIET)
1615 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1616 for (i = 0; i < nr_ioapics; i++)
1617 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1618 mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
1621 * We are a bit conservative about what we expect. We have to
1622 * know about every hardware change ASAP.
1624 printk(KERN_INFO "testing the IO APIC.......................\n");
1626 for (apic = 0; apic < nr_ioapics; apic++) {
1628 spin_lock_irqsave(&ioapic_lock, flags);
1629 reg_00.raw = io_apic_read(apic, 0);
1630 reg_01.raw = io_apic_read(apic, 1);
1631 if (reg_01.bits.version >= 0x10)
1632 reg_02.raw = io_apic_read(apic, 2);
1633 if (reg_01.bits.version >= 0x20)
1634 reg_03.raw = io_apic_read(apic, 3);
1635 spin_unlock_irqrestore(&ioapic_lock, flags);
1638 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
1639 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1640 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1641 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1642 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1644 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
1645 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
1647 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1648 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1651 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
1652 * but the value of reg_02 is read as the previous read register
1653 * value, so ignore it if reg_02 == reg_01.
1655 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1656 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1657 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1661 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
1662 * or reg_03, but the value of reg_0[23] is read as the previous read
1663 * register value, so ignore it if reg_03 == reg_0[12].
1665 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1666 reg_03.raw != reg_01.raw) {
1667 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1668 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1671 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1673 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1674 " Stat Dmod Deli Vect: \n");
1676 for (i = 0; i <= reg_01.bits.entries; i++) {
1677 struct IO_APIC_route_entry entry;
1679 entry = ioapic_read_entry(apic, i);
1681 printk(KERN_DEBUG " %02x %03X ",
1686 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1691 entry.delivery_status,
1693 entry.delivery_mode,
1698 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1699 for_each_irq_cfg(irq, cfg) {
1700 struct irq_pin_list *entry = cfg->irq_2_pin;
1703 printk(KERN_DEBUG "IRQ%d ", irq);
1705 printk("-> %d:%d", entry->apic, entry->pin);
1708 entry = entry->next;
1713 printk(KERN_INFO ".................................... done.\n");
1718 __apicdebuginit(void) print_APIC_bitfield(int base)
1723 if (apic_verbosity == APIC_QUIET)
1726 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1727 for (i = 0; i < 8; i++) {
1728 v = apic_read(base + i*0x10);
1729 for (j = 0; j < 32; j++) {
1739 __apicdebuginit(void) print_local_APIC(void *dummy)
1741 unsigned int v, ver, maxlvt;
1744 if (apic_verbosity == APIC_QUIET)
1747 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1748 smp_processor_id(), hard_smp_processor_id());
1749 v = apic_read(APIC_ID);
1750 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
1751 v = apic_read(APIC_LVR);
1752 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1753 ver = GET_APIC_VERSION(v);
1754 maxlvt = lapic_get_maxlvt();
1756 v = apic_read(APIC_TASKPRI);
1757 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1759 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1760 if (!APIC_XAPIC(ver)) {
1761 v = apic_read(APIC_ARBPRI);
1762 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1763 v & APIC_ARBPRI_MASK);
1765 v = apic_read(APIC_PROCPRI);
1766 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1770 * Remote read supported only in the 82489DX and local APIC for
1771 * Pentium processors.
1773 if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
1774 v = apic_read(APIC_RRR);
1775 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1778 v = apic_read(APIC_LDR);
1779 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1780 if (!x2apic_enabled()) {
1781 v = apic_read(APIC_DFR);
1782 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1784 v = apic_read(APIC_SPIV);
1785 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1787 printk(KERN_DEBUG "... APIC ISR field:\n");
1788 print_APIC_bitfield(APIC_ISR);
1789 printk(KERN_DEBUG "... APIC TMR field:\n");
1790 print_APIC_bitfield(APIC_TMR);
1791 printk(KERN_DEBUG "... APIC IRR field:\n");
1792 print_APIC_bitfield(APIC_IRR);
1794 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1795 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
1796 apic_write(APIC_ESR, 0);
1798 v = apic_read(APIC_ESR);
1799 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1802 icr = apic_icr_read();
1803 printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
1804 printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
1806 v = apic_read(APIC_LVTT);
1807 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1809 if (maxlvt > 3) { /* PC is LVT#4. */
1810 v = apic_read(APIC_LVTPC);
1811 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1813 v = apic_read(APIC_LVT0);
1814 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1815 v = apic_read(APIC_LVT1);
1816 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1818 if (maxlvt > 2) { /* ERR is LVT#3. */
1819 v = apic_read(APIC_LVTERR);
1820 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1823 v = apic_read(APIC_TMICT);
1824 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1825 v = apic_read(APIC_TMCCT);
1826 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1827 v = apic_read(APIC_TDCR);
1828 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1832 __apicdebuginit(void) print_all_local_APICs(void)
1837 for_each_online_cpu(cpu)
1838 smp_call_function_single(cpu, print_local_APIC, NULL, 1);
1842 __apicdebuginit(void) print_PIC(void)
1845 unsigned long flags;
1847 if (apic_verbosity == APIC_QUIET)
1850 printk(KERN_DEBUG "\nprinting PIC contents\n");
1852 spin_lock_irqsave(&i8259A_lock, flags);
1854 v = inb(0xa1) << 8 | inb(0x21);
1855 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
1857 v = inb(0xa0) << 8 | inb(0x20);
1858 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1862 v = inb(0xa0) << 8 | inb(0x20);
1866 spin_unlock_irqrestore(&i8259A_lock, flags);
1868 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1870 v = inb(0x4d1) << 8 | inb(0x4d0);
1871 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1874 __apicdebuginit(int) print_all_ICs(void)
1877 print_all_local_APICs();
1883 fs_initcall(print_all_ICs);
1886 /* Where, if anywhere, is the i8259 connected in external int mode */
1887 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
1889 void __init enable_IO_APIC(void)
1891 union IO_APIC_reg_01 reg_01;
1892 int i8259_apic, i8259_pin;
1894 unsigned long flags;
1896 #ifdef CONFIG_X86_32
1899 for (i = 0; i < MAX_PIRQS; i++)
1900 pirq_entries[i] = -1;
1904 * The number of IO-APIC IRQ registers (== #pins):
1906 for (apic = 0; apic < nr_ioapics; apic++) {
1907 spin_lock_irqsave(&ioapic_lock, flags);
1908 reg_01.raw = io_apic_read(apic, 1);
1909 spin_unlock_irqrestore(&ioapic_lock, flags);
1910 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1912 for(apic = 0; apic < nr_ioapics; apic++) {
1914 /* See if any of the pins is in ExtINT mode */
1915 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1916 struct IO_APIC_route_entry entry;
1917 entry = ioapic_read_entry(apic, pin);
1919 /* If the interrupt line is enabled and in ExtInt mode
1920 * I have found the pin where the i8259 is connected.
1922 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1923 ioapic_i8259.apic = apic;
1924 ioapic_i8259.pin = pin;
1930 /* Look to see if the MP table has reported the ExtINT */
1931 /* If we could not find the appropriate pin by looking at the ioapic
1932 * the i8259 is probably not connected to the ioapic, but give the
1933 * mptable a chance anyway.
1935 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1936 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1937 /* Trust the MP table if nothing is setup in the hardware */
1938 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1939 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1940 ioapic_i8259.pin = i8259_pin;
1941 ioapic_i8259.apic = i8259_apic;
1943 /* Complain if the MP table and the hardware disagree */
1944 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1945 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1947 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1951 * Do not trust the IO-APIC being empty at bootup
1957 * Not an __init, needed by the reboot code
1959 void disable_IO_APIC(void)
1962 * Clear the IO-APIC before rebooting:
1967 * If the i8259 is routed through an IOAPIC
1968 * Put that IOAPIC in virtual wire mode
1969 * so legacy interrupts can be delivered.
1971 if (ioapic_i8259.pin != -1) {
1972 struct IO_APIC_route_entry entry;
1974 memset(&entry, 0, sizeof(entry));
1975 entry.mask = 0; /* Enabled */
1976 entry.trigger = 0; /* Edge */
1978 entry.polarity = 0; /* High */
1979 entry.delivery_status = 0;
1980 entry.dest_mode = 0; /* Physical */
1981 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1983 entry.dest = read_apic_id();
1986 * Add it to the IO-APIC irq-routing table:
1988 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
1991 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1994 #ifdef CONFIG_X86_32
1996 * function to set the IO-APIC physical IDs based on the
1997 * values stored in the MPC table.
1999 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
2002 static void __init setup_ioapic_ids_from_mpc(void)
2004 union IO_APIC_reg_00 reg_00;
2005 physid_mask_t phys_id_present_map;
2008 unsigned char old_id;
2009 unsigned long flags;
2011 if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
2015 * Don't check I/O APIC IDs for xAPIC systems. They have
2016 * no meaning without the serial APIC bus.
2018 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2019 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
2022 * This is broken; anything with a real cpu count has to
2023 * circumvent this idiocy regardless.
2025 phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
2028 * Set the IOAPIC ID to the value stored in the MPC table.
2030 for (apic = 0; apic < nr_ioapics; apic++) {
2032 /* Read the register 0 value */
2033 spin_lock_irqsave(&ioapic_lock, flags);
2034 reg_00.raw = io_apic_read(apic, 0);
2035 spin_unlock_irqrestore(&ioapic_lock, flags);
2037 old_id = mp_ioapics[apic].mp_apicid;
2039 if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
2040 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
2041 apic, mp_ioapics[apic].mp_apicid);
2042 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2044 mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
2048 * Sanity check, is the ID really free? Every APIC in a
2049 * system must have a unique ID or we get lots of nice
2050 * 'stuck on smp_invalidate_needed IPI wait' messages.
2052 if (check_apicid_used(phys_id_present_map,
2053 mp_ioapics[apic].mp_apicid)) {
2054 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
2055 apic, mp_ioapics[apic].mp_apicid);
2056 for (i = 0; i < get_physical_broadcast(); i++)
2057 if (!physid_isset(i, phys_id_present_map))
2059 if (i >= get_physical_broadcast())
2060 panic("Max APIC ID exceeded!\n");
2061 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2063 physid_set(i, phys_id_present_map);
2064 mp_ioapics[apic].mp_apicid = i;
2067 tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
2068 apic_printk(APIC_VERBOSE, "Setting %d in the "
2069 "phys_id_present_map\n",
2070 mp_ioapics[apic].mp_apicid);
2071 physids_or(phys_id_present_map, phys_id_present_map, tmp);
2076 * We need to adjust the IRQ routing table
2077 * if the ID changed.
2079 if (old_id != mp_ioapics[apic].mp_apicid)
2080 for (i = 0; i < mp_irq_entries; i++)
2081 if (mp_irqs[i].mp_dstapic == old_id)
2082 mp_irqs[i].mp_dstapic
2083 = mp_ioapics[apic].mp_apicid;
2086 * Read the right value from the MPC table and
2087 * write it into the ID register.
2089 apic_printk(APIC_VERBOSE, KERN_INFO
2090 "...changing IO-APIC physical APIC ID to %d ...",
2091 mp_ioapics[apic].mp_apicid);
2093 reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
2094 spin_lock_irqsave(&ioapic_lock, flags);
2095 io_apic_write(apic, 0, reg_00.raw);
2096 spin_unlock_irqrestore(&ioapic_lock, flags);
2101 spin_lock_irqsave(&ioapic_lock, flags);
2102 reg_00.raw = io_apic_read(apic, 0);
2103 spin_unlock_irqrestore(&ioapic_lock, flags);
2104 if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
2105 printk("could not set ID!\n");
2107 apic_printk(APIC_VERBOSE, " ok.\n");
2112 int no_timer_check __initdata;
2114 static int __init notimercheck(char *s)
2119 __setup("no_timer_check", notimercheck);
2122 * There is a nasty bug in some older SMP boards: their mptable lies
2123 * about the timer IRQ. We do the following to work around the situation:
2125 * - timer IRQ defaults to IO-APIC IRQ
2126 * - if this function detects that timer IRQs are defunct, then we fall
2127 * back to ISA timer IRQs
2129 static int __init timer_irq_works(void)
2131 unsigned long t1 = jiffies;
2132 unsigned long flags;
2137 local_save_flags(flags);
2139 /* Let ten ticks pass... */
2140 mdelay((10 * 1000) / HZ);
2141 local_irq_restore(flags);
2144 * Expect a few ticks at least, to be sure some possible
2145 * glue logic does not lock up after one or two first
2146 * ticks in a non-ExtINT mode. Also the local APIC
2147 * might have cached one ExtINT interrupt. Finally, at
2148 * least one tick may be lost due to delays.
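/*
 * In other words: the mdelay() above covers roughly ten ticks' worth of
 * time, so requiring jiffies to have advanced by more than four below
 * leaves a comfortable margin even if a couple of ticks get lost.
 */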
2152 if (time_after(jiffies, t1 + 4))
2158 * In the SMP+IOAPIC case it might happen that there are an unspecified
2159 * number of pending IRQ events unhandled. These cases are very rare,
2160 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
2161 * better to do it this way as thus we do not have to be aware of
2162 * 'pending' interrupts in the IRQ path, except at this point.
2165 * Edge triggered needs to resend any interrupt
2166 * that was delayed but this is now handled in the device
2171 * Starting up an edge-triggered IO-APIC interrupt is
2172 * nasty - we need to make sure that we get the edge.
2173 * If it is already asserted for some reason, we need
2174 * to return 1 to indicate that it was pending.
2176 * This is not complete - we should be able to fake
2177 * an edge even if it isn't on the 8259A...
2180 static unsigned int startup_ioapic_irq(unsigned int irq)
2182 int was_pending = 0;
2183 unsigned long flags;
2185 spin_lock_irqsave(&ioapic_lock, flags);
2187 disable_8259A_irq(irq);
2188 if (i8259A_irq_pending(irq))
2191 __unmask_IO_APIC_irq(irq);
2192 spin_unlock_irqrestore(&ioapic_lock, flags);
2197 #ifdef CONFIG_X86_64
2198 static int ioapic_retrigger_irq(unsigned int irq)
2201 struct irq_cfg *cfg = irq_cfg(irq);
2202 unsigned long flags;
2204 spin_lock_irqsave(&vector_lock, flags);
2205 send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
2206 spin_unlock_irqrestore(&vector_lock, flags);
2211 static int ioapic_retrigger_irq(unsigned int irq)
2213 send_IPI_self(irq_cfg(irq)->vector);
2220 * Level and edge triggered IO-APIC interrupts need different handling,
2221 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
2222 * handled with the level-triggered descriptor, but that one has slightly
2223 * more overhead. Level-triggered interrupts cannot be handled with the
2224 * edge-triggered handler, without risking IRQ storms and other ugly
2230 #ifdef CONFIG_INTR_REMAP
2231 static void ir_irq_migration(struct work_struct *work);
2233 static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
2236 * Migrate the IO-APIC irq in the presence of intr-remapping.
2238 * For edge triggered, irq migration is a simple atomic update (of vector
2239 * and cpu destination) of the IRTE and a flush of the hardware cache.
2241 * For level triggered, we need to modify the io-apic RTE as well with the updated
2242 * vector information, along with modifying the IRTE with vector and destination.
2243 * So irq migration for level triggered is a little more complex compared to
2244 * edge triggered migration. But the good news is that we use the same algorithm
2245 * for level triggered migration as we have today, the only difference being that
2246 * we now initiate the irq migration from process context instead of the
2247 * interrupt context.
2249 * In future, when we do a directed EOI (combined with cpu EOI broadcast
2250 * suppression) to the IO-APIC, level triggered irq migration will also be
2251 * as simple as edge triggered migration and we can do the irq migration
2252 * with a simple atomic update to IO-APIC RTE.
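/*
 * migrate_ioapic_irq() below implements that sequence: assign a vector
 * for the new mask, rewrite the IO-APIC RTE only for level triggered
 * irqs, update the IRTE with the new vector/destination, and finally
 * kick the cleanup of the old vector on the old domain.
 */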
2254 static void migrate_ioapic_irq(int irq, cpumask_t mask)
2256 struct irq_cfg *cfg;
2257 struct irq_desc *desc;
2258 cpumask_t tmp, cleanup_mask;
2260 int modify_ioapic_rte;
2262 unsigned long flags;
2264 cpus_and(tmp, mask, cpu_online_map);
2265 if (cpus_empty(tmp))
2268 if (get_irte(irq, &irte))
2271 if (assign_irq_vector(irq, mask))
2275 cpus_and(tmp, cfg->domain, mask);
2276 dest = cpu_mask_to_apicid(tmp);
2278 desc = irq_to_desc(irq);
2279 modify_ioapic_rte = desc->status & IRQ_LEVEL;
2280 if (modify_ioapic_rte) {
2281 spin_lock_irqsave(&ioapic_lock, flags);
2282 __target_IO_APIC_irq(irq, dest, cfg->vector);
2283 spin_unlock_irqrestore(&ioapic_lock, flags);
2286 irte.vector = cfg->vector;
2287 irte.dest_id = IRTE_DEST(dest);
2290 * Modify the IRTE and flush the Interrupt entry cache.
2292 modify_irte(irq, &irte);
2294 if (cfg->move_in_progress) {
2295 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
2296 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2297 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2298 cfg->move_in_progress = 0;
2301 desc->affinity = mask;
2304 static int migrate_irq_remapped_level(int irq)
2307 struct irq_desc *desc = irq_to_desc(irq);
2309 mask_IO_APIC_irq(irq);
2311 if (io_apic_level_ack_pending(irq)) {
2313 * Interrupt in progress. Migrating irq now will change the
2314 * vector information in the IO-APIC RTE and that will confuse
2315 * the EOI broadcast performed by cpu.
2316 * So, delay the irq migration to the next instance.
2318 schedule_delayed_work(&ir_migration_work, 1);
2322 /* everything is clear; we have right of way */
2323 migrate_ioapic_irq(irq, desc->pending_mask);
2326 desc->status &= ~IRQ_MOVE_PENDING;
2327 cpus_clear(desc->pending_mask);
2330 unmask_IO_APIC_irq(irq);
2334 static void ir_irq_migration(struct work_struct *work)
2337 struct irq_desc *desc;
2339 for_each_irq_desc(irq, desc) {
2340 if (desc->status & IRQ_MOVE_PENDING) {
2341 unsigned long flags;
2343 spin_lock_irqsave(&desc->lock, flags);
2344 if (!desc->chip->set_affinity ||
2345 !(desc->status & IRQ_MOVE_PENDING)) {
2346 desc->status &= ~IRQ_MOVE_PENDING;
2347 spin_unlock_irqrestore(&desc->lock, flags);
2351 desc->chip->set_affinity(irq, desc->pending_mask);
2352 spin_unlock_irqrestore(&desc->lock, flags);
2358 * Migrates the IRQ destination in the process context.
2360 static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
2362 struct irq_desc *desc = irq_to_desc(irq);
2364 if (desc->status & IRQ_LEVEL) {
2365 desc->status |= IRQ_MOVE_PENDING;
2366 desc->pending_mask = mask;
2367 migrate_irq_remapped_level(irq);
2371 migrate_ioapic_irq(irq, mask);
2375 asmlinkage void smp_irq_move_cleanup_interrupt(void)
2377 unsigned vector, me;
2379 #ifdef CONFIG_X86_64
2384 me = smp_processor_id();
2385 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
2387 struct irq_desc *desc;
2388 struct irq_cfg *cfg;
2389 irq = __get_cpu_var(vector_irq)[vector];
2391 desc = irq_to_desc(irq);
2396 spin_lock(&desc->lock);
2397 if (!cfg->move_cleanup_count)
2400 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
2403 __get_cpu_var(vector_irq)[vector] = -1;
2404 cfg->move_cleanup_count--;
2406 spin_unlock(&desc->lock);
2412 static void irq_complete_move(unsigned int irq)
2414 struct irq_cfg *cfg = irq_cfg(irq);
2415 unsigned vector, me;
2417 if (likely(!cfg->move_in_progress))
2420 vector = ~get_irq_regs()->orig_ax;
2421 me = smp_processor_id();
2422 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
2423 cpumask_t cleanup_mask;
2425 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
2426 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2427 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2428 cfg->move_in_progress = 0;
2432 static inline void irq_complete_move(unsigned int irq) {}
2434 #ifdef CONFIG_INTR_REMAP
2435 static void ack_x2apic_level(unsigned int irq)
2440 static void ack_x2apic_edge(unsigned int irq)
2446 static void ack_apic_edge(unsigned int irq)
2448 irq_complete_move(irq);
2449 move_native_irq(irq);
2453 #ifdef CONFIG_X86_32
2454 atomic_t irq_mis_count;
2457 static void ack_apic_level(unsigned int irq)
2459 #ifdef CONFIG_X86_32
2463 int do_unmask_irq = 0;
2465 irq_complete_move(irq);
2466 #ifdef CONFIG_GENERIC_PENDING_IRQ
2467 /* If we are moving the irq we need to mask it */
2468 if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
2470 mask_IO_APIC_irq(irq);
2474 #ifdef CONFIG_X86_32
2476 * It appears there is an erratum which affects at least version 0x11
2477 * of I/O APIC (that's the 82093AA and cores integrated into various
2478 * chipsets). Under certain conditions a level-triggered interrupt is
2479 * erroneously delivered as edge-triggered one but the respective IRR
2480 * bit gets set nevertheless. As a result the I/O unit expects an EOI
2481 * message but it will never arrive and further interrupts are blocked
2482 * from the source. The exact reason is so far unknown, but the
2483 * phenomenon was observed when two consecutive interrupt requests
2484 * from a given source get delivered to the same CPU and the source is
2485 * temporarily disabled in between.
2487 * A workaround is to simulate an EOI message manually. We achieve it
2488 * by setting the trigger mode to edge and then to level when the edge
2489 * trigger mode gets detected in the TMR of a local APIC for a
2490 * level-triggered interrupt. We mask the source for the time of the
2491 * operation to prevent an edge-triggered interrupt escaping meanwhile.
2492 * The idea is from Manfred Spraul. --macro
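/*
 * The TMR is exposed as eight 32-bit words spaced 0x10 apart, so the
 * word containing our vector's bit lives at APIC_TMR + ((vector & ~0x1f) >> 1)
 * and the bit within it is (vector & 0x1f); that is what the read below
 * and the check further down look at.
 */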
2494 i = irq_cfg(irq)->vector;
2496 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
2500 * We must acknowledge the irq before we move it or the acknowledge will
2501 * not propagate properly.
2505 /* Now we can move and re-enable the irq */
2506 if (unlikely(do_unmask_irq)) {
2507 /* Only migrate the irq if the ack has been received.
2509 * On rare occasions the broadcast level triggered ack gets
2510 * delayed going to ioapics, and if we reprogram the
2511 * vector while Remote IRR is still set the irq will never
2514 * To prevent this scenario we read the Remote IRR bit
2515 * of the ioapic. This has two effects.
2516 * - On any sane system the read of the ioapic will
2517 * flush writes (and acks) going to the ioapic from
2519 * - We get to see if the ACK has actually been delivered.
2521 * Based on failed experiments of reprogramming the
2522 * ioapic entry from outside of irq context starting
2523 * with masking the ioapic entry and then polling until
2524 * Remote IRR was clear before reprogramming the
2525 * ioapic I don't trust the Remote IRR bit to be
2526 * completely accurate.
2528 * However there appears to be no other way to plug
2529 * this race, so if the Remote IRR bit is not
2530 * accurate and is causing problems then it is a hardware bug
2531 * and you can go talk to the chipset vendor about it.
2533 if (!io_apic_level_ack_pending(irq))
2534 move_masked_irq(irq);
2535 unmask_IO_APIC_irq(irq);
2538 #ifdef CONFIG_X86_32
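/*
 * If the TMR bit for this vector is clear even though we are handling a
 * level-triggered interrupt, the erratum described above has hit: count
 * the mis-delivery and simulate the missing EOI by flipping the RTE to
 * edge mode (which clears Remote IRR) and back to level, all while the
 * entry is masked and ioapic_lock is held.
 */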
2539 if (!(v & (1 << (i & 0x1f)))) {
2540 atomic_inc(&irq_mis_count);
2541 spin_lock(&ioapic_lock);
2542 __mask_and_edge_IO_APIC_irq(irq);
2543 __unmask_and_level_IO_APIC_irq(irq);
2544 spin_unlock(&ioapic_lock);
2549 static struct irq_chip ioapic_chip __read_mostly = {
2551 .startup = startup_ioapic_irq,
2552 .mask = mask_IO_APIC_irq,
2553 .unmask = unmask_IO_APIC_irq,
2554 .ack = ack_apic_edge,
2555 .eoi = ack_apic_level,
2557 .set_affinity = set_ioapic_affinity_irq,
2559 .retrigger = ioapic_retrigger_irq,
2562 #ifdef CONFIG_INTR_REMAP
2563 static struct irq_chip ir_ioapic_chip __read_mostly = {
2564 .name = "IR-IO-APIC",
2565 .startup = startup_ioapic_irq,
2566 .mask = mask_IO_APIC_irq,
2567 .unmask = unmask_IO_APIC_irq,
2568 .ack = ack_x2apic_edge,
2569 .eoi = ack_x2apic_level,
2571 .set_affinity = set_ir_ioapic_affinity_irq,
2573 .retrigger = ioapic_retrigger_irq,
2577 static inline void init_IO_APIC_traps(void)
2580 struct irq_desc *desc;
2581 struct irq_cfg *cfg;
2584 * NOTE! The local APIC isn't very good at handling
2585 * multiple interrupts at the same interrupt level.
2586 * As the interrupt level is determined by taking the
2587 * vector number and shifting that right by 4, we
2588 * want to spread these out a bit so that they don't
2589 * all fall in the same interrupt level.
2591 * Also, we've got to be careful not to trash gate
2592 * 0x80, because int 0x80 is hm, kind of importantish. ;)
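 *
 * (Concretely: the priority level is vector >> 4, so e.g. vectors 0x61
 * and 0x69 would both land in level 6; spreading allocations across the
 * 0x10 boundaries keeps several busy sources from sharing one level.)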
2594 for_each_irq_cfg(irq, cfg) {
2595 if (IO_APIC_IRQ(irq) && !cfg->vector) {
2597 * Hmm.. We don't have an entry for this,
2598 * so default to an old-fashioned 8259
2599 * interrupt if we can..
2602 make_8259A_irq(irq);
2604 desc = irq_to_desc(irq);
2605 /* Strange. Oh, well.. */
2606 desc->chip = &no_irq_chip;
2613 * The local APIC irq-chip implementation:
2616 static void mask_lapic_irq(unsigned int irq)
2620 v = apic_read(APIC_LVT0);
2621 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2624 static void unmask_lapic_irq(unsigned int irq)
2628 v = apic_read(APIC_LVT0);
2629 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2632 static void ack_lapic_irq (unsigned int irq)
2637 static struct irq_chip lapic_chip __read_mostly = {
2638 .name = "local-APIC",
2639 .mask = mask_lapic_irq,
2640 .unmask = unmask_lapic_irq,
2641 .ack = ack_lapic_irq,
2644 static void lapic_register_intr(int irq)
2646 struct irq_desc *desc;
2648 desc = irq_to_desc(irq);
2649 desc->status &= ~IRQ_LEVEL;
2650 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2654 static void __init setup_nmi(void)
2657 * Dirty trick to enable the NMI watchdog ...
2658 * We put the 8259A master into AEOI mode and
2659 * unmask on all local APICs LVT0 as NMI.
2661 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2662 * is from Maciej W. Rozycki - so we do not have to EOI from
2663 * the NMI handler or the timer interrupt.
2665 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
2667 enable_NMI_through_LVT0();
2669 apic_printk(APIC_VERBOSE, " done.\n");
2673 * This looks a bit hackish, but it's about the only way of sending
2674 * a few INTA cycles to 8259As and any associated glue logic. ICR does
2675 * not support the ExtINT mode, unfortunately. We need to send these
2676 * cycles as some i82489DX-based boards have glue logic that keeps the
2677 * 8259A interrupt line asserted until INTA. --macro
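 *
 * Roughly what the function below does (my reading of the code, not a
 * statement about the hardware): it saves the IO-APIC RTE for the RTC
 * pin (IRQ8) and temporarily replaces it with an unmasked ExtINT entry
 * aimed at the boot CPU, programs the RTC for periodic interrupts and
 * polls RTC_PF so the resulting interrupts generate the desired INTA
 * cycles towards the 8259A, then restores both the RTC registers and
 * the original RTE.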
2679 static inline void __init unlock_ExtINT_logic(void)
2682 struct IO_APIC_route_entry entry0, entry1;
2683 unsigned char save_control, save_freq_select;
2685 pin = find_isa_irq_pin(8, mp_INT);
2690 apic = find_isa_irq_apic(8, mp_INT);
2696 entry0 = ioapic_read_entry(apic, pin);
2697 clear_IO_APIC_pin(apic, pin);
2699 memset(&entry1, 0, sizeof(entry1));
2701 entry1.dest_mode = 0; /* physical delivery */
2702 entry1.mask = 0; /* unmask IRQ now */
2703 entry1.dest = hard_smp_processor_id();
2704 entry1.delivery_mode = dest_ExtINT;
2705 entry1.polarity = entry0.polarity;
2709 ioapic_write_entry(apic, pin, entry1);
2711 save_control = CMOS_READ(RTC_CONTROL);
2712 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2713 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2715 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
2720 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2724 CMOS_WRITE(save_control, RTC_CONTROL);
2725 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2726 clear_IO_APIC_pin(apic, pin);
2728 ioapic_write_entry(apic, pin, entry0);
2731 static int disable_timer_pin_1 __initdata;
2732 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */
2733 static int __init disable_timer_pin_setup(char *arg)
2735 disable_timer_pin_1 = 1;
2738 early_param("disable_timer_pin_1", disable_timer_pin_setup);
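/*
 * Usage note: this is purely a boot-time switch, e.g. appending
 * "disable_timer_pin_1" to the kernel command line; early_param() means
 * it is parsed well before check_timer() runs and there is no runtime
 * knob for it.
 */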
2740 int timer_through_8259 __initdata;
2743 * This code may look a bit paranoid, but it's supposed to cooperate with
2744 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
2745 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
2746 * fanatically on his truly buggy board.
2748 * FIXME: really need to revamp this for all platforms.
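 *
 * The probe order below, roughly: IRQ0 straight through the IO-APIC pin
 * reported for it (pin1); then the pin wired to the 8259A ExtINT output
 * (pin2); then a local APIC "virtual wire" LVT0 entry; then plain
 * ExtINT mode; and only then panic.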
2750 static inline void __init check_timer(void)
2752 struct irq_cfg *cfg = irq_cfg(0);
2753 int apic1, pin1, apic2, pin2;
2754 unsigned long flags;
2758 local_irq_save(flags);
2760 ver = apic_read(APIC_LVR);
2761 ver = GET_APIC_VERSION(ver);
2764 * get/set the timer IRQ vector:
2766 disable_8259A_irq(0);
2767 assign_irq_vector(0, TARGET_CPUS);
2770 * As IRQ0 is to be enabled in the 8259A, the virtual
2771 * wire has to be disabled in the local APIC. Also
2772 * timer interrupts need to be acknowledged manually in
2773 * the 8259A for the i82489DX when using the NMI
2774 * watchdog as that APIC treats NMIs as level-triggered.
2775 * The AEOI mode will finish them in the 8259A automatically.
2778 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2780 #ifdef CONFIG_X86_32
2781 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
2784 pin1 = find_isa_irq_pin(0, mp_INT);
2785 apic1 = find_isa_irq_apic(0, mp_INT);
2786 pin2 = ioapic_i8259.pin;
2787 apic2 = ioapic_i8259.apic;
2789 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2790 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2791 cfg->vector, apic1, pin1, apic2, pin2);
2794 * Some BIOS writers are clueless and report the ExtINTA
2795 * I/O APIC input from the cascaded 8259A as the timer
2796 * interrupt input. So just in case, if only one pin
2797 * was found above, try it both directly and through the 8259A.
2801 #ifdef CONFIG_INTR_REMAP
2802 if (intr_remapping_enabled)
2803 panic("BIOS bug: timer not connected to IO-APIC");
2808 } else if (pin2 == -1) {
2815 * Ok, does IRQ0 through the IOAPIC work?
2818 add_pin_to_irq(0, apic1, pin1);
2819 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2821 unmask_IO_APIC_irq(0);
2822 if (timer_irq_works()) {
2823 if (nmi_watchdog == NMI_IO_APIC) {
2825 enable_8259A_irq(0);
2827 if (disable_timer_pin_1 > 0)
2828 clear_IO_APIC_pin(0, pin1);
2831 #ifdef CONFIG_INTR_REMAP
2832 if (intr_remapping_enabled)
2833 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2835 clear_IO_APIC_pin(apic1, pin1);
2837 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2838 "8254 timer not connected to IO-APIC\n");
2840 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2841 "(IRQ0) through the 8259A ...\n");
2842 apic_printk(APIC_QUIET, KERN_INFO
2843 "..... (found apic %d pin %d) ...\n", apic2, pin2);
2845 * legacy devices should be connected to IO APIC #0
2847 replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
2848 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2849 unmask_IO_APIC_irq(0);
2850 enable_8259A_irq(0);
2851 if (timer_irq_works()) {
2852 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2853 timer_through_8259 = 1;
2854 if (nmi_watchdog == NMI_IO_APIC) {
2855 disable_8259A_irq(0);
2857 enable_8259A_irq(0);
2862 * Cleanup, just in case ...
2864 disable_8259A_irq(0);
2865 clear_IO_APIC_pin(apic2, pin2);
2866 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
2869 if (nmi_watchdog == NMI_IO_APIC) {
2870 apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
2871 "through the IO-APIC - disabling NMI Watchdog!\n");
2872 nmi_watchdog = NMI_NONE;
2874 #ifdef CONFIG_X86_32
2878 apic_printk(APIC_QUIET, KERN_INFO
2879 "...trying to set up timer as Virtual Wire IRQ...\n");
2881 lapic_register_intr(0);
2882 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
2883 enable_8259A_irq(0);
2885 if (timer_irq_works()) {
2886 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2889 disable_8259A_irq(0);
2890 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
2891 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
2893 apic_printk(APIC_QUIET, KERN_INFO
2894 "...trying to set up timer as ExtINT IRQ...\n");
2898 apic_write(APIC_LVT0, APIC_DM_EXTINT);
2900 unlock_ExtINT_logic();
2902 if (timer_irq_works()) {
2903 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2906 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
2907 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
2908 "report. Then try booting with the 'noapic' option.\n");
2910 local_irq_restore(flags);
2914 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
2915 * to devices. However there may be an I/O APIC pin available for
2916 * this interrupt regardless. The pin may be left unconnected, but
2917 * typically it will be reused as an ExtINT cascade interrupt for
2918 * the master 8259A. In the MPS case such a pin will normally be
2919 * reported as an ExtINT interrupt in the MP table. With ACPI
2920 * there is no provision for ExtINT interrupts, and in the absence
2921 * of an override it would be treated as an ordinary ISA I/O APIC
2922 * interrupt, that is edge-triggered and unmasked by default. We
2923 * used to do this, but it caused problems on some systems because
2924 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
2925 * the same ExtINT cascade interrupt to drive the local APIC of the
2926 * bootstrap processor. Therefore we refrain from routing IRQ2 to
2927 * the I/O APIC in all cases now. No actual device should request
2928 * it anyway. --macro
2930 #define PIC_IRQS (1 << PIC_CASCADE_IR)
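/*
 * That is, only the cascade input (IRQ2) stays with the 8259A;
 * setup_IO_APIC() below sets io_apic_irqs = ~PIC_IRQS, handing every
 * other IRQ line to the IO-APIC.
 */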
2932 void __init setup_IO_APIC(void)
2935 #ifdef CONFIG_X86_32
2939 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
2943 io_apic_irqs = ~PIC_IRQS;
2945 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
2947 * Set up IO-APIC IRQ routing.
2949 #ifdef CONFIG_X86_32
2951 setup_ioapic_ids_from_mpc();
2954 setup_IO_APIC_irqs();
2955 init_IO_APIC_traps();
2960 * Called after all the initialization is done. If we didn't find any
2961 * APIC bugs then we can allow the modify fast path
2964 static int __init io_apic_bug_finalize(void)
2966 if (sis_apic_bug == -1)
2971 late_initcall(io_apic_bug_finalize);
2973 struct sysfs_ioapic_data {
2974 struct sys_device dev;
2975 struct IO_APIC_route_entry entry[0];
2977 static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
2979 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
2981 struct IO_APIC_route_entry *entry;
2982 struct sysfs_ioapic_data *data;
2985 data = container_of(dev, struct sysfs_ioapic_data, dev);
2986 entry = data->entry;
2987 for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
2988 *entry = ioapic_read_entry(dev->id, i);
2993 static int ioapic_resume(struct sys_device *dev)
2995 struct IO_APIC_route_entry *entry;
2996 struct sysfs_ioapic_data *data;
2997 unsigned long flags;
2998 union IO_APIC_reg_00 reg_00;
3001 data = container_of(dev, struct sysfs_ioapic_data, dev);
3002 entry = data->entry;
3004 spin_lock_irqsave(&ioapic_lock, flags);
3005 reg_00.raw = io_apic_read(dev->id, 0);
3006 if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
3007 reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
3008 io_apic_write(dev->id, 0, reg_00.raw);
3010 spin_unlock_irqrestore(&ioapic_lock, flags);
3011 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
3012 ioapic_write_entry(dev->id, i, entry[i]);
3017 static struct sysdev_class ioapic_sysdev_class = {
3019 .suspend = ioapic_suspend,
3020 .resume = ioapic_resume,
3023 static int __init ioapic_init_sysfs(void)
3025 struct sys_device * dev;
3028 error = sysdev_class_register(&ioapic_sysdev_class);
3032 for (i = 0; i < nr_ioapics; i++ ) {
3033 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
3034 * sizeof(struct IO_APIC_route_entry);
3035 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
3036 if (!mp_ioapic_data[i]) {
3037 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
3040 dev = &mp_ioapic_data[i]->dev;
3042 dev->cls = &ioapic_sysdev_class;
3043 error = sysdev_register(dev);
3045 kfree(mp_ioapic_data[i]);
3046 mp_ioapic_data[i] = NULL;
3047 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
3055 device_initcall(ioapic_init_sysfs);
3058 * Dynamic irq allocation and deallocation
3060 unsigned int create_irq_nr(unsigned int irq_want)
3062 /* Allocate an unused irq */
3065 unsigned long flags;
3066 struct irq_cfg *cfg_new;
3068 #ifndef CONFIG_HAVE_SPARSE_IRQ
3069 irq_want = nr_irqs - 1;
3073 spin_lock_irqsave(&vector_lock, flags);
3074 for (new = irq_want; new > 0; new--) {
3075 if (platform_legacy_irq(new))
3077 cfg_new = irq_cfg(new);
3078 if (cfg_new && cfg_new->vector != 0)
3080 /* check if we need to create one */
3082 cfg_new = irq_cfg_alloc(new);
3083 if (__assign_irq_vector(new, TARGET_CPUS) == 0)
3087 spin_unlock_irqrestore(&vector_lock, flags);
3090 dynamic_irq_init(irq);
3095 int create_irq(void)
3099 irq = create_irq_nr(nr_irqs - 1);
3107 void destroy_irq(unsigned int irq)
3109 unsigned long flags;
3111 dynamic_irq_cleanup(irq);
3113 #ifdef CONFIG_INTR_REMAP
3116 spin_lock_irqsave(&vector_lock, flags);
3117 __clear_irq_vector(irq);
3118 spin_unlock_irqrestore(&vector_lock, flags);
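/*
 * Minimal usage sketch for the dynamic IRQ API above (illustrative
 * only; the chip and handler are placeholders, but this is the pattern
 * the MSI/HT code below follows):
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return irq;
 *	set_irq_chip_and_handler_name(irq, &some_chip, handle_edge_irq,
 *				      "edge");
 *	...
 *	destroy_irq(irq);
 */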
3122 * MSI message composition
3124 #ifdef CONFIG_PCI_MSI
3125 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
3127 struct irq_cfg *cfg;
3133 err = assign_irq_vector(irq, tmp);
3138 cpus_and(tmp, cfg->domain, tmp);
3139 dest = cpu_mask_to_apicid(tmp);
3141 #ifdef CONFIG_INTR_REMAP
3142 if (irq_remapped(irq)) {
3147 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
3148 BUG_ON(ir_index == -1);
3150 memset (&irte, 0, sizeof(irte));
3153 irte.dst_mode = INT_DEST_MODE;
3154 irte.trigger_mode = 0; /* edge */
3155 irte.dlvry_mode = INT_DELIVERY_MODE;
3156 irte.vector = cfg->vector;
3157 irte.dest_id = IRTE_DEST(dest);
3159 modify_irte(irq, &irte);
3161 msg->address_hi = MSI_ADDR_BASE_HI;
3162 msg->data = sub_handle;
3163 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
3165 MSI_ADDR_IR_INDEX1(ir_index) |
3166 MSI_ADDR_IR_INDEX2(ir_index);
3170 msg->address_hi = MSI_ADDR_BASE_HI;
3173 ((INT_DEST_MODE == 0) ?
3174 MSI_ADDR_DEST_MODE_PHYSICAL:
3175 MSI_ADDR_DEST_MODE_LOGICAL) |
3176 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3177 MSI_ADDR_REDIRECTION_CPU:
3178 MSI_ADDR_REDIRECTION_LOWPRI) |
3179 MSI_ADDR_DEST_ID(dest);
3182 MSI_DATA_TRIGGER_EDGE |
3183 MSI_DATA_LEVEL_ASSERT |
3184 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3185 MSI_DATA_DELIVERY_FIXED:
3186 MSI_DATA_DELIVERY_LOWPRI) |
3187 MSI_DATA_VECTOR(cfg->vector);
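/*
 * Net effect of the non-remapped branch (my summary of the fields set
 * above): address_hi/address_lo point the message at the 0xFEExxxxx
 * APIC address range and encode the destination mode, redirection hint
 * and destination APIC ID, while data carries the trigger mode,
 * delivery mode and vector - i.e. the same information an IO-APIC RTE
 * would hold, just written by the device itself.
 */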
3193 static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3195 struct irq_cfg *cfg;
3199 struct irq_desc *desc;
3201 cpus_and(tmp, mask, cpu_online_map);
3202 if (cpus_empty(tmp))
3205 if (assign_irq_vector(irq, mask))
3209 cpus_and(tmp, cfg->domain, mask);
3210 dest = cpu_mask_to_apicid(tmp);
3212 read_msi_msg(irq, &msg);
3214 msg.data &= ~MSI_DATA_VECTOR_MASK;
3215 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3216 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3217 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3219 write_msi_msg(irq, &msg);
3220 desc = irq_to_desc(irq);
3221 desc->affinity = mask;
3224 #ifdef CONFIG_INTR_REMAP
3226 * Migrate the MSI irq to another cpumask. This migration is
3227 * done in the process context using interrupt-remapping hardware.
3229 static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3231 struct irq_cfg *cfg;
3233 cpumask_t tmp, cleanup_mask;
3235 struct irq_desc *desc;
3237 cpus_and(tmp, mask, cpu_online_map);
3238 if (cpus_empty(tmp))
3241 if (get_irte(irq, &irte))
3244 if (assign_irq_vector(irq, mask))
3248 cpus_and(tmp, cfg->domain, mask);
3249 dest = cpu_mask_to_apicid(tmp);
3251 irte.vector = cfg->vector;
3252 irte.dest_id = IRTE_DEST(dest);
3255 * atomically update the IRTE with the new destination and vector.
3257 modify_irte(irq, &irte);
3260 * After this point, all the interrupts will start arriving
3261 * at the new destination. So, time to cleanup the previous
3262 * vector allocation.
3264 if (cfg->move_in_progress) {
3265 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
3266 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
3267 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
3268 cfg->move_in_progress = 0;
3271 desc = irq_to_desc(irq);
3272 desc->affinity = mask;
3275 #endif /* CONFIG_SMP */
3278 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
3279 * which implement the MSI or MSI-X Capability Structure.
3281 static struct irq_chip msi_chip = {
3283 .unmask = unmask_msi_irq,
3284 .mask = mask_msi_irq,
3285 .ack = ack_apic_edge,
3287 .set_affinity = set_msi_irq_affinity,
3289 .retrigger = ioapic_retrigger_irq,
3292 #ifdef CONFIG_INTR_REMAP
3293 static struct irq_chip msi_ir_chip = {
3294 .name = "IR-PCI-MSI",
3295 .unmask = unmask_msi_irq,
3296 .mask = mask_msi_irq,
3297 .ack = ack_x2apic_edge,
3299 .set_affinity = ir_set_msi_irq_affinity,
3301 .retrigger = ioapic_retrigger_irq,
3305 * Map the PCI dev to the corresponding remapping hardware unit
3306 * and allocate 'nvec' consecutive interrupt-remapping table entries
3309 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
3311 struct intel_iommu *iommu;
3314 iommu = map_dev_to_ir(dev);
3317 "Unable to map PCI %s to iommu\n", pci_name(dev));
3321 index = alloc_irte(iommu, irq, nvec);
3324 "Unable to allocate %d IRTE for PCI %s\n", nvec,
3332 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
3337 ret = msi_compose_msg(dev, irq, &msg);
3341 set_irq_msi(irq, desc);
3342 write_msi_msg(irq, &msg);
3344 #ifdef CONFIG_INTR_REMAP
3345 if (irq_remapped(irq)) {
3346 struct irq_desc *desc = irq_to_desc(irq);
3348 * irq migration in process context
3350 desc->status |= IRQ_MOVE_PCNTXT;
3351 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
3354 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
3359 static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
3363 irq = dev->bus->number;
3371 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
3375 unsigned int irq_want;
3377 irq_want = build_irq_for_pci_dev(dev) + 0x100;
3379 irq = create_irq_nr(irq_want);
3383 #ifdef CONFIG_INTR_REMAP
3384 if (!intr_remapping_enabled)
3387 ret = msi_alloc_irte(dev, irq, 1);
3392 ret = setup_msi_irq(dev, desc, irq);
3399 #ifdef CONFIG_INTR_REMAP
3406 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3409 int ret, sub_handle;
3410 struct msi_desc *desc;
3411 unsigned int irq_want;
3413 #ifdef CONFIG_INTR_REMAP
3414 struct intel_iommu *iommu = NULL;
3418 irq_want = build_irq_for_pci_dev(dev) + 0x100;
3420 list_for_each_entry(desc, &dev->msi_list, list) {
3421 irq = create_irq_nr(irq_want--);
3424 #ifdef CONFIG_INTR_REMAP
3425 if (!intr_remapping_enabled)
3430 * allocate the consecutive block of IRTE's
3433 index = msi_alloc_irte(dev, irq, nvec);
3439 iommu = map_dev_to_ir(dev);
3445 * setup the mapping between the irq and the IRTE
3446 * base index, the sub_handle pointing to the
3447 * appropriate interrupt remap table entry.
3449 set_irte_irq(irq, iommu, index, sub_handle);
3453 ret = setup_msi_irq(dev, desc, irq);
3465 void arch_teardown_msi_irq(unsigned int irq)
3472 static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
3474 struct irq_cfg *cfg;
3478 struct irq_desc *desc;
3480 cpus_and(tmp, mask, cpu_online_map);
3481 if (cpus_empty(tmp))
3484 if (assign_irq_vector(irq, mask))
3488 cpus_and(tmp, cfg->domain, mask);
3489 dest = cpu_mask_to_apicid(tmp);
3491 dmar_msi_read(irq, &msg);
3493 msg.data &= ~MSI_DATA_VECTOR_MASK;
3494 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3495 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3496 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3498 dmar_msi_write(irq, &msg);
3499 desc = irq_to_desc(irq);
3500 desc->affinity = mask;
3502 #endif /* CONFIG_SMP */
3504 struct irq_chip dmar_msi_type = {
3506 .unmask = dmar_msi_unmask,
3507 .mask = dmar_msi_mask,
3508 .ack = ack_apic_edge,
3510 .set_affinity = dmar_msi_set_affinity,
3512 .retrigger = ioapic_retrigger_irq,
3515 int arch_setup_dmar_msi(unsigned int irq)
3520 ret = msi_compose_msg(NULL, irq, &msg);
3523 dmar_msi_write(irq, &msg);
3524 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
3530 #ifdef CONFIG_HPET_TIMER
3533 static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
3535 struct irq_cfg *cfg;
3536 struct irq_desc *desc;
3541 cpus_and(tmp, mask, cpu_online_map);
3542 if (cpus_empty(tmp))
3545 if (assign_irq_vector(irq, mask))
3549 cpus_and(tmp, cfg->domain, mask);
3550 dest = cpu_mask_to_apicid(tmp);
3552 hpet_msi_read(irq, &msg);
3554 msg.data &= ~MSI_DATA_VECTOR_MASK;
3555 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3556 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3557 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3559 hpet_msi_write(irq, &msg);
3560 desc = irq_to_desc(irq);
3561 desc->affinity = mask;
3563 #endif /* CONFIG_SMP */
3565 struct irq_chip hpet_msi_type = {
3567 .unmask = hpet_msi_unmask,
3568 .mask = hpet_msi_mask,
3569 .ack = ack_apic_edge,
3571 .set_affinity = hpet_msi_set_affinity,
3573 .retrigger = ioapic_retrigger_irq,
3576 int arch_setup_hpet_msi(unsigned int irq)
3581 ret = msi_compose_msg(NULL, irq, &msg);
3585 hpet_msi_write(irq, &msg);
3586 set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
3592 #endif /* CONFIG_PCI_MSI */
3594 * Hypertransport interrupt support
3596 #ifdef CONFIG_HT_IRQ
3600 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3602 struct ht_irq_msg msg;
3603 fetch_ht_irq_msg(irq, &msg);
3605 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
3606 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
3608 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
3609 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
3611 write_ht_irq_msg(irq, &msg);
3614 static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
3616 struct irq_cfg *cfg;
3619 struct irq_desc *desc;
3621 cpus_and(tmp, mask, cpu_online_map);
3622 if (cpus_empty(tmp))
3625 if (assign_irq_vector(irq, mask))
3629 cpus_and(tmp, cfg->domain, mask);
3630 dest = cpu_mask_to_apicid(tmp);
3632 target_ht_irq(irq, dest, cfg->vector);
3633 desc = irq_to_desc(irq);
3634 desc->affinity = mask;
3638 static struct irq_chip ht_irq_chip = {
3640 .mask = mask_ht_irq,
3641 .unmask = unmask_ht_irq,
3642 .ack = ack_apic_edge,
3644 .set_affinity = set_ht_irq_affinity,
3646 .retrigger = ioapic_retrigger_irq,
3649 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3651 struct irq_cfg *cfg;
3656 err = assign_irq_vector(irq, tmp);
3658 struct ht_irq_msg msg;
3662 cpus_and(tmp, cfg->domain, tmp);
3663 dest = cpu_mask_to_apicid(tmp);
3665 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3669 HT_IRQ_LOW_DEST_ID(dest) |
3670 HT_IRQ_LOW_VECTOR(cfg->vector) |
3671 ((INT_DEST_MODE == 0) ?
3672 HT_IRQ_LOW_DM_PHYSICAL :
3673 HT_IRQ_LOW_DM_LOGICAL) |
3674 HT_IRQ_LOW_RQEOI_EDGE |
3675 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3676 HT_IRQ_LOW_MT_FIXED :
3677 HT_IRQ_LOW_MT_ARBITRATED) |
3678 HT_IRQ_LOW_IRQ_MASKED;
3680 write_ht_irq_msg(irq, &msg);
3682 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
3683 handle_edge_irq, "edge");
3687 #endif /* CONFIG_HT_IRQ */
3689 int __init io_apic_get_redir_entries (int ioapic)
3691 union IO_APIC_reg_01 reg_01;
3692 unsigned long flags;
3694 spin_lock_irqsave(&ioapic_lock, flags);
3695 reg_01.raw = io_apic_read(ioapic, 1);
3696 spin_unlock_irqrestore(&ioapic_lock, flags);
3698 return reg_01.bits.entries;
3701 int __init probe_nr_irqs(void)
3708 int nr_min = NR_IRQS;
3711 for (idx = 0; idx < nr_ioapics; idx++)
3712 nr += io_apic_get_redir_entries(idx) + 1;
3714 /* double it for hotplug and msi and nmi */
3717 /* something wrong ? */
3724 /* --------------------------------------------------------------------------
3725 ACPI-based IOAPIC Configuration
3726 -------------------------------------------------------------------------- */
3730 #ifdef CONFIG_X86_32
3731 int __init io_apic_get_unique_id(int ioapic, int apic_id)
3733 union IO_APIC_reg_00 reg_00;
3734 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
3736 unsigned long flags;
3740 * The P4 platform supports up to 256 APIC IDs on two separate APIC
3741 * buses (one for LAPICs, one for IOAPICs), where predecessors only
3742 * support up to 16 on one shared APIC bus.
3744 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
3745 * advantage of new APIC bus architecture.
3748 if (physids_empty(apic_id_map))
3749 apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
3751 spin_lock_irqsave(&ioapic_lock, flags);
3752 reg_00.raw = io_apic_read(ioapic, 0);
3753 spin_unlock_irqrestore(&ioapic_lock, flags);
3755 if (apic_id >= get_physical_broadcast()) {
3756 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
3757 "%d\n", ioapic, apic_id, reg_00.bits.ID);
3758 apic_id = reg_00.bits.ID;
3762 * Every APIC in a system must have a unique ID or we get lots of nice
3763 * 'stuck on smp_invalidate_needed IPI wait' messages.
3765 if (check_apicid_used(apic_id_map, apic_id)) {
3767 for (i = 0; i < get_physical_broadcast(); i++) {
3768 if (!check_apicid_used(apic_id_map, i))
3772 if (i == get_physical_broadcast())
3773 panic("Max apic_id exceeded!\n");
3775 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
3776 "trying %d\n", ioapic, apic_id, i);
3781 tmp = apicid_to_cpu_present(apic_id);
3782 physids_or(apic_id_map, apic_id_map, tmp);
3784 if (reg_00.bits.ID != apic_id) {
3785 reg_00.bits.ID = apic_id;
3787 spin_lock_irqsave(&ioapic_lock, flags);
3788 io_apic_write(ioapic, 0, reg_00.raw);
3789 reg_00.raw = io_apic_read(ioapic, 0);
3790 spin_unlock_irqrestore(&ioapic_lock, flags);
3793 if (reg_00.bits.ID != apic_id) {
3794 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
3799 apic_printk(APIC_VERBOSE, KERN_INFO
3800 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
3805 int __init io_apic_get_version(int ioapic)
3807 union IO_APIC_reg_01 reg_01;
3808 unsigned long flags;
3810 spin_lock_irqsave(&ioapic_lock, flags);
3811 reg_01.raw = io_apic_read(ioapic, 1);
3812 spin_unlock_irqrestore(&ioapic_lock, flags);
3814 return reg_01.bits.version;
3818 int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
3820 if (!IO_APIC_IRQ(irq)) {
3821 apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
3827 * IRQs < 16 are already in the irq_2_pin[] map
3830 add_pin_to_irq(irq, ioapic, pin);
3832 setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
3838 int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
3842 if (skip_ioapic_setup)
3845 for (i = 0; i < mp_irq_entries; i++)
3846 if (mp_irqs[i].mp_irqtype == mp_INT &&
3847 mp_irqs[i].mp_srcbusirq == bus_irq)
3849 if (i >= mp_irq_entries)
3852 *trigger = irq_trigger(i);
3853 *polarity = irq_polarity(i);
3857 #endif /* CONFIG_ACPI */
3860 * This function currently is only a helper for the i386 smp boot process where
3861 * we need to reprogram the ioredtbls to cater for the cpus which have come online
3862 * so the mask in all cases should simply be TARGET_CPUS.
3865 void __init setup_ioapic_dest(void)
3867 int pin, ioapic, irq, irq_entry;
3868 struct irq_cfg *cfg;
3870 if (skip_ioapic_setup == 1)
3873 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
3874 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
3875 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
3876 if (irq_entry == -1)
3878 irq = pin_2_irq(irq_entry, ioapic, pin);
3880 /* setup_IO_APIC_irqs could fail to get vector for some device
3881 * when you have too many devices, because at that time only the boot
 * cpu is online.
 */
cfg = irq_cfg(irq);
if (!cfg->vector)
3886 setup_IO_APIC_irq(ioapic, pin, irq,
3887 irq_trigger(irq_entry),
3888 irq_polarity(irq_entry));
3889 #ifdef CONFIG_INTR_REMAP
3890 else if (intr_remapping_enabled)
3891 set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
3894 set_ioapic_affinity_irq(irq, TARGET_CPUS);
3901 #define IOAPIC_RESOURCE_NAME_SIZE 11
3903 static struct resource *ioapic_resources;
3905 static struct resource * __init ioapic_setup_resources(void)
3908 struct resource *res;
3912 if (nr_ioapics <= 0)
3915 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
3918 mem = alloc_bootmem(n);
3922 mem += sizeof(struct resource) * nr_ioapics;
3924 for (i = 0; i < nr_ioapics; i++) {
3926 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
3927 sprintf(mem, "IOAPIC %u", i);
3928 mem += IOAPIC_RESOURCE_NAME_SIZE;
3932 ioapic_resources = res;
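/*
 * Note on the allocation above: a single bootmem block holds the
 * nr_ioapics struct resources followed by their "IOAPIC %u" name
 * strings, so the resource names can simply point into the tail of the
 * same block and nothing needs to be freed separately.
 */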
3937 void __init ioapic_init_mappings(void)
3939 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
3941 struct resource *ioapic_res;
3943 ioapic_res = ioapic_setup_resources();
3944 for (i = 0; i < nr_ioapics; i++) {
3945 if (smp_found_config) {
3946 ioapic_phys = mp_ioapics[i].mp_apicaddr;
3947 #ifdef CONFIG_X86_32
3950 "WARNING: bogus zero IO-APIC "
3951 "address found in MPTABLE, "
3952 "disabling IO/APIC support!\n");
3953 smp_found_config = 0;
3954 skip_ioapic_setup = 1;
3955 goto fake_ioapic_page;
3959 #ifdef CONFIG_X86_32
3962 ioapic_phys = (unsigned long)
3963 alloc_bootmem_pages(PAGE_SIZE);
3964 ioapic_phys = __pa(ioapic_phys);
3966 set_fixmap_nocache(idx, ioapic_phys);
3967 apic_printk(APIC_VERBOSE,
3968 "mapped IOAPIC to %08lx (%08lx)\n",
3969 __fix_to_virt(idx), ioapic_phys);
3972 if (ioapic_res != NULL) {
3973 ioapic_res->start = ioapic_phys;
3974 ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
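/*
 * The IO-APIC itself only exposes a small index/data register pair, but
 * it is mapped through a full fixmap page above and reserved as a 4K
 * window here, hence the (4 * 1024) - 1 span.
 */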
3980 static int __init ioapic_insert_resources(void)
3983 struct resource *r = ioapic_resources;
3987 "IO APIC resources could not be allocated.\n");
3991 for (i = 0; i < nr_ioapics; i++) {
3992 insert_resource(&iomem_resource, r);
3999 /* Insert the IO APIC resources after PCI initialization has occurred to handle
4000 * IO APICS that are mapped in on a BAR in PCI space. */
4001 late_initcall(ioapic_insert_resources);