3 * Purpose: PCI Message Signaled Interrupt (MSI)
5 * Copyright (C) 2003-2004 Intel
6 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
10 #include <linux/irq.h>
11 #include <linux/interrupt.h>
12 #include <linux/init.h>
13 #include <linux/ioport.h>
14 #include <linux/smp_lock.h>
15 #include <linux/pci.h>
16 #include <linux/proc_fs.h>
18 #include <asm/errno.h>
/*
 * File-scope MSI state.
 *
 * msi_lock serialises access to the msi_desc[] table and the vector
 * accounting counters below.
 */
25 static DEFINE_SPINLOCK(msi_lock);
/* Per-vector MSI descriptor table; NULL means the vector carries no MSI. */
26 static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
/* Slab cache for struct msi_desc; created in msi_cache_init(). */
27 static kmem_cache_t* msi_cachep;
/* Cleared by pci_no_msi() to disable MSI support globally. */
29 static int pci_msi_enable = 1;
30 static int last_alloc_vector;
31 static int nr_released_vectors;
/*
 * Without IO-APIC support the arch does not supply vector_irq[], so
 * define it here; -1 marks a vector with no 1:1 IRQ mapping (see the
 * comment block inside assign_msi_vector() for the full semantics).
 */
33 #ifndef CONFIG_X86_IO_APIC
34 int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
/* Arch-specific MSI operations, installed via msi_register(). */
37 static struct msi_ops *msi_ops;
40 msi_register(struct msi_ops *ops)
/*
 * msi_cache_init - create the slab cache backing alloc_msi_entry().
 * NOTE(review): the failure check / return statement are elided in this
 * extract; presumably returns -ENOMEM when kmem_cache_create() fails.
 */
46 static int msi_cache_init(void)
48 msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc),
49 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
/*
 * msi_set_mask_bit - set or clear the hardware mask for one MSI vector.
 * @vector: vector whose interrupt is being (un)masked
 * @flag: 1 to mask, 0 to unmask
 *
 * Plain MSI keeps its mask bits in PCI config space (mask_base holds the
 * config offset cast to a pointer); MSI-X keeps a 32-bit vector-control
 * word in the memory-mapped table at mask_base.
 * NOTE(review): braces, local declarations and the RMW of mask_bits are
 * elided in this extract.
 */
56 static void msi_set_mask_bit(unsigned int vector, int flag)
58 struct msi_desc *entry;
60 entry = (struct msi_desc *)msi_desc[vector];
/* Silently ignore vectors with no fully-initialised MSI descriptor. */
61 if (!entry || !entry->dev || !entry->mask_base)
63 switch (entry->msi_attrib.type) {
/* MSI: mask bits live as a dword in config space. */
69 pos = (long)entry->mask_base;
70 pci_read_config_dword(entry->dev, pos, &mask_bits);
73 pci_write_config_dword(entry->dev, pos, mask_bits);
/* MSI-X: write the vector-control word of this table entry. */
78 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
79 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
80 writel(flag, entry->mask_base + offset);
/*
 * read_msi_msg - read back the address/data message currently programmed
 * for @entry into @msg.
 *
 * For MSI the message sits in PCI config space (with an optional upper
 * address dword when the 64-bit flag is set); for MSI-X it sits in the
 * memory-mapped table entry.
 * NOTE(review): braces, local declarations and msg->address_hi/data
 * assignments for the MSI branch are elided in this extract.
 */
88 static void read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
90 switch(entry->msi_attrib.type) {
93 struct pci_dev *dev = entry->dev;
94 int pos = entry->msi_attrib.pos;
97 pci_read_config_dword(dev, msi_lower_address_reg(pos),
/* 64-bit capable devices have the data register after the upper dword. */
99 if (entry->msi_attrib.is_64) {
100 pci_read_config_dword(dev, msi_upper_address_reg(pos),
102 pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
105 pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
110 case PCI_CAP_ID_MSIX:
/* MSI-X: read the three dwords from the MMIO table entry. */
113 base = entry->mask_base +
114 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
116 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
117 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
118 msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
/*
 * write_msi_msg - program the address/data message in @msg into the
 * hardware for @entry; mirror of read_msi_msg().
 *
 * MSI messages are written through config space (upper address dword and
 * shifted data register only when the 64-bit flag is set); MSI-X messages
 * go to the memory-mapped table entry.
 * NOTE(review): braces and some argument lines are elided in this extract.
 */
126 static void write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
128 switch (entry->msi_attrib.type) {
131 struct pci_dev *dev = entry->dev;
132 int pos = entry->msi_attrib.pos;
134 pci_write_config_dword(dev, msi_lower_address_reg(pos),
136 if (entry->msi_attrib.is_64) {
137 pci_write_config_dword(dev, msi_upper_address_reg(pos),
139 pci_write_config_word(dev, msi_data_reg(pos, 1),
142 pci_write_config_word(dev, msi_data_reg(pos, 0),
147 case PCI_CAP_ID_MSIX:
/* MSI-X: write the three dwords of the MMIO table entry. */
150 base = entry->mask_base +
151 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
153 writel(msg->address_lo,
154 base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
155 writel(msg->address_hi,
156 base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
157 writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
/*
 * set_msi_affinity - retarget an MSI vector to the CPUs in @cpu_mask.
 *
 * Reads the current message, lets the arch msi_ops rewrite its target,
 * writes it back, and records the new affinity.  Compiled out (defined
 * to NULL) on !CONFIG_SMP per the #else branch below.
 * NOTE(review): braces and the early-return path are elided here.
 */
166 static void set_msi_affinity(unsigned int irq, cpumask_t cpu_mask)
168 struct msi_desc *entry;
171 entry = msi_desc[irq];
172 if (!entry || !entry->dev)
175 read_msi_msg(entry, &msg);
/* Arch hook rewrites the message to target the requested CPUs. */
176 msi_ops->target(irq, cpu_mask, &msg);
177 write_msi_msg(entry, &msg);
178 set_native_irq_info(irq, cpu_mask);
181 #define set_msi_affinity NULL
182 #endif /* CONFIG_SMP */
/* irq 'disable' hook: assert the hardware mask bit for @vector. */
static void mask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 1);
}
/* irq 'enable' hook: clear the hardware mask bit for @vector. */
static void unmask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 0);
}
/*
 * startup_msi_irq_wo_maskbit - irq 'startup' hook for non-maskable MSI.
 *
 * Marks the vector's descriptor active under msi_lock.  Always returns 0
 * because an MSI can never have a pending interrupt at startup.
 * NOTE(review): the local 'flags' declaration and the return inside the
 * error branch are elided in this extract.
 */
194 static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
196 struct msi_desc *entry;
199 spin_lock_irqsave(&msi_lock, flags);
200 entry = msi_desc[vector];
/* Bail out if the vector has no live MSI descriptor. */
201 if (!entry || !entry->dev) {
202 spin_unlock_irqrestore(&msi_lock, flags);
205 entry->msi_attrib.state = 1; /* Mark it active */
206 spin_unlock_irqrestore(&msi_lock, flags);
208 return 0; /* never anything pending */
/*
 * irq 'startup' hook for maskable MSI/MSI-X: mark the vector active,
 * then unmask it in hardware.  Returns 0 — nothing can be pending.
 */
static unsigned int startup_msi_irq_w_maskbit(unsigned int vector)
{
	startup_msi_irq_wo_maskbit(vector);
	unmask_MSI_irq(vector);
	return 0;
}
/*
 * shutdown_msi_irq - irq 'shutdown' hook shared by all MSI flavours.
 *
 * Marks the vector's descriptor inactive under msi_lock; tolerates a
 * missing/stale descriptor.
 * NOTE(review): the local 'flags' declaration is elided in this extract.
 */
218 static void shutdown_msi_irq(unsigned int vector)
220 struct msi_desc *entry;
223 spin_lock_irqsave(&msi_lock, flags);
224 entry = msi_desc[vector];
225 if (entry && entry->dev)
226 entry->msi_attrib.state = 0; /* Mark it not active */
227 spin_unlock_irqrestore(&msi_lock, flags);
/* irq 'end' hook for non-maskable MSI: just complete any pending move. */
static void end_msi_irq_wo_maskbit(unsigned int vector)
{
	move_native_irq(vector);
}
/*
 * irq 'end' hook for maskable MSI/MSI-X: complete any pending IRQ move
 * and drop the hardware mask again.
 */
static void end_msi_irq_w_maskbit(unsigned int vector)
{
	move_native_irq(vector);
	unmask_MSI_irq(vector);
}
243 static void do_nothing(unsigned int vector)
248 * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices,
249 * which implement the MSI-X Capability Structure.
/* MSI-X always has a per-vector mask bit, so use the maskbit handlers. */
251 static struct hw_interrupt_type msix_irq_type = {
252 .typename = "PCI-MSI-X",
253 .startup = startup_msi_irq_w_maskbit,
254 .shutdown = shutdown_msi_irq,
255 .enable = unmask_MSI_irq,
256 .disable = mask_MSI_irq,
/* NOTE(review): the .ack initializer and closing brace are not visible here. */
258 .end = end_msi_irq_w_maskbit,
259 .set_affinity = set_msi_affinity
263 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
264 * which implement the MSI Capability Structure with
265 * Mask-and-Pending Bits.
/* Same handlers as MSI-X since a hardware mask bit is available. */
267 static struct hw_interrupt_type msi_irq_w_maskbit_type = {
268 .typename = "PCI-MSI",
269 .startup = startup_msi_irq_w_maskbit,
270 .shutdown = shutdown_msi_irq,
271 .enable = unmask_MSI_irq,
272 .disable = mask_MSI_irq,
/* NOTE(review): the .ack initializer and closing brace are not visible here. */
274 .end = end_msi_irq_w_maskbit,
275 .set_affinity = set_msi_affinity
279 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
280 * which implement the MSI Capability Structure without
281 * Mask-and-Pending Bits.
/* No mask bit: enable/disable are no-ops; 'end' need not unmask. */
283 static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
284 .typename = "PCI-MSI",
285 .startup = startup_msi_irq_wo_maskbit,
286 .shutdown = shutdown_msi_irq,
287 .enable = do_nothing,
288 .disable = do_nothing,
/* NOTE(review): the .ack initializer and closing brace are not visible here. */
290 .end = end_msi_irq_wo_maskbit,
291 .set_affinity = set_msi_affinity
294 static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);
/*
 * assign_msi_vector - pick a free interrupt vector for a new MSI.
 *
 * While fresh vectors remain, hands out the next one from
 * assign_irq_vector(); once the device-vector range is exhausted
 * (new_vector_avail cleared) it recycles vectors released by hotplug
 * removal (vector_irq[] == 0), tearing down any descriptor chain still
 * attached to the recycled vector before handing it out.
 * NOTE(review): braces, several locals, the failure return and the
 * do/while head are elided in this extract.
 */
295 static int assign_msi_vector(void)
/* Sticky flag: cleared once the last fresh device vector is handed out. */
297 static int new_vector_avail = 1;
302 * msi_lock is provided to ensure that successful allocation of MSI
303 * vector is assigned unique among drivers.
305 spin_lock_irqsave(&msi_lock, flags);
307 if (!new_vector_avail) {
311 * vector_irq[] = -1 indicates that this specific vector is:
312 * - assigned for MSI (since MSI have no associated IRQ) or
313 * - assigned for legacy if less than 16, or
314 * - having no corresponding 1:1 vector-to-IOxAPIC IRQ mapping
315 * vector_irq[] = 0 indicates that this vector, previously
316 * assigned for MSI, is freed by hotplug removed operations.
317 * This vector will be reused for any subsequent hotplug added
319 * vector_irq[] > 0 indicates that this vector is assigned for
320 * IOxAPIC IRQs. This vector and its value provides a 1-to-1
321 * vector-to-IOxAPIC IRQ mapping.
/* Scan for a recycled vector (vector_irq[] == 0). */
323 for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
324 if (vector_irq[vector] != 0)
326 free_vector = vector;
327 if (!msi_desc[vector])
333 spin_unlock_irqrestore(&msi_lock, flags);
/* Re-claim the recycled vector for MSI use. */
336 vector_irq[free_vector] = -1;
337 nr_released_vectors--;
338 spin_unlock_irqrestore(&msi_lock, flags);
339 if (msi_desc[free_vector] != NULL) {
343 /* free all linked vectors before re-assign */
345 spin_lock_irqsave(&msi_lock, flags);
346 dev = msi_desc[free_vector]->dev;
347 tail = msi_desc[free_vector]->link.tail;
348 spin_unlock_irqrestore(&msi_lock, flags);
349 msi_free_vector(dev, tail, 1);
350 } while (free_vector != tail);
/* Fast path: fresh vectors still available from the arch allocator. */
355 vector = assign_irq_vector(AUTO_ASSIGN);
356 last_alloc_vector = vector;
357 if (vector == LAST_DEVICE_VECTOR)
358 new_vector_avail = 0;
360 spin_unlock_irqrestore(&msi_lock, flags);
/*
 * get_new_vector - allocate an MSI vector and install its IDT gate.
 * NOTE(review): the success check and return statement are elided in this
 * extract; presumably the gate is only set for a valid vector, which is
 * then returned (or the allocator's error code propagated).
 */
364 static int get_new_vector(void)
366 int vector = assign_msi_vector();
369 set_intr_gate(vector, interrupt[vector]);
/*
 * msi_init - one-time lazy initialisation of the MSI subsystem.
 *
 * Runs the quirk check, arch init (msi_arch_init), verifies msi_ops were
 * registered, primes the vector allocator and creates the slab cache.
 * 'status' is static so subsequent callers get the cached result.
 * NOTE(review): braces, early returns and some condition lines are elided
 * in this extract.
 */
374 static int msi_init(void)
376 static int status = -ENOMEM;
383 printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
388 status = msi_arch_init();
392 "PCI: MSI arch init failed. MSI disabled.\n");
398 "PCI: MSI ops not registered. MSI disabled.\n");
/* Prime the allocator and make sure the cache exists. */
403 last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
404 status = msi_cache_init();
407 printk(KERN_WARNING "PCI: MSI cache init failed\n");
411 if (last_alloc_vector < 0) {
413 printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n");
/* Mark the primed vector as released so it can be recycled later. */
417 vector_irq[last_alloc_vector] = 0;
418 nr_released_vectors++;
/* Allocate a vector for @dev's MSI; the device argument is unused here. */
static int get_msi_vector(struct pci_dev *dev)
{
	return get_new_vector();
}
/*
 * alloc_msi_entry - allocate a zeroed msi_desc from the slab cache.
 *
 * head == tail == 0 marks a single (unlinked) message.
 * NOTE(review): the NULL check and return statement are elided here.
 */
428 static struct msi_desc* alloc_msi_entry(void)
430 struct msi_desc *entry;
432 entry = kmem_cache_zalloc(msi_cachep, GFP_KERNEL);
436 entry->link.tail = entry->link.head = 0; /* single message */
/*
 * attach_msi_entry - publish @entry in the global msi_desc[] table under
 * msi_lock, making @vector visible to the irq handlers above.
 * NOTE(review): the local 'flags' declaration is elided in this extract.
 */
442 static void attach_msi_entry(struct msi_desc *entry, int vector)
446 spin_lock_irqsave(&msi_lock, flags);
447 msi_desc[vector] = entry;
448 spin_unlock_irqrestore(&msi_lock, flags);
/*
 * irq_handler_init - select the irq chip for a vector.
 * @cap_id: PCI_CAP_ID_MSI or PCI_CAP_ID_MSIX
 * @pos: the vector number (indexes irq_desc[])
 * @mask: non-zero if the MSI capability supports mask bits
 *
 * MSI-X always gets msix_irq_type; plain MSI gets the with/without
 * mask-bit variant depending on @mask.
 * NOTE(review): the 'flags' declaration and else/else-if keywords are
 * elided in this extract.
 */
451 static void irq_handler_init(int cap_id, int pos, int mask)
455 spin_lock_irqsave(&irq_desc[pos].lock, flags);
456 if (cap_id == PCI_CAP_ID_MSIX)
457 irq_desc[pos].chip = &msix_irq_type;
460 irq_desc[pos].chip = &msi_irq_wo_maskbit_type;
462 irq_desc[pos].chip = &msi_irq_w_maskbit_type;
464 spin_unlock_irqrestore(&irq_desc[pos].lock, flags);
/*
 * enable_msi_mode - turn on the MSI or MSI-X enable bit in the device's
 * capability control register and flag the pci_dev accordingly.
 *
 * PCI Express endpoints additionally get legacy INTx disabled, since the
 * message interrupt replaces the pin assertion.
 * NOTE(review): the 'control' declaration, braces and the 'else' keyword
 * are elided in this extract.
 */
467 static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
471 pci_read_config_word(dev, msi_control_reg(pos), &control);
472 if (type == PCI_CAP_ID_MSI) {
473 /* Set enabled bits to single MSI & enable MSI_enable bit */
474 msi_enable(control, 1);
475 pci_write_config_word(dev, msi_control_reg(pos), control);
476 dev->msi_enabled = 1;
478 msix_enable(control);
479 pci_write_config_word(dev, msi_control_reg(pos), control);
480 dev->msix_enabled = 1;
482 if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
483 /* PCI Express Endpoint device detected */
484 pci_intx(dev, 0); /* disable intx */
/*
 * disable_msi_mode - mirror of enable_msi_mode(): clear the MSI/MSI-X
 * enable bit, clear the pci_dev flag, and re-enable legacy INTx on
 * PCI Express endpoints.
 * NOTE(review): the 'control' declaration, braces and the 'else' keyword
 * are elided in this extract.
 */
488 void disable_msi_mode(struct pci_dev *dev, int pos, int type)
492 pci_read_config_word(dev, msi_control_reg(pos), &control);
493 if (type == PCI_CAP_ID_MSI) {
494 /* Set enabled bits to single MSI & enable MSI_enable bit */
495 msi_disable(control);
496 pci_write_config_word(dev, msi_control_reg(pos), control);
497 dev->msi_enabled = 0;
499 msix_disable(control);
500 pci_write_config_word(dev, msi_control_reg(pos), control);
501 dev->msix_enabled = 0;
503 if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
504 /* PCI Express Endpoint device detected */
505 pci_intx(dev, 1); /* enable intx */
/*
 * msi_lookup_vector - find an already-assigned MSI vector of @type for
 * @dev whose saved default_vector matches dev->irq.
 *
 * On a hit, dev->irq is overridden with the found vector (the override
 * lines are elided here) and the function returns success; callers treat
 * a zero return as "vector already assigned".
 * NOTE(review): locals, braces, the dev->irq assignment and the return
 * statements are elided in this extract.
 */
509 static int msi_lookup_vector(struct pci_dev *dev, int type)
514 spin_lock_irqsave(&msi_lock, flags);
515 for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
516 if (!msi_desc[vector] || msi_desc[vector]->dev != dev ||
517 msi_desc[vector]->msi_attrib.type != type ||
518 msi_desc[vector]->msi_attrib.default_vector != dev->irq)
520 spin_unlock_irqrestore(&msi_lock, flags);
521 /* This pre-assigned MSI vector for this device
522 already exits. Override dev->irq with this vector */
526 spin_unlock_irqrestore(&msi_lock, flags);
531 void pci_scan_msi_device(struct pci_dev *dev)
/*
 * pci_save_msi_state - snapshot the MSI capability registers before a
 * suspend/reset so pci_restore_msi_state() can reprogram them.
 *
 * Saves up to 5 dwords (control+flags, address lo, optional address hi,
 * data, optional mask bits) into a pci_cap_saved_state attached to @dev.
 * Skips devices with no enabled MSI capability.
 * NOTE(review): locals, braces and return statements are elided in this
 * extract.
 */
538 int pci_save_msi_state(struct pci_dev *dev)
542 struct pci_cap_saved_state *save_state;
545 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
546 if (pos <= 0 || dev->no_msi)
/* Only save state if MSI is actually enabled. */
549 pci_read_config_word(dev, msi_control_reg(pos), &control);
550 if (!(control & PCI_MSI_FLAGS_ENABLE))
553 save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
556 printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
559 cap = &save_state->data[0];
561 pci_read_config_dword(dev, pos, &cap[i++]);
/* Control word lives in the upper half of the first dword. */
562 control = cap[0] >> 16;
563 pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
564 if (control & PCI_MSI_FLAGS_64BIT) {
565 pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
566 pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
568 pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
569 if (control & PCI_MSI_FLAGS_MASKBIT)
570 pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
571 save_state->cap_nr = PCI_CAP_ID_MSI;
572 pci_add_saved_cap(dev, save_state);
/*
 * pci_restore_msi_state - reprogram the MSI capability from the state
 * saved by pci_save_msi_state(), re-enable MSI mode, and free the
 * saved-state record.
 * NOTE(review): locals, braces and the early-return path are elided in
 * this extract.
 */
576 void pci_restore_msi_state(struct pci_dev *dev)
580 struct pci_cap_saved_state *save_state;
583 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
584 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
585 if (!save_state || pos <= 0)
587 cap = &save_state->data[0];
/* First saved dword's upper half is the control word. */
589 control = cap[i++] >> 16;
590 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
591 if (control & PCI_MSI_FLAGS_64BIT) {
592 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
593 pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
595 pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
596 if (control & PCI_MSI_FLAGS_MASKBIT)
597 pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
598 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
599 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
600 pci_remove_saved_cap(save_state);
/*
 * pci_save_msix_state - save the MSI-X control word plus each vector's
 * message (into entry->msg_save) ahead of a suspend/reset.
 *
 * Walks the device's linked vector chain starting at dev->irq.
 * NOTE(review): locals, braces, return statements and the loop's vector
 * advance are elided in this extract.
 */
604 int pci_save_msix_state(struct pci_dev *dev)
608 int vector, head, tail = 0;
610 struct pci_cap_saved_state *save_state;
612 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
613 if (pos <= 0 || dev->no_msi)
616 /* save the capability */
617 pci_read_config_word(dev, msi_control_reg(pos), &control);
618 if (!(control & PCI_MSIX_FLAGS_ENABLE))
620 save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
623 printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
626 *((u16 *)&save_state->data[0]) = control;
/* Zero return from msi_lookup_vector() puts the head vector in dev->irq. */
630 if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
635 vector = head = dev->irq;
/* Save each entry's programmed message for later restore. */
636 while (head != tail) {
637 struct msi_desc *entry;
639 entry = msi_desc[vector];
640 read_msi_msg(entry, &entry->msg_save);
642 tail = msi_desc[vector]->link.tail;
647 save_state->cap_nr = PCI_CAP_ID_MSIX;
648 pci_add_saved_cap(dev, save_state);
/*
 * pci_restore_msix_state - reprogram every MSI-X table entry from the
 * msg_save copies taken by pci_save_msix_state(), then restore the
 * control word and re-enable MSI-X mode.
 * NOTE(review): locals, braces and early-return paths are elided in this
 * extract.
 */
652 void pci_restore_msix_state(struct pci_dev *dev)
656 int vector, head, tail = 0;
657 struct msi_desc *entry;
659 struct pci_cap_saved_state *save_state;
661 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
664 save = *((u16 *)&save_state->data[0]);
665 pci_remove_saved_cap(save_state);
668 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
672 /* route the table */
674 if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX))
676 vector = head = dev->irq;
677 while (head != tail) {
678 entry = msi_desc[vector];
679 write_msi_msg(entry, &entry->msg_save);
681 tail = msi_desc[vector]->link.tail;
686 pci_write_config_word(dev, msi_control_reg(pos), save);
687 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
/*
 * msi_register_init - program the MSI message for @entry via the arch
 * msi_ops->setup() hook and, when the capability supports mask bits,
 * mask every message the device could send (all are unmasked at reset).
 *
 * The mask computed from multi_msi_capable() covers one bit per message
 * the device advertises.
 * NOTE(review): locals, braces and return statements are elided in this
 * extract.
 */
691 static int msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
698 pos = entry->msi_attrib.pos;
699 pci_read_config_word(dev, msi_control_reg(pos), &control);
701 /* Configure MSI capability structure */
702 status = msi_ops->setup(dev, dev->irq, &msg);
706 write_msi_msg(entry, &msg);
707 if (entry->msi_attrib.maskbit) {
708 unsigned int maskbits, temp;
709 /* All MSIs are unmasked by default, Mask them all */
710 pci_read_config_dword(dev,
711 msi_mask_bits_reg(pos, is_64bit_address(control)),
/* Build a mask with one bit set per advertised message. */
713 temp = (1 << multi_msi_capable(control));
714 temp = ((temp - 1) & ~temp);
716 pci_write_config_dword(dev,
717 msi_mask_bits_reg(pos, is_64bit_address(control)),
725 * msi_capability_init - configure device's MSI capability structure
726 * @dev: pointer to the pci_dev data structure of MSI device function
728 * Setup the MSI capability structure of device function with a single
729 * MSI vector, regardless of device function is capable of handling
730 * multiple messages. A return of zero indicates the successful setup
731 * of an entry zero with the new MSI vector or non-zero for otherwise.
/*
 * NOTE(review): locals, braces and several error-path lines are elided
 * in this extract.
 */
733 static int msi_capability_init(struct pci_dev *dev)
736 struct msi_desc *entry;
740 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
741 pci_read_config_word(dev, msi_control_reg(pos), &control);
742 /* MSI Entry Initialization */
743 entry = alloc_msi_entry();
747 vector = get_msi_vector(dev);
/* Vector allocation failed: release the descriptor. */
749 kmem_cache_free(msi_cachep, entry);
/* Single message: the link ring points at itself. */
752 entry->link.head = vector;
753 entry->link.tail = vector;
754 entry->msi_attrib.type = PCI_CAP_ID_MSI;
755 entry->msi_attrib.state = 0; /* Mark it not active */
756 entry->msi_attrib.is_64 = is_64bit_address(control);
757 entry->msi_attrib.entry_nr = 0;
758 entry->msi_attrib.maskbit = is_mask_bit_support(control);
759 entry->msi_attrib.default_vector = dev->irq; /* Save IOAPIC IRQ */
760 entry->msi_attrib.pos = pos;
/* For plain MSI, mask_base encodes a config-space offset, not MMIO. */
763 if (is_mask_bit_support(control)) {
764 entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
765 is_64bit_address(control));
767 /* Replace with MSI handler */
768 irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
769 /* Configure MSI capability structure */
770 status = msi_register_init(dev, entry);
/* Setup failed: restore the pin IRQ and free the descriptor. */
772 dev->irq = entry->msi_attrib.default_vector;
773 kmem_cache_free(msi_cachep, entry);
777 attach_msi_entry(entry, vector);
778 /* Set MSI enabled bits */
779 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
785 * msix_capability_init - configure device's MSI-X capability
786 * @dev: pointer to the pci_dev data structure of MSI-X device function
787 * @entries: pointer to an array of struct msix_entry entries
788 * @nvec: number of @entries
790 * Setup the MSI-X capability structure of device function with a
791 * single MSI-X vector. A return of zero indicates the successful setup of
792 * requested MSI-X entries with allocated vectors or non-zero for otherwise.
/*
 * NOTE(review): locals, braces, loop-exit conditions and several
 * error-path lines are elided in this extract.
 */
794 static int msix_capability_init(struct pci_dev *dev,
795 struct msix_entry *entries, int nvec)
797 struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
800 int vector, pos, i, j, nr_entries, temp = 0;
801 unsigned long phys_addr;
807 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
808 /* Request & Map MSI-X table region */
809 pci_read_config_word(dev, msi_control_reg(pos), &control);
810 nr_entries = multi_msix_capable(control);
/* Locate and map the MSI-X table via its BIR + offset. */
812 pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
813 bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
814 table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
815 phys_addr = pci_resource_start (dev, bir) + table_offset;
816 base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
820 /* MSI-X Table Initialization */
821 for (i = 0; i < nvec; i++) {
822 entry = alloc_msi_entry();
825 vector = get_msi_vector(dev);
/* Vector allocation failed mid-loop: free this descriptor, unwind below. */
827 kmem_cache_free(msi_cachep, entry);
831 j = entries[i].entry;
832 entries[i].vector = vector;
833 entry->msi_attrib.type = PCI_CAP_ID_MSIX;
834 entry->msi_attrib.state = 0; /* Mark it not active */
835 entry->msi_attrib.is_64 = 1;
836 entry->msi_attrib.entry_nr = j;
837 entry->msi_attrib.maskbit = 1;
838 entry->msi_attrib.default_vector = dev->irq;
839 entry->msi_attrib.pos = pos;
841 entry->mask_base = base;
/* First descriptor starts the ring; later ones splice into it. */
843 entry->link.head = vector;
844 entry->link.tail = vector;
847 entry->link.head = temp;
848 entry->link.tail = tail->link.tail;
849 tail->link.tail = vector;
850 head->link.head = vector;
854 /* Replace with MSI-X handler */
855 irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
856 /* Configure MSI-X capability structure */
857 status = msi_ops->setup(dev, vector, &msg);
861 write_msi_msg(entry, &msg);
862 attach_msi_entry(entry, vector);
/* Error unwind: release every vector allocated so far. */
867 for (; i >= 0; i--) {
868 vector = (entries + i)->vector;
869 msi_free_vector(dev, vector, 0);
870 (entries + i)->vector = 0;
872 /* If we had some success report the number of irqs
873 * we succeeded in setting up.
879 /* Set MSI-X enabled bits */
880 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
886 * pci_msi_supported - check whether MSI may be enabled on device
887 * @dev: pointer to the pci_dev data structure of MSI device function
889 * MSI must be globally enabled and supported by the device and its root
890 * bus. But, the root bus is not easy to find since some architectures
891 * have virtual busses on top of the PCI hierarchy (for instance the
892 * hypertransport bus), while the actual bus where MSI must be supported
893 * is below. So we test the MSI flag on all parent busses and assume
894 * that no quirk will ever set the NO_MSI flag on a non-root bus.
/* NOTE(review): braces and return statements are elided in this extract. */
897 int pci_msi_supported(struct pci_dev * dev)
901 if (!pci_msi_enable || !dev || dev->no_msi)
904 /* check MSI flags of all parent busses */
905 for (bus = dev->bus; bus; bus = bus->parent)
906 if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
913 * pci_enable_msi - configure device's MSI capability structure
914 * @dev: pointer to the pci_dev data structure of MSI device function
916 * Setup the MSI capability structure of device function with
917 * a single MSI vector upon its software driver call to request for
918 * MSI mode enabled on its hardware device function. A return of zero
919 * indicates the successful setup of an entry zero with the new MSI
920 * vector or non-zero for otherwise.
/*
 * NOTE(review): braces, the msi_init() call and several return statements
 * are elided in this extract.
 */
922 int pci_enable_msi(struct pci_dev* dev)
924 int pos, temp, status;
927 if (pci_msi_supported(dev) < 0)
936 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
/* Refuse 32-bit-only MSI when the arch requires 64-bit addresses. */
940 pci_read_config_word(dev, msi_control_reg(pos), &control);
941 if (!is_64bit_address(control) && msi_ops->needs_64bit_address)
/* Re-enabling an already-assigned MSI vector is a driver bug. */
944 WARN_ON(!msi_lookup_vector(dev, PCI_CAP_ID_MSI));
946 /* Check whether driver already requested for MSI-X vectors */
947 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
948 if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
949 printk(KERN_INFO "PCI: %s: Can't enable MSI. "
950 "Device already has MSI-X vectors assigned\n",
955 status = msi_capability_init(dev);
/*
 * pci_disable_msi - disable MSI mode on @dev, free its vector, and
 * restore dev->irq to the original pin-assertion vector.
 *
 * Warns (and BUGs) if the driver still has the MSI vector requested via
 * request_irq() — free_irq() must come first.
 * NOTE(review): locals, braces and early-return paths are elided in this
 * extract.
 */
961 void pci_disable_msi(struct pci_dev* dev)
962 struct msi_desc *entry;
963 int pos, default_vector;
971 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
975 pci_read_config_word(dev, msi_control_reg(pos), &control);
976 if (!(control & PCI_MSI_FLAGS_ENABLE))
979 disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
981 spin_lock_irqsave(&msi_lock, flags);
982 entry = msi_desc[dev->irq];
983 if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
984 spin_unlock_irqrestore(&msi_lock, flags);
/* An active state means the driver skipped free_irq(). */
987 if (entry->msi_attrib.state) {
988 spin_unlock_irqrestore(&msi_lock, flags);
989 printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
990 "free_irq() on MSI vector %d\n",
991 pci_name(dev), dev->irq);
992 BUG_ON(entry->msi_attrib.state > 0);
994 default_vector = entry->msi_attrib.default_vector;
995 spin_unlock_irqrestore(&msi_lock, flags);
996 msi_free_vector(dev, dev->irq, 0);
998 /* Restore dev->irq to its default pin-assertion vector */
999 dev->irq = default_vector;
/*
 * msi_free_vector - tear down one MSI/MSI-X vector.
 * @dev: owning device (sanity-checked against the descriptor)
 * @vector: vector to release
 * @reassign: non-zero when freeing as part of vector recycling in
 *            assign_msi_vector(); affects the release accounting
 *
 * Calls the arch teardown hook, unlinks the descriptor from its ring,
 * returns the vector to the released pool, frees the descriptor, and for
 * MSI-X masks the table entry (and unmaps the table when it was the last
 * user — those lines are elided here).
 * NOTE(review): braces, return statements and the reassign-conditional
 * structure are elided in this extract.
 */
1003 static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
1005 struct msi_desc *entry;
1006 int head, entry_nr, type;
1008 unsigned long flags;
1010 msi_ops->teardown(vector);
1012 spin_lock_irqsave(&msi_lock, flags);
1013 entry = msi_desc[vector];
1014 if (!entry || entry->dev != dev) {
1015 spin_unlock_irqrestore(&msi_lock, flags);
1018 type = entry->msi_attrib.type;
1019 entry_nr = entry->msi_attrib.entry_nr;
1020 head = entry->link.head;
1021 base = entry->mask_base;
/* Unlink this vector from its descriptor ring. */
1022 msi_desc[entry->link.head]->link.tail = entry->link.tail;
1023 msi_desc[entry->link.tail]->link.head = entry->link.head;
/* Mark the vector released so assign_msi_vector() can recycle it. */
1026 vector_irq[vector] = 0;
1027 nr_released_vectors++;
1029 msi_desc[vector] = NULL;
1030 spin_unlock_irqrestore(&msi_lock, flags);
1032 kmem_cache_free(msi_cachep, entry);
/* MSI-X: mask the now-orphaned table entry. */
1034 if (type == PCI_CAP_ID_MSIX) {
1037 entry_nr * PCI_MSIX_ENTRY_SIZE +
1038 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
1048 * pci_enable_msix - configure device's MSI-X capability structure
1049 * @dev: pointer to the pci_dev data structure of MSI-X device function
1050 * @entries: pointer to an array of MSI-X entries
1051 * @nvec: number of MSI-X vectors requested for allocation by device driver
1053 * Setup the MSI-X capability structure of device function with the number
1054 * of requested vectors upon its software driver call to request for
1055 * MSI-X mode enabled on its hardware device function. A return of zero
1056 * indicates the successful configuration of MSI-X capability structure
1057 * with new allocated MSI-X vectors. A return of < 0 indicates a failure.
1058 * Or a return of > 0 indicates that driver request is exceeding the number
1059 * of vectors available. Driver should use the returned value to re-send
/*
 * NOTE(review): locals, braces and several return statements are elided
 * in this extract.
 */
1062 int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
1064 int status, pos, nr_entries;
1068 if (!entries || pci_msi_supported(dev) < 0)
1071 status = msi_init();
1075 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1079 pci_read_config_word(dev, msi_control_reg(pos), &control);
1080 nr_entries = multi_msix_capable(control);
/* Positive return tells the caller how many vectors exist. */
1081 if (nvec > nr_entries)
1084 /* Check for any invalid entries */
1085 for (i = 0; i < nvec; i++) {
1086 if (entries[i].entry >= nr_entries)
1087 return -EINVAL; /* invalid entry */
1088 for (j = i + 1; j < nvec; j++) {
1089 if (entries[i].entry == entries[j].entry)
1090 return -EINVAL; /* duplicate entry */
/* Re-enabling already-assigned MSI-X vectors is a driver bug. */
1094 WARN_ON(!msi_lookup_vector(dev, PCI_CAP_ID_MSIX));
1096 /* Check whether driver already requested for MSI vector */
1097 if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
1098 !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
1099 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
1100 "Device already has an MSI vector assigned\n",
1105 status = msix_capability_init(dev, entries, nvec);
/*
 * pci_disable_msix - disable MSI-X mode on @dev and release every vector
 * in its ring (head freed last), restoring the pin IRQ.
 *
 * A vector still marked active means the driver skipped free_irq();
 * this is reported and trips BUG_ON.
 * NOTE(review): locals, braces, the warning accounting and the loop's
 * vector advance are elided in this extract.
 */
1109 void pci_disable_msix(struct pci_dev* dev)
1114 if (!pci_msi_enable)
1119 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1123 pci_read_config_word(dev, msi_control_reg(pos), &control);
1124 if (!(control & PCI_MSIX_FLAGS_ENABLE))
1127 disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
1130 if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
1131 int state, vector, head, tail = 0, warning = 0;
1132 unsigned long flags;
1134 vector = head = dev->irq;
1135 dev->irq = temp; /* Restore pin IRQ */
1136 while (head != tail) {
1137 spin_lock_irqsave(&msi_lock, flags);
1138 state = msi_desc[vector]->msi_attrib.state;
1139 tail = msi_desc[vector]->link.tail;
1140 spin_unlock_irqrestore(&msi_lock, flags);
1143 else if (vector != head) /* Release MSI-X vector */
1144 msi_free_vector(dev, vector, 0);
/* Free the head vector last, after the rest of the ring. */
1147 msi_free_vector(dev, vector, 0);
1149 printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
1150 "free_irq() on all MSI-X vectors\n",
1152 BUG_ON(warning > 0);
1158 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
1159 * @dev: pointer to the pci_dev data structure of MSI(X) device function
1161 * Being called during hotplug remove, from which the device function
1162 * is hot-removed. All previous assigned MSI/MSI-X vectors, if
1163 * allocated for this device function, are reclaimed to unused state,
1164 * which may be used later on.
/*
 * NOTE(review): braces, warning accounting, loop advances and the final
 * iounmap of the MSI-X table are elided in this extract.
 */
1166 void msi_remove_pci_irq_vectors(struct pci_dev* dev)
1168 int state, pos, temp;
1169 unsigned long flags;
1171 if (!pci_msi_enable || !dev)
1174 temp = dev->irq; /* Save IOAPIC IRQ */
/* Reclaim a plain MSI vector, warning if it was never free_irq()'d. */
1175 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
1176 if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
1177 spin_lock_irqsave(&msi_lock, flags);
1178 state = msi_desc[dev->irq]->msi_attrib.state;
1179 spin_unlock_irqrestore(&msi_lock, flags);
1181 printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
1182 "called without free_irq() on MSI vector %d\n",
1183 pci_name(dev), dev->irq);
1185 } else /* Release MSI vector assigned to this device */
1186 msi_free_vector(dev, dev->irq, 0);
1187 dev->irq = temp; /* Restore IOAPIC IRQ */
/* Reclaim the whole MSI-X vector ring, head vector last. */
1189 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1190 if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
1191 int vector, head, tail = 0, warning = 0;
1192 void __iomem *base = NULL;
1194 vector = head = dev->irq;
1195 while (head != tail) {
1196 spin_lock_irqsave(&msi_lock, flags);
1197 state = msi_desc[vector]->msi_attrib.state;
1198 tail = msi_desc[vector]->link.tail;
1199 base = msi_desc[vector]->mask_base;
1200 spin_unlock_irqrestore(&msi_lock, flags);
1203 else if (vector != head) /* Release MSI-X vector */
1204 msi_free_vector(dev, vector, 0);
1207 msi_free_vector(dev, vector, 0);
1210 printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
1211 "called without free_irq() on all MSI-X vectors\n",
1213 BUG_ON(warning > 0);
1215 dev->irq = temp; /* Restore IOAPIC IRQ */
1219 void pci_no_msi(void)
/* Public driver-facing API of this file. */
1224 EXPORT_SYMBOL(pci_enable_msi);
1225 EXPORT_SYMBOL(pci_disable_msi);
1226 EXPORT_SYMBOL(pci_enable_msix);
1227 EXPORT_SYMBOL(pci_disable_msix);