2 * probe.c - PCI detection and setup code
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
9 #include <linux/pci_hotplug.h>
10 #include <linux/slab.h>
11 #include <linux/module.h>
12 #include <linux/cpumask.h>
13 #include <linux/pci-aspm.h>
14 #include <asm-generic/pci-bridge.h>
17 #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
18 #define CARDBUS_RESERVE_BUSNR 3
/* NOTE(review): this listing is a truncated extraction — initializer fields
 * (e.g. .start/.end of busn_resource) and blank lines are missing throughout. */
/* Default bus-number resource; flags mark it as a bus-number range. */
20 static struct resource busn_resource = {
24 .flags = IORESOURCE_BUS,
27 /* Ugh. Need to stop exporting this to modules. */
28 LIST_HEAD(pci_root_buses);
29 EXPORT_SYMBOL(pci_root_buses);
/* List of per-domain bus-number resources; see get_pci_domain_busn_res(). */
31 static LIST_HEAD(pci_domain_busn_res_list);
/* One bus-number resource per PCI domain, kept on pci_domain_busn_res_list.
 * (Fields beyond 'list' — presumably 'res' and 'domain_nr', both used below —
 * are missing from this truncated listing.) */
33 struct pci_domain_busn_res {
34 struct list_head list;
/*
 * Look up the bus-number resource for @domain_nr, allocating and registering
 * a new zero-initialized entry on first use.  The new entry is flagged
 * IORESOURCE_BUS | IORESOURCE_PCI_FIXED and appended to the global list.
 * NOTE(review): the early-return on a list hit, the kzalloc failure path and
 * the final return are not visible here — truncated.
 */
39 static struct resource *get_pci_domain_busn_res(int domain_nr)
41 struct pci_domain_busn_res *r;
43 list_for_each_entry(r, &pci_domain_busn_res_list, list)
44 if (r->domain_nr == domain_nr)
47 r = kzalloc(sizeof(*r), GFP_KERNEL);
51 r->domain_nr = domain_nr;
54 r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
56 list_add_tail(&r->list, &pci_domain_busn_res_list);
/* bus_find_device() match callback that accepts any device. */
61 static int find_anything(struct device *dev, void *data)
67 * Some device drivers need know if pci is initiated.
68 * Basically, we think pci is not initiated when there
69 * is no device to be found on the pci_bus_type.
/* Returns non-zero when no device exists on the PCI bus type.
 * NOTE(review): the put_device() of the found dev and the return statement
 * are missing from this truncated listing. */
71 int no_pci_devices(void)
76 dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
77 no_devices = (dev == NULL);
81 EXPORT_SYMBOL(no_pci_devices);
/* Device-model release callback for a struct pci_bus: drop the bridge
 * reference, remove bus resources and the OF node; the kfree of the bus
 * itself is not visible in this truncated listing. */
86 static void release_pcibus_dev(struct device *dev)
88 struct pci_bus *pci_bus = to_pci_bus(dev);
91 put_device(pci_bus->bridge);
92 pci_bus_remove_resources(pci_bus);
93 pci_release_bus_of_node(pci_bus);
/* sysfs class for PCI buses; release_pcibus_dev() tears each bus down. */
97 static struct class pcibus_class = {
99 .dev_release = &release_pcibus_dev,
100 .dev_groups = pcibus_groups,
/* Register the pcibus class early (postcore), before device probing. */
103 static int __init pcibus_class_init(void)
105 return class_register(&pcibus_class);
107 postcore_initcall(pcibus_class_init);
/*
 * Compute the decode size implied by a BAR sizing cycle: @base is the
 * original BAR value, @maxbase the value read back after writing all 1s,
 * @mask the address-bits mask for this BAR type.
 * NOTE(review): the "return 0" early exits and the final "return size"
 * are missing from this truncated listing.
 */
109 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
111 u64 size = mask & maxbase; /* Find the significant bits */
115 /* Get the lowest of them to find the decode size, and
116 from that the extent. */
/* Isolate the lowest set bit, then subtract 1 to form the size mask. */
117 size = (size & ~(size-1)) - 1;
119 /* base == maxbase can be valid only if the BAR has
120 already been programmed with all 1s. */
121 if (base == maxbase && ((base | size) & mask) != mask)
/*
 * Translate a raw BAR register value into IORESOURCE_* flags:
 * I/O vs memory space, prefetchability, and (for memory BARs) the
 * 32-bit / below-1M / 64-bit address type.  Returns the flags word.
 * NOTE(review): the switch statement head, break statements and final
 * "return flags" are missing from this truncated listing.
 */
127 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
132 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
133 flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
134 flags |= IORESOURCE_IO;
138 flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
139 flags |= IORESOURCE_MEM;
140 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
141 flags |= IORESOURCE_PREFETCH;
143 mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
145 case PCI_BASE_ADDRESS_MEM_TYPE_32:
147 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
148 /* 1M mem BAR treated as 32-bit BAR */
150 case PCI_BASE_ADDRESS_MEM_TYPE_64:
151 flags |= IORESOURCE_MEM_64;
154 /* mem unknown type treated as 32-bit BAR */
160 #define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
163 * pci_read_base - read a PCI BAR
164 * @dev: the PCI device
165 * @type: type of the BAR
166 * @res: resource buffer to be filled in
167 * @pos: BAR position in the config space
169 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
/*
 * Size a BAR by the classic write-all-1s/read-back protocol, decode its
 * flags, convert the bus address to a CPU resource via pcibios_bus_to_resource,
 * and sanity-check the round trip.  Decode is temporarily disabled in
 * PCI_COMMAND during sizing unless dev->mmio_always_on.
 * NOTE(review): this truncated listing is missing many lines (goto fail
 * paths, 'else' keywords, label lines, closing braces), so the exact error
 * flow cannot be read from here.
 */
171 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
172 struct resource *res, unsigned int pos)
175 u64 l64, sz64, mask64;
177 struct pci_bus_region region, inverted_region;
178 bool bar_too_big = false, bar_too_high = false, bar_invalid = false;
/* ROM BARs use PCI_ROM_ADDRESS_MASK; normal BARs probe with all bits. */
180 mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
182 /* No printks while decoding is disabled! */
183 if (!dev->mmio_always_on) {
184 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
185 if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
186 pci_write_config_word(dev, PCI_COMMAND,
187 orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
191 res->name = pci_name(dev);
/* Sizing cycle: save BAR, write mask, read back size bits, restore BAR. */
193 pci_read_config_dword(dev, pos, &l);
194 pci_write_config_dword(dev, pos, l | mask);
195 pci_read_config_dword(dev, pos, &sz);
196 pci_write_config_dword(dev, pos, l);
199 * All bits set in sz means the device isn't working properly.
200 * If the BAR isn't implemented, all bits must be 0. If it's a
201 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
204 if (!sz || sz == 0xffffffff)
208 * I don't know how l can have all bits set. Copied from old code.
209 * Maybe it fixes a bug on some ancient platform.
214 if (type == pci_bar_unknown) {
215 res->flags = decode_bar(dev, l);
216 res->flags |= IORESOURCE_SIZEALIGN;
217 if (res->flags & IORESOURCE_IO) {
218 l &= PCI_BASE_ADDRESS_IO_MASK;
219 sz &= PCI_BASE_ADDRESS_IO_MASK;
220 mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
222 l &= PCI_BASE_ADDRESS_MEM_MASK;
223 sz &= PCI_BASE_ADDRESS_MEM_MASK;
224 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
227 res->flags |= (l & IORESOURCE_ROM_ENABLE);
228 l &= PCI_ROM_ADDRESS_MASK;
229 sz &= PCI_ROM_ADDRESS_MASK;
230 mask = (u32)PCI_ROM_ADDRESS_MASK;
/* 64-bit memory BAR: repeat the sizing cycle on the upper dword. */
233 if (res->flags & IORESOURCE_MEM_64) {
236 mask64 = mask | (u64)~0 << 32;
238 pci_read_config_dword(dev, pos + 4, &l);
239 pci_write_config_dword(dev, pos + 4, ~0);
240 pci_read_config_dword(dev, pos + 4, &sz);
241 pci_write_config_dword(dev, pos + 4, l);
243 l64 |= ((u64)l << 32);
244 sz64 |= ((u64)sz << 32);
246 sz64 = pci_size(l64, sz64, mask64);
/* A >4GB BAR is unusable when dma_addr_t/resource_size_t are 32-bit. */
251 if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) &&
252 sz64 > 0x100000000ULL) {
253 res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
260 if ((sizeof(dma_addr_t) < 8) && l) {
261 /* Above 32-bit boundary; try to reallocate */
262 res->flags |= IORESOURCE_UNSET;
269 region.end = l64 + sz64;
272 sz = pci_size(l, sz, mask);
281 pcibios_bus_to_resource(dev->bus, res, &region);
282 pcibios_resource_to_bus(dev->bus, &inverted_region, res);
285 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
286 * the corresponding resource address (the physical address used by
287 * the CPU. Converting that resource address back to a bus address
288 * should yield the original BAR value:
290 * resource_to_bus(bus_to_resource(A)) == A
292 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
293 * be claimed by the device.
295 if (inverted_region.start != region.start) {
296 res->flags |= IORESOURCE_UNSET;
298 res->end = region.end - region.start;
/* Restore decode only if we disabled it above. */
308 if (!dev->mmio_always_on &&
309 (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
310 pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
/* Diagnostics for the bar_too_big / bar_too_high / bar_invalid cases. */
313 dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
314 pos, (unsigned long long) sz64);
316 dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4G (bus address %#010llx)\n",
317 pos, (unsigned long long) l64);
319 dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
320 pos, (unsigned long long) region.start);
322 dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);
324 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
/*
 * Probe @howmany standard BARs (and the expansion ROM at offset @rom, when
 * non-zero — the guarding 'if (rom)' is not visible in this truncated
 * listing).  __pci_read_base() returns 1 for a 64-bit BAR, so 'pos' is
 * advanced an extra slot to skip the consumed upper dword.
 */
327 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
329 unsigned int pos, reg;
331 for (pos = 0; pos < howmany; pos++) {
332 struct resource *res = &dev->resource[pos];
333 reg = PCI_BASE_ADDRESS_0 + (pos << 2);
334 pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
338 struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
339 dev->rom_base_reg = rom;
340 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
341 IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
342 IORESOURCE_SIZEALIGN;
343 __pci_read_base(dev, pci_bar_mem32, res, rom);
/*
 * Read the bridge's I/O window (PCI_IO_BASE/PCI_IO_LIMIT, plus the upper-16
 * registers for 32-bit I/O addressing) into child->resource[0].
 * Granularity is 4K, or 1K when the bridge advertises 1K I/O windows.
 * NOTE(review): the base<=limit validity check and region.start assignment
 * are missing from this truncated listing.
 */
347 static void pci_read_bridge_io(struct pci_bus *child)
349 struct pci_dev *dev = child->self;
350 u8 io_base_lo, io_limit_lo;
351 unsigned long io_mask, io_granularity, base, limit;
352 struct pci_bus_region region;
353 struct resource *res;
355 io_mask = PCI_IO_RANGE_MASK;
356 io_granularity = 0x1000;
357 if (dev->io_window_1k) {
358 /* Support 1K I/O space granularity */
359 io_mask = PCI_IO_1K_RANGE_MASK;
360 io_granularity = 0x400;
363 res = child->resource[0];
364 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
365 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
/* Registers hold address bits [15:8]; shift back into place. */
366 base = (io_base_lo & io_mask) << 8;
367 limit = (io_limit_lo & io_mask) << 8;
369 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
370 u16 io_base_hi, io_limit_hi;
372 pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
373 pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
374 base |= ((unsigned long) io_base_hi << 16);
375 limit |= ((unsigned long) io_limit_hi << 16);
379 res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
381 region.end = limit + io_granularity - 1;
382 pcibios_bus_to_resource(dev->bus, res, &region);
383 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
/*
 * Read the bridge's non-prefetchable memory window
 * (PCI_MEMORY_BASE/PCI_MEMORY_LIMIT) into child->resource[1].
 * Registers hold address bits [31:20]; window granularity is 1MB
 * (hence the "+ 0xfffff" on the limit).
 */
387 static void pci_read_bridge_mmio(struct pci_bus *child)
389 struct pci_dev *dev = child->self;
390 u16 mem_base_lo, mem_limit_lo;
391 unsigned long base, limit;
392 struct pci_bus_region region;
393 struct resource *res;
395 res = child->resource[1];
396 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
397 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
398 base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
399 limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
401 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
403 region.end = limit + 0xfffff;
404 pcibios_bus_to_resource(dev->bus, res, &region);
405 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
/*
 * Read the bridge's prefetchable memory window into child->resource[2],
 * including the 64-bit upper halves when the window type is 64-bit.
 * On 32-bit kernels a window above 4G cannot be represented and is
 * reported as an error instead.
 */
409 static void pci_read_bridge_mmio_pref(struct pci_bus *child)
411 struct pci_dev *dev = child->self;
412 u16 mem_base_lo, mem_limit_lo;
413 unsigned long base, limit;
414 struct pci_bus_region region;
415 struct resource *res;
417 res = child->resource[2];
418 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
419 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
420 base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
421 limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
423 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
424 u32 mem_base_hi, mem_limit_hi;
426 pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
427 pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
430 * Some bridges set the base > limit by default, and some
431 * (broken) BIOSes do not initialize them. If we find
432 * this, just assume they are not being used.
434 if (mem_base_hi <= mem_limit_hi) {
435 #if BITS_PER_LONG == 64
436 base |= ((unsigned long) mem_base_hi) << 32;
437 limit |= ((unsigned long) mem_limit_hi) << 32;
/* 32-bit kernel: any non-zero upper half is unrepresentable. */
439 if (mem_base_hi || mem_limit_hi) {
440 dev_err(&dev->dev, "can't handle 64-bit address space for bridge\n");
447 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
448 IORESOURCE_MEM | IORESOURCE_PREFETCH;
449 if (res->flags & PCI_PREF_RANGE_TYPE_64)
450 res->flags |= IORESOURCE_MEM_64;
452 region.end = limit + 0xfffff;
453 pcibios_bus_to_resource(dev->bus, res, &region);
454 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
/*
 * Populate a child bus's window resources from its upstream bridge:
 * point child->resource[0..2] at the bridge's window resources, read the
 * I/O, MMIO and prefetchable windows, and — for subtractive-decode
 * (transparent) bridges — additionally inherit the parent bus's resources.
 */
458 void pci_read_bridge_bases(struct pci_bus *child)
460 struct pci_dev *dev = child->self;
461 struct resource *res;
464 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
467 dev_info(&dev->dev, "PCI bridge to %pR%s\n",
469 dev->transparent ? " (subtractive decode)" : "");
471 pci_bus_remove_resources(child);
472 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
473 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
475 pci_read_bridge_io(child);
476 pci_read_bridge_mmio(child);
477 pci_read_bridge_mmio_pref(child);
479 if (dev->transparent) {
480 pci_bus_for_each_resource(child->parent, res, i) {
481 if (res && res->flags) {
482 pci_bus_add_resource(child, res,
483 PCI_SUBTRACTIVE_DECODE);
484 dev_printk(KERN_DEBUG, &dev->dev,
485 " bridge window %pR (subtractive decode)\n",
/*
 * Allocate and zero a struct pci_bus, initialize its list heads and
 * default speeds, and (with generic PCI domains) inherit the parent's
 * domain number.  Returns the bus, or NULL when the kzalloc failure path
 * (not visible in this truncated listing) is taken.
 */
492 static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
496 b = kzalloc(sizeof(*b), GFP_KERNEL);
500 INIT_LIST_HEAD(&b->node);
501 INIT_LIST_HEAD(&b->children);
502 INIT_LIST_HEAD(&b->devices);
503 INIT_LIST_HEAD(&b->slots);
504 INIT_LIST_HEAD(&b->resources);
505 b->max_bus_speed = PCI_SPEED_UNKNOWN;
506 b->cur_bus_speed = PCI_SPEED_UNKNOWN;
507 #ifdef CONFIG_PCI_DOMAINS_GENERIC
509 b->domain_nr = parent->domain_nr;
/* Device-model release callback for a host bridge: run the bridge's
 * registered release_fn (if any) and free its window resource list. */
514 static void pci_release_host_bridge_dev(struct device *dev)
516 struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
518 if (bridge->release_fn)
519 bridge->release_fn(bridge);
521 pci_free_resource_list(&bridge->windows);
/* Allocate a zeroed pci_host_bridge with an empty window list.
 * NOTE(review): the association with bus @b is not visible here. */
526 static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
528 struct pci_host_bridge *bridge;
530 bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
534 INIT_LIST_HEAD(&bridge->windows);
/* Map the 4-bit PCI-X secondary-status frequency field to bus speeds. */
539 static const unsigned char pcix_bus_speed[] = {
540 PCI_SPEED_UNKNOWN, /* 0 */
541 PCI_SPEED_66MHz_PCIX, /* 1 */
542 PCI_SPEED_100MHz_PCIX, /* 2 */
543 PCI_SPEED_133MHz_PCIX, /* 3 */
544 PCI_SPEED_UNKNOWN, /* 4 */
545 PCI_SPEED_66MHz_PCIX_ECC, /* 5 */
546 PCI_SPEED_100MHz_PCIX_ECC, /* 6 */
547 PCI_SPEED_133MHz_PCIX_ECC, /* 7 */
548 PCI_SPEED_UNKNOWN, /* 8 */
549 PCI_SPEED_66MHz_PCIX_266, /* 9 */
550 PCI_SPEED_100MHz_PCIX_266, /* A */
551 PCI_SPEED_133MHz_PCIX_266, /* B */
552 PCI_SPEED_UNKNOWN, /* C */
553 PCI_SPEED_66MHz_PCIX_533, /* D */
554 PCI_SPEED_100MHz_PCIX_533, /* E */
555 PCI_SPEED_133MHz_PCIX_533 /* F */
/* Map the PCIe Link Status "current link speed" field to bus speeds. */
558 const unsigned char pcie_link_speed[] = {
559 PCI_SPEED_UNKNOWN, /* 0 */
560 PCIE_SPEED_2_5GT, /* 1 */
561 PCIE_SPEED_5_0GT, /* 2 */
562 PCIE_SPEED_8_0GT, /* 3 */
563 PCI_SPEED_UNKNOWN, /* 4 */
564 PCI_SPEED_UNKNOWN, /* 5 */
565 PCI_SPEED_UNKNOWN, /* 6 */
566 PCI_SPEED_UNKNOWN, /* 7 */
567 PCI_SPEED_UNKNOWN, /* 8 */
568 PCI_SPEED_UNKNOWN, /* 9 */
569 PCI_SPEED_UNKNOWN, /* A */
570 PCI_SPEED_UNKNOWN, /* B */
571 PCI_SPEED_UNKNOWN, /* C */
572 PCI_SPEED_UNKNOWN, /* D */
573 PCI_SPEED_UNKNOWN, /* E */
574 PCI_SPEED_UNKNOWN /* F */
/* Record the bus's current link speed from a PCIe Link Status value. */
577 void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
579 bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
581 EXPORT_SYMBOL_GPL(pcie_update_link_speed);
/* AGP data-rate lookup table (entries not visible in this truncated
 * listing). */
583 static unsigned char agp_speeds[] = {
/*
 * Convert AGP status bits into a bus speed: @agp3 selects AGP3 rate
 * encoding, @agpstat holds the rate bits.  Bits 2 and 1 bump the index
 * into agp_speeds[]; the bit-4 branch and AGP3 index adjustment are
 * missing from this truncated listing.
 */
591 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
597 else if (agpstat & 2)
599 else if (agpstat & 1)
611 return agp_speeds[index];
/*
 * Determine max/current speed for @bus from its upstream bridge, trying
 * AGP, then PCI-X (secondary status), then PCIe (link cap/status)
 * capabilities in turn.
 */
614 static void pci_set_bus_speed(struct pci_bus *bus)
616 struct pci_dev *bridge = bus->self;
619 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
621 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
625 pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
626 bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
628 pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
629 bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
632 pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
635 enum pci_bus_speed max;
637 pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
640 if (status & PCI_X_SSTATUS_533MHZ) {
641 max = PCI_SPEED_133MHz_PCIX_533;
642 } else if (status & PCI_X_SSTATUS_266MHZ) {
643 max = PCI_SPEED_133MHz_PCIX_266;
644 } else if (status & PCI_X_SSTATUS_133MHZ) {
645 if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
646 max = PCI_SPEED_133MHz_PCIX_ECC;
648 max = PCI_SPEED_133MHz_PCIX;
650 max = PCI_SPEED_66MHz_PCIX;
653 bus->max_bus_speed = max;
/* Current speed comes from the secondary-status frequency field. */
654 bus->cur_bus_speed = pcix_bus_speed[
655 (status & PCI_X_SSTATUS_FREQ) >> 6];
660 if (pci_is_pcie(bridge)) {
664 pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
665 bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
667 pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
668 pcie_update_link_speed(bus, linksta);
/*
 * Allocate a child bus under @parent behind @bridge with number @busnr:
 * inherit ops/msi/sysdata/flags from the parent, wire up the device-model
 * linkage, set bus speeds, point the child's window resources at the
 * bridge's resources, register the device and create legacy sysfs files.
 * NOTE(review): error-handling paths after device_register() are missing
 * from this truncated listing.
 */
672 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
673 struct pci_dev *bridge, int busnr)
675 struct pci_bus *child;
680 * Allocate a new bus, and inherit stuff from the parent..
682 child = pci_alloc_bus(parent);
686 child->parent = parent;
687 child->ops = parent->ops;
688 child->msi = parent->msi;
689 child->sysdata = parent->sysdata;
690 child->bus_flags = parent->bus_flags;
692 /* initialize some portions of the bus device, but don't register it
693 * now as the parent is not properly set up yet.
695 child->dev.class = &pcibus_class;
696 dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
699 * Set up the primary, secondary and subordinate
/* Subordinate defaults to 0xff until scanning trims it. */
702 child->number = child->busn_res.start = busnr;
703 child->primary = parent->busn_res.start;
704 child->busn_res.end = 0xff;
707 child->dev.parent = parent->bridge;
711 child->self = bridge;
712 child->bridge = get_device(&bridge->dev);
713 child->dev.parent = child->bridge;
714 pci_set_bus_of_node(child);
715 pci_set_bus_speed(child);
717 /* Set up default resource pointers and names.. */
718 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
719 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
720 child->resource[i]->name = child->name;
722 bridge->subordinate = child;
725 ret = device_register(&child->dev);
728 pcibios_add_bus(child);
730 /* Create legacy_io and legacy_mem files for this bus */
731 pci_create_legacy_files(child);
/* Allocate a child bus and link it onto the parent's children list under
 * pci_bus_sem.  NOTE(review): the NULL-check between allocation and
 * list_add_tail is not visible in this truncated listing. */
736 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
739 struct pci_bus *child;
741 child = pci_alloc_child_bus(parent, dev, busnr);
743 down_write(&pci_bus_sem);
744 list_add_tail(&child->node, &parent->children);
745 up_write(&pci_bus_sem);
749 EXPORT_SYMBOL(pci_add_new_bus);
/* Enable CRS Software Visibility on a PCIe root port when the root
 * capability advertises support. */
751 static void pci_enable_crs(struct pci_dev *pdev)
755 /* Enable CRS Software Visibility if supported */
756 pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
757 if (root_cap & PCI_EXP_RTCAP_CRSVIS)
758 pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
759 PCI_EXP_RTCTL_CRSSVE);
763 * If it's a bridge, configure it and scan the bus behind it.
764 * For CardBus bridges, we don't scan behind as the devices will
765 * be handled by the bridge driver itself.
767 * We need to process bridges in two passes -- first we scan those
768 * already configured by the BIOS and after we are done with all of
769 * them, we proceed to assigning numbers to the remaining buses in
770 * order to avoid overlaps between old and new bus numbers.
/*
 * Scan (pass 0) or configure-and-scan (pass 1) the bus behind bridge @dev.
 * Returns the highest bus number used at or below this bridge.
 * NOTE(review): this truncated listing omits many lines of the original
 * (goto targets, else branches, closing braces), so control flow below
 * cannot be read literally.
 */
772 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
774 struct pci_bus *child;
775 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
778 u8 primary, secondary, subordinate;
/* PCI_PRIMARY_BUS packs primary/secondary/subordinate in one dword. */
781 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
782 primary = buses & 0xFF;
783 secondary = (buses >> 8) & 0xFF;
784 subordinate = (buses >> 16) & 0xFF;
786 dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
787 secondary, subordinate, pass);
789 if (!primary && (primary != bus->number) && secondary && subordinate) {
790 dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
791 primary = bus->number;
794 /* Check if setup is sensible at all */
796 (primary != bus->number || secondary <= bus->number ||
797 secondary > subordinate)) {
798 dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
799 secondary, subordinate);
803 /* Disable MasterAbortMode during probing to avoid reporting
804 of bus errors (in some architectures) */
805 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
806 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
807 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
/* Case 1: firmware already configured the bridge — note it in pass 0. */
811 if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
812 !is_cardbus && !broken) {
815 * Bus already configured by firmware, process it in the first
816 * pass and just note the configuration.
822 * The bus might already exist for two reasons: Either we are
823 * rescanning the bus or the bus is reachable through more than
824 * one bridge. The second case can happen with the i450NX
827 child = pci_find_bus(pci_domain_nr(bus), secondary);
829 child = pci_add_new_bus(bus, dev, secondary);
832 child->primary = primary;
833 pci_bus_insert_busn_res(child, secondary, subordinate);
834 child->bridge_ctl = bctl;
837 cmax = pci_scan_child_bus(child);
838 if (cmax > subordinate)
839 dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
841 /* subordinate should equal child->busn_res.end */
842 if (subordinate > max)
/* Case 2: unconfigured/broken/cardbus — assign numbers in pass 1. */
846 * We need to assign a number to this bus which we always
847 * do in the second pass.
850 if (pcibios_assign_all_busses() || broken || is_cardbus)
851 /* Temporarily disable forwarding of the
852 configuration cycles on all bridges in
853 this bus segment to avoid possible
854 conflicts in the second pass between two
855 bridges programmed with overlapping
857 pci_write_config_dword(dev, PCI_PRIMARY_BUS,
/* Clear latched bridge status bits before reprogramming. */
863 pci_write_config_word(dev, PCI_STATUS, 0xffff);
865 /* Prevent assigning a bus number that already exists.
866 * This can happen when a bridge is hot-plugged, so in
867 * this case we only re-scan this bus. */
868 child = pci_find_bus(pci_domain_nr(bus), max+1);
870 child = pci_add_new_bus(bus, dev, max+1);
873 pci_bus_insert_busn_res(child, max+1, 0xff);
876 buses = (buses & 0xff000000)
877 | ((unsigned int)(child->primary) << 0)
878 | ((unsigned int)(child->busn_res.start) << 8)
879 | ((unsigned int)(child->busn_res.end) << 16);
882 * yenta.c forces a secondary latency timer of 176.
883 * Copy that behaviour here.
886 buses &= ~0xff000000;
887 buses |= CARDBUS_LATENCY_TIMER << 24;
891 * We need to blast all three values with a single write.
893 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
896 child->bridge_ctl = bctl;
897 max = pci_scan_child_bus(child);
900 * For CardBus bridges, we leave 4 bus numbers
901 * as cards with a PCI-to-PCI bridge can be
904 for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
905 struct pci_bus *parent = bus;
906 if (pci_find_bus(pci_domain_nr(bus),
909 while (parent->parent) {
910 if ((!pcibios_assign_all_busses()) &&
911 (parent->busn_res.end > max) &&
912 (parent->busn_res.end <= max+i)) {
915 parent = parent->parent;
919 * Often, there are two cardbus bridges
920 * -- try to leave one valid bus number
930 * Set the subordinate bus number to its real value.
932 pci_bus_update_busn_res_end(child, max);
933 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
937 (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
938 pci_domain_nr(bus), child->number);
940 /* Has only triggered on CardBus, fixup is in yenta_socket */
/* Warn when the child's bus-number range escapes an ancestor's range. */
941 while (bus->parent) {
942 if ((child->busn_res.end > bus->busn_res.end) ||
943 (child->number > bus->busn_res.end) ||
944 (child->number < bus->number) ||
945 (child->busn_res.end < bus->number)) {
946 dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
948 (bus->number > child->busn_res.end &&
949 bus->busn_res.end < child->number) ?
950 "wholly" : "partially",
951 bus->self->transparent ? " transparent" : "",
/* Restore the bridge-control value saved before probing. */
959 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
963 EXPORT_SYMBOL(pci_scan_bridge);
966 * Read interrupt line and base address registers.
967 * The architecture-dependent code can tweak these, of course.
/* Read PCI_INTERRUPT_PIN and, when a pin is present, PCI_INTERRUPT_LINE.
 * NOTE(review): the pin check and dev->pin/dev->irq stores are missing
 * from this truncated listing. */
969 static void pci_read_irq(struct pci_dev *dev)
973 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
976 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
/* Cache the PCIe capability offset, flags register and max payload size
 * supported (DEVCAP payload field) on the pci_dev. */
980 void set_pcie_port_type(struct pci_dev *pdev)
985 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
988 pdev->pcie_cap = pos;
989 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
990 pdev->pcie_flags_reg = reg16;
991 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
992 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
/* Mark the device as a hotplug bridge when its PCIe slot capability
 * advertises a hotplug-capable slot. */
995 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
999 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
1000 if (reg32 & PCI_EXP_SLTCAP_HPC)
1001 pdev->is_hotplug_bridge = 1;
1005 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
1008 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1009 * when forwarding a type1 configuration request the bridge must check that
1010 * the extended register address field is zero. The bridge is not permitted
1011 * to forward the transactions and must handle it as an Unsupported Request.
1012 * Some bridges do not follow this rule and simply drop the extended register
1013 * bits, resulting in the standard config space being aliased, every 256
1014 * bytes across the entire configuration space. Test for this condition by
1015 * comparing the first dword of each potential alias to the vendor/device ID.
1017 * ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1018 * AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
/* Only compiled with quirks; the non-quirks fallback (returning false)
 * is outside this truncated listing. */
1020 static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1022 #ifdef CONFIG_PCI_QUIRKS
1026 pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
/* Probe each 256-byte boundary in extended space for a vendor-ID echo. */
1028 for (pos = PCI_CFG_SPACE_SIZE;
1029 pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
1030 if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
1042 * pci_cfg_space_size - get the configuration space size of the PCI device.
1045 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1046 * have 4096 bytes. Even if the device is capable, that doesn't mean we can
1047 * access it. Maybe we don't have a way to generate extended config space
1048 * accesses, or the device is behind a reverse Express bridge. So we try
1049 * reading the dword at 0x100 which must either be 0 or a valid extended
1050 * capability header.
/* Probe offset 0x100: all-ones, a failed read, or an aliased extended
 * space means only the standard 256 bytes are usable. */
1052 static int pci_cfg_space_size_ext(struct pci_dev *dev)
1055 int pos = PCI_CFG_SPACE_SIZE;
1057 if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1059 if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
1062 return PCI_CFG_SPACE_EXP_SIZE;
1065 return PCI_CFG_SPACE_SIZE;
/*
 * Decide between 256-byte and 4096-byte config space for @dev:
 * host bridges and PCIe devices probe extended space directly; plain PCI
 * devices qualify only via a PCI-X capability whose status advertises
 * 266/533 MHz (i.e. PCI-X mode 2).
 */
1068 int pci_cfg_space_size(struct pci_dev *dev)
1074 class = dev->class >> 8;
1075 if (class == PCI_CLASS_BRIDGE_HOST)
1076 return pci_cfg_space_size_ext(dev);
1078 if (!pci_is_pcie(dev)) {
1079 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1083 pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1084 if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
1088 return pci_cfg_space_size_ext(dev);
1091 return PCI_CFG_SPACE_SIZE;
1094 #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1097 * pci_setup_device - fill in class and map information of a device
1098 * @dev: the device structure to fill
1100 * Initialize the device structure with information about the device's
1101 * vendor,class,memory and IO-space addresses,IRQ lines etc.
1102 * Called at initialisation of the PCI subsystem and by CardBus services.
1103 * Returns 0 on success and negative if unknown type of device (not normal,
1104 * bridge or CardBus).
/* NOTE(review): this truncated listing is missing break statements,
 * goto labels and closing braces within the switch below. */
1106 int pci_setup_device(struct pci_dev *dev)
1110 struct pci_slot *slot;
1112 struct pci_bus_region region;
1113 struct resource *res;
1115 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
1118 dev->sysdata = dev->bus->sysdata;
1119 dev->dev.parent = dev->bus->bridge;
1120 dev->dev.bus = &pci_bus_type;
/* Header type: low 7 bits = layout, bit 7 = multifunction flag. */
1121 dev->hdr_type = hdr_type & 0x7f;
1122 dev->multifunction = !!(hdr_type & 0x80);
1123 dev->error_state = pci_channel_io_normal;
1124 set_pcie_port_type(dev);
1126 list_for_each_entry(slot, &dev->bus->slots, list)
1127 if (PCI_SLOT(dev->devfn) == slot->number)
1130 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1131 set this higher, assuming the system even supports it. */
1132 dev->dma_mask = 0xffffffff;
1134 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
1135 dev->bus->number, PCI_SLOT(dev->devfn),
1136 PCI_FUNC(dev->devfn));
1138 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
1139 dev->revision = class & 0xff;
1140 dev->class = class >> 8; /* upper 3 bytes */
1142 dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1143 dev->vendor, dev->device, dev->hdr_type, dev->class);
1145 /* need to have dev->class ready */
1146 dev->cfg_size = pci_cfg_space_size(dev);
1148 /* "Unknown power state" */
1149 dev->current_state = PCI_UNKNOWN;
1151 /* Early fixups, before probing the BARs */
1152 pci_fixup_device(pci_fixup_early, dev);
1153 /* device class may be changed after fixup */
1154 class = dev->class >> 8;
1156 switch (dev->hdr_type) { /* header type */
1157 case PCI_HEADER_TYPE_NORMAL: /* standard header */
1158 if (class == PCI_CLASS_BRIDGE_PCI)
1161 pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1162 pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1163 pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1166 * Do the ugly legacy mode stuff here rather than broken chip
1167 * quirk code. Legacy mode ATA controllers have fixed
1168 * addresses. These are not always echoed in BAR0-3, and
1169 * BAR0-3 in a few cases contain junk!
1171 if (class == PCI_CLASS_STORAGE_IDE) {
1173 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
/* progif bit 0 clear: primary channel in legacy mode at 0x1F0/0x3F6. */
1174 if ((progif & 1) == 0) {
1175 region.start = 0x1F0;
1177 res = &dev->resource[0];
1178 res->flags = LEGACY_IO_RESOURCE;
1179 pcibios_bus_to_resource(dev->bus, res, &region);
1180 dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
1182 region.start = 0x3F6;
1184 res = &dev->resource[1];
1185 res->flags = LEGACY_IO_RESOURCE;
1186 pcibios_bus_to_resource(dev->bus, res, &region);
1187 dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
/* progif bit 2 clear: secondary channel in legacy mode at 0x170/0x376. */
1190 if ((progif & 4) == 0) {
1191 region.start = 0x170;
1193 res = &dev->resource[2];
1194 res->flags = LEGACY_IO_RESOURCE;
1195 pcibios_bus_to_resource(dev->bus, res, &region);
1196 dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
1198 region.start = 0x376;
1200 res = &dev->resource[3];
1201 res->flags = LEGACY_IO_RESOURCE;
1202 pcibios_bus_to_resource(dev->bus, res, &region);
1203 dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
1209 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
1210 if (class != PCI_CLASS_BRIDGE_PCI)
1212 /* The PCI-to-PCI bridge spec requires that subtractive
1213 decoding (i.e. transparent) bridge must have programming
1214 interface code of 0x01. */
1216 dev->transparent = ((dev->class & 0xff) == 1);
1217 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1218 set_pcie_hotplug_bridge(dev);
1219 pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1221 pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1222 pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1226 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
1227 if (class != PCI_CLASS_BRIDGE_CARDBUS)
1230 pci_read_bases(dev, 1, 0);
1231 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1232 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1235 default: /* unknown header */
1236 dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
/* Class/header-type mismatch path: reset class to "not defined". */
1241 dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
1242 dev->class, dev->hdr_type);
1243 dev->class = PCI_CLASS_NOT_DEFINED;
1246 /* We found a fine healthy device, go go go... */
/* Defaults used when firmware supplies no (or an unsupported) _HPP
 * type-0 record: cache line 8 dwords, latency timer 0x40. */
1250 static struct hpp_type0 pci_default_type0 = {
1252 .cache_line_size = 8,
1253 .latency_timer = 0x40,
/*
 * Apply _HPP type-0 hotplug parameters to @dev: cache line size, latency
 * timer, SERR/PERR enables in PCI_COMMAND, and — for PCI bridges — the
 * secondary latency timer and matching bridge-control bits.  Falls back
 * to pci_default_type0 when @hpp is NULL or its revision is unsupported.
 */
1258 static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1260 u16 pci_cmd, pci_bctl;
1263 hpp = &pci_default_type0;
1265 if (hpp->revision > 1) {
1267 "PCI settings rev %d not supported; using defaults\n",
1269 hpp = &pci_default_type0;
1272 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
1273 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
1274 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
1275 if (hpp->enable_serr)
1276 pci_cmd |= PCI_COMMAND_SERR;
1277 if (hpp->enable_perr)
1278 pci_cmd |= PCI_COMMAND_PARITY;
1279 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
1281 /* Program bridge control value */
1282 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
1283 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
1284 hpp->latency_timer);
1285 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
1286 if (hpp->enable_serr)
1287 pci_bctl |= PCI_BRIDGE_CTL_SERR;
1288 if (hpp->enable_perr)
1289 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
1290 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
1294 static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1297 dev_warn(&dev->dev, "PCI-X settings not supported\n");
1300 static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1308 if (hpp->revision > 1) {
1309 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
1315 * Don't allow _HPX to change MPS or MRRS settings. We manage
1316 * those to make sure they're consistent with the rest of the
1319 hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
1320 PCI_EXP_DEVCTL_READRQ;
1321 hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
1322 PCI_EXP_DEVCTL_READRQ);
1324 /* Initialize Device Control Register */
1325 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
1326 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
1328 /* Initialize Link Control Register */
1329 if (dev->subordinate)
1330 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
1331 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
1333 /* Find Advanced Error Reporting Enhanced Capability */
1334 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
1338 /* Initialize Uncorrectable Error Mask Register */
1339 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, ®32);
1340 reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
1341 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
1343 /* Initialize Uncorrectable Error Severity Register */
1344 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, ®32);
1345 reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
1346 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
1348 /* Initialize Correctable Error Mask Register */
1349 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, ®32);
1350 reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
1351 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
1353 /* Initialize Advanced Error Capabilities and Control Register */
1354 pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32);
1355 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
1356 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
1359 * FIXME: The following two registers are not supported yet.
1361 * o Secondary Uncorrectable Error Severity Register
1362 * o Secondary Uncorrectable Error Mask Register
1366 static void pci_configure_device(struct pci_dev *dev)
1368 struct hotplug_params hpp;
1371 memset(&hpp, 0, sizeof(hpp));
1372 ret = pci_get_hp_params(dev, &hpp);
1376 program_hpp_type2(dev, hpp.t2);
1377 program_hpp_type1(dev, hpp.t1);
1378 program_hpp_type0(dev, hpp.t0);
/* Release per-capability state (VPD, SR-IOV, config-save buffers). */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
1389 * pci_release_dev - free a pci device structure when all users of it are finished.
1390 * @dev: device that's been disconnected
1392 * Will be called only by the device core when all users of this pci device are
1395 static void pci_release_dev(struct device *dev)
1397 struct pci_dev *pci_dev;
1399 pci_dev = to_pci_dev(dev);
1400 pci_release_capabilities(pci_dev);
1401 pci_release_of_node(pci_dev);
1402 pcibios_release_device(pci_dev);
1403 pci_bus_put(pci_dev->bus);
1404 kfree(pci_dev->driver_override);
1408 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1410 struct pci_dev *dev;
1412 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1416 INIT_LIST_HEAD(&dev->bus_list);
1417 dev->dev.type = &pci_dev_type;
1418 dev->bus = pci_bus_get(bus);
1422 EXPORT_SYMBOL(pci_alloc_dev);
1424 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1429 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1432 /* some broken boards return 0 or ~0 if a slot is empty: */
1433 if (*l == 0xffffffff || *l == 0x00000000 ||
1434 *l == 0x0000ffff || *l == 0xffff0000)
1438 * Configuration Request Retry Status. Some root ports return the
1439 * actual device ID instead of the synthetic ID (0xFFFF) required
1440 * by the PCIe spec. Ignore the device ID and only check for
1443 while ((*l & 0xffff) == 0x0001) {
1449 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1451 /* Card hasn't responded in 60 seconds? Must be stuck. */
1452 if (delay > crs_timeout) {
1453 printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
1454 pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
1462 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1465 * Read the config data for a PCI device, sanity-check it
1466 * and fill in the dev structure...
1468 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1470 struct pci_dev *dev;
1473 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1476 dev = pci_alloc_dev(bus);
1481 dev->vendor = l & 0xffff;
1482 dev->device = (l >> 16) & 0xffff;
1484 pci_set_of_node(dev);
1486 if (pci_setup_device(dev)) {
1487 pci_bus_put(dev->bus);
/* Discover and initialize the standard capabilities of a new device. */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* MSI/MSI-X list */
	pci_msi_init_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_pci22_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);
}
1519 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1523 pci_configure_device(dev);
1525 device_initialize(&dev->dev);
1526 dev->dev.release = pci_release_dev;
1528 set_dev_node(&dev->dev, pcibus_to_node(bus));
1529 dev->dev.dma_mask = &dev->dma_mask;
1530 dev->dev.dma_parms = &dev->dma_parms;
1531 dev->dev.coherent_dma_mask = 0xffffffffull;
1533 pci_set_dma_max_seg_size(dev, 65536);
1534 pci_set_dma_seg_boundary(dev, 0xffffffff);
1536 /* Fix up broken headers */
1537 pci_fixup_device(pci_fixup_header, dev);
1539 /* moved out from quirk header fixup code */
1540 pci_reassigndev_resource_alignment(dev);
1542 /* Clear the state_saved flag. */
1543 dev->state_saved = false;
1545 /* Initialize various capabilities */
1546 pci_init_capabilities(dev);
1549 * Add the device to our list of discovered devices
1550 * and the bus list for fixup functions, etc.
1552 down_write(&pci_bus_sem);
1553 list_add_tail(&dev->bus_list, &bus->devices);
1554 up_write(&pci_bus_sem);
1556 ret = pcibios_add_device(dev);
1559 /* Notifier could use PCI capabilities */
1560 dev->match_driver = false;
1561 ret = device_add(&dev->dev);
1565 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
1567 struct pci_dev *dev;
1569 dev = pci_get_slot(bus, devfn);
1575 dev = pci_scan_device(bus, devfn);
1579 pci_device_add(dev, bus);
1583 EXPORT_SYMBOL(pci_scan_single_device);
1585 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
1591 if (pci_ari_enabled(bus)) {
1594 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1598 pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
1599 next_fn = PCI_ARI_CAP_NFN(cap);
1601 return 0; /* protect against malformed list */
1606 /* dev may be NULL for non-contiguous multifunction devices */
1607 if (!dev || dev->multifunction)
1608 return (fn + 1) % 8;
1613 static int only_one_child(struct pci_bus *bus)
1615 struct pci_dev *parent = bus->self;
1617 if (!parent || !pci_is_pcie(parent))
1619 if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1621 if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
1622 !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1628 * pci_scan_slot - scan a PCI slot on a bus for devices.
1629 * @bus: PCI bus to scan
1630 * @devfn: slot number to scan (must have zero function.)
1632 * Scan a PCI slot on the specified PCI bus for devices, adding
1633 * discovered devices to the @bus->devices list. New devices
1634 * will not have is_added set.
1636 * Returns the number of new devices found.
1638 int pci_scan_slot(struct pci_bus *bus, int devfn)
1640 unsigned fn, nr = 0;
1641 struct pci_dev *dev;
1643 if (only_one_child(bus) && (devfn > 0))
1644 return 0; /* Already scanned the entire slot */
1646 dev = pci_scan_single_device(bus, devfn);
1652 for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
1653 dev = pci_scan_single_device(bus, devfn + fn);
1657 dev->multifunction = 1;
1661 /* only one slot has pcie device */
1662 if (bus->self && nr)
1663 pcie_aspm_init_link_state(bus->self);
1667 EXPORT_SYMBOL(pci_scan_slot);
1669 static int pcie_find_smpss(struct pci_dev *dev, void *data)
1673 if (!pci_is_pcie(dev))
1677 * We don't have a way to change MPS settings on devices that have
1678 * drivers attached. A hot-added device might support only the minimum
1679 * MPS setting (MPS=128). Therefore, if the fabric contains a bridge
1680 * where devices may be hot-added, we limit the fabric MPS to 128 so
1681 * hot-added devices will work correctly.
1683 * However, if we hot-add a device to a slot directly below a Root
1684 * Port, it's impossible for there to be other existing devices below
1685 * the port. We don't limit the MPS in this case because we can
1686 * reconfigure MPS on both the Root Port and the hot-added device,
1687 * and there are no other devices involved.
1689 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
1691 if (dev->is_hotplug_bridge &&
1692 pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
1695 if (*smpss > dev->pcie_mpss)
1696 *smpss = dev->pcie_mpss;
1701 static void pcie_write_mps(struct pci_dev *dev, int mps)
1705 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
1706 mps = 128 << dev->pcie_mpss;
1708 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
1710 /* For "Performance", the assumption is made that
1711 * downstream communication will never be larger than
1712 * the MRRS. So, the MPS only needs to be configured
1713 * for the upstream communication. This being the case,
1714 * walk from the top down and set the MPS of the child
1715 * to that of the parent bus.
1717 * Configure the device MPS with the smaller of the
1718 * device MPSS or the bridge MPS (which is assumed to be
1719 * properly configured at this point to the largest
1720 * allowable MPS based on its parent bus).
1722 mps = min(mps, pcie_get_mps(dev->bus->self));
1725 rc = pcie_set_mps(dev, mps);
1727 dev_err(&dev->dev, "Failed attempting to set the MPS\n");
1730 static void pcie_write_mrrs(struct pci_dev *dev)
1734 /* In the "safe" case, do not configure the MRRS. There appear to be
1735 * issues with setting MRRS to 0 on a number of devices.
1737 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1740 /* For Max performance, the MRRS must be set to the largest supported
1741 * value. However, it cannot be configured larger than the MPS the
1742 * device or the bus can support. This should already be properly
1743 * configured by a prior call to pcie_write_mps.
1745 mrrs = pcie_get_mps(dev);
1747 /* MRRS is a R/W register. Invalid values can be written, but a
1748 * subsequent read will verify if the value is acceptable or not.
1749 * If the MRRS value provided is not acceptable (e.g., too large),
1750 * shrink the value until it is acceptable to the HW.
1752 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1753 rc = pcie_set_readrq(dev, mrrs);
1757 dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
1762 dev_err(&dev->dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n");
1765 static void pcie_bus_detect_mps(struct pci_dev *dev)
1767 struct pci_dev *bridge = dev->bus->self;
1773 mps = pcie_get_mps(dev);
1774 p_mps = pcie_get_mps(bridge);
1777 dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1778 mps, pci_name(bridge), p_mps);
1781 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1785 if (!pci_is_pcie(dev))
1788 if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1789 pcie_bus_detect_mps(dev);
1793 mps = 128 << *(u8 *)data;
1794 orig_mps = pcie_get_mps(dev);
1796 pcie_write_mps(dev, mps);
1797 pcie_write_mrrs(dev);
1799 dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
1800 pcie_get_mps(dev), 128 << dev->pcie_mpss,
1801 orig_mps, pcie_get_readrq(dev));
1806 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
1807 * parents then children fashion. If this changes, then this code will not
1810 void pcie_bus_configure_settings(struct pci_bus *bus)
1817 if (!pci_is_pcie(bus->self))
1820 /* FIXME - Peer to peer DMA is possible, though the endpoint would need
1821 * to be aware of the MPS of the destination. To work around this,
1822 * simply force the MPS of the entire system to the smallest possible.
1824 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1827 if (pcie_bus_config == PCIE_BUS_SAFE) {
1828 smpss = bus->self->pcie_mpss;
1830 pcie_find_smpss(bus->self, &smpss);
1831 pci_walk_bus(bus, pcie_find_smpss, &smpss);
1834 pcie_bus_configure_set(bus->self, &smpss);
1835 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1837 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
1839 unsigned int pci_scan_child_bus(struct pci_bus *bus)
1841 unsigned int devfn, pass, max = bus->busn_res.start;
1842 struct pci_dev *dev;
1844 dev_dbg(&bus->dev, "scanning bus\n");
1846 /* Go find them, Rover! */
1847 for (devfn = 0; devfn < 0x100; devfn += 8)
1848 pci_scan_slot(bus, devfn);
1850 /* Reserve buses for SR-IOV capability. */
1851 max += pci_iov_bus_range(bus);
1854 * After performing arch-dependent fixup of the bus, look behind
1855 * all PCI-to-PCI bridges on this bus.
1857 if (!bus->is_added) {
1858 dev_dbg(&bus->dev, "fixups for bus\n");
1859 pcibios_fixup_bus(bus);
1863 for (pass = 0; pass < 2; pass++)
1864 list_for_each_entry(dev, &bus->devices, bus_list) {
1865 if (pci_is_bridge(dev))
1866 max = pci_scan_bridge(bus, dev, max, pass);
1870 * We've scanned the bus and so we know all about what's on
1871 * the other side of any bridges that may be on this bus plus
1874 * Return how far we've got finding sub-buses.
1876 dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
1879 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1882 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
1883 * @bridge: Host bridge to set up.
1885 * Default empty implementation. Replace with an architecture-specific setup
1886 * routine, if necessary.
1888 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
1893 void __weak pcibios_add_bus(struct pci_bus *bus)
1897 void __weak pcibios_remove_bus(struct pci_bus *bus)
1901 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1902 struct pci_ops *ops, void *sysdata, struct list_head *resources)
1905 struct pci_host_bridge *bridge;
1906 struct pci_bus *b, *b2;
1907 struct pci_host_bridge_window *window, *n;
1908 struct resource *res;
1909 resource_size_t offset;
1913 b = pci_alloc_bus(NULL);
1917 b->sysdata = sysdata;
1919 b->number = b->busn_res.start = bus;
1920 pci_bus_assign_domain_nr(b, parent);
1921 b2 = pci_find_bus(pci_domain_nr(b), bus);
1923 /* If we already got to this bus through a different bridge, ignore it */
1924 dev_dbg(&b2->dev, "bus already known\n");
1928 bridge = pci_alloc_host_bridge(b);
1932 bridge->dev.parent = parent;
1933 bridge->dev.release = pci_release_host_bridge_dev;
1934 dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
1935 error = pcibios_root_bridge_prepare(bridge);
1941 error = device_register(&bridge->dev);
1943 put_device(&bridge->dev);
1946 b->bridge = get_device(&bridge->dev);
1947 device_enable_async_suspend(b->bridge);
1948 pci_set_bus_of_node(b);
1951 set_dev_node(b->bridge, pcibus_to_node(b));
1953 b->dev.class = &pcibus_class;
1954 b->dev.parent = b->bridge;
1955 dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
1956 error = device_register(&b->dev);
1958 goto class_dev_reg_err;
1962 /* Create legacy_io and legacy_mem files for this bus */
1963 pci_create_legacy_files(b);
1966 dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
1968 printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
1970 /* Add initial resources to the bus */
1971 list_for_each_entry_safe(window, n, resources, list) {
1972 list_move_tail(&window->list, &bridge->windows);
1974 offset = window->offset;
1975 if (res->flags & IORESOURCE_BUS)
1976 pci_bus_insert_busn_res(b, bus, res->end);
1978 pci_bus_add_resource(b, res, 0);
1980 if (resource_type(res) == IORESOURCE_IO)
1981 fmt = " (bus address [%#06llx-%#06llx])";
1983 fmt = " (bus address [%#010llx-%#010llx])";
1984 snprintf(bus_addr, sizeof(bus_addr), fmt,
1985 (unsigned long long) (res->start - offset),
1986 (unsigned long long) (res->end - offset));
1989 dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
1992 down_write(&pci_bus_sem);
1993 list_add_tail(&b->node, &pci_root_buses);
1994 up_write(&pci_bus_sem);
1999 put_device(&bridge->dev);
2000 device_unregister(&bridge->dev);
2006 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
2008 struct resource *res = &b->busn_res;
2009 struct resource *parent_res, *conflict;
2013 res->flags = IORESOURCE_BUS;
2015 if (!pci_is_root_bus(b))
2016 parent_res = &b->parent->busn_res;
2018 parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
2019 res->flags |= IORESOURCE_PCI_FIXED;
2022 conflict = request_resource_conflict(parent_res, res);
2025 dev_printk(KERN_DEBUG, &b->dev,
2026 "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
2027 res, pci_is_root_bus(b) ? "domain " : "",
2028 parent_res, conflict->name, conflict);
2030 return conflict == NULL;
2033 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
2035 struct resource *res = &b->busn_res;
2036 struct resource old_res = *res;
2037 resource_size_t size;
2040 if (res->start > bus_max)
2043 size = bus_max - res->start + 1;
2044 ret = adjust_resource(res, res->start, size);
2045 dev_printk(KERN_DEBUG, &b->dev,
2046 "busn_res: %pR end %s updated to %02x\n",
2047 &old_res, ret ? "can not be" : "is", bus_max);
2049 if (!ret && !res->parent)
2050 pci_bus_insert_busn_res(b, res->start, res->end);
2055 void pci_bus_release_busn_res(struct pci_bus *b)
2057 struct resource *res = &b->busn_res;
2060 if (!res->flags || !res->parent)
2063 ret = release_resource(res);
2064 dev_printk(KERN_DEBUG, &b->dev,
2065 "busn_res: %pR %s released\n",
2066 res, ret ? "can not be" : "is");
2069 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2070 struct pci_ops *ops, void *sysdata, struct list_head *resources)
2072 struct pci_host_bridge_window *window;
2077 list_for_each_entry(window, resources, list)
2078 if (window->res->flags & IORESOURCE_BUS) {
2083 b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
2089 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2091 pci_bus_insert_busn_res(b, bus, 255);
2094 max = pci_scan_child_bus(b);
2097 pci_bus_update_busn_res_end(b, max);
2099 pci_bus_add_devices(b);
2102 EXPORT_SYMBOL(pci_scan_root_bus);
2104 /* Deprecated; use pci_scan_root_bus() instead */
2105 struct pci_bus *pci_scan_bus_parented(struct device *parent,
2106 int bus, struct pci_ops *ops, void *sysdata)
2108 LIST_HEAD(resources);
2111 pci_add_resource(&resources, &ioport_resource);
2112 pci_add_resource(&resources, &iomem_resource);
2113 pci_add_resource(&resources, &busn_resource);
2114 b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
2116 pci_scan_child_bus(b);
2118 pci_free_resource_list(&resources);
2121 EXPORT_SYMBOL(pci_scan_bus_parented);
2123 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2126 LIST_HEAD(resources);
2129 pci_add_resource(&resources, &ioport_resource);
2130 pci_add_resource(&resources, &iomem_resource);
2131 pci_add_resource(&resources, &busn_resource);
2132 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2134 pci_scan_child_bus(b);
2135 pci_bus_add_devices(b);
2137 pci_free_resource_list(&resources);
2141 EXPORT_SYMBOL(pci_scan_bus);
2144 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2145 * @bridge: PCI bridge for the bus to scan
2147 * Scan a PCI bus and child buses for new devices, add them,
2148 * and enable them, resizing bridge mmio/io resource if necessary
2149 * and possible. The caller must ensure the child devices are already
2150 * removed for resizing to occur.
2152 * Returns the max number of subordinate bus discovered.
2154 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2157 struct pci_bus *bus = bridge->subordinate;
2159 max = pci_scan_child_bus(bus);
2161 pci_assign_unassigned_bridge_resources(bridge);
2163 pci_bus_add_devices(bus);
/**
 * pci_rescan_bus - scan a PCI bus for devices.
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, adds them,
 * and enables them.
 *
 * Returns the max number of subordinate bus discovered.
 */
unsigned int pci_rescan_bus(struct pci_bus *bus)
{
	unsigned int max;

	max = pci_scan_child_bus(bus);
	pci_assign_unassigned_bus_resources(bus);
	pci_bus_add_devices(bus);

	return max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);
/*
 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
 * routines should always be executed under this mutex.
 */
static DEFINE_MUTEX(pci_rescan_remove_lock);
2195 void pci_lock_rescan_remove(void)
2197 mutex_lock(&pci_rescan_remove_lock);
2199 EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2201 void pci_unlock_rescan_remove(void)
2203 mutex_unlock(&pci_rescan_remove_lock);
2205 EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
2207 static int __init pci_sort_bf_cmp(const struct device *d_a,
2208 const struct device *d_b)
2210 const struct pci_dev *a = to_pci_dev(d_a);
2211 const struct pci_dev *b = to_pci_dev(d_b);
2213 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2214 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
2216 if (a->bus->number < b->bus->number) return -1;
2217 else if (a->bus->number > b->bus->number) return 1;
2219 if (a->devfn < b->devfn) return -1;
2220 else if (a->devfn > b->devfn) return 1;
2225 void __init pci_sort_breadthfirst(void)
2227 bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);