arch/x86/pci/acpi.c (pandora-kernel.git, commit 44f83ce0247019d78d02f6b879c13da4c9866f0a)
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/numa.h>
#include <asm/pci_x86.h>

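/*
 * Scratch state carried through the acpi_walk_resources() callbacks below
 * while a PCI root bridge's _CRS windows are counted and converted into
 * struct resource entries for the root bus.
 */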
struct pci_root_info {
        struct acpi_device *bridge;
        char *name;
        unsigned int res_num;
        struct resource *res;
        struct pci_bus *bus;
        int busnum;
};

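/*
 * When true, the host bridge windows reported by the bridge's _CRS method
 * are used as the root bus resources; when false they are only logged.
 * pci_acpi_crs_quirks() turns this off for pre-2008 BIOSes, turns it back
 * on for machines in the DMI table below, and lets "pci=use_crs" /
 * "pci=nocrs" override everything.
 */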
static bool pci_use_crs = true;

static int __init set_use_crs(const struct dmi_system_id *id)
{
        pci_use_crs = true;
        return 0;
}

static const struct dmi_system_id pci_use_crs_table[] __initconst = {
        /* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
        {
                .callback = set_use_crs,
                .ident = "IBM System x3800",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
                },
        },
        {}
};

void __init pci_acpi_crs_quirks(void)
{
        int year;

        if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
                pci_use_crs = false;

        dmi_check_system(pci_use_crs_table);

        /*
         * If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
         * takes precedence over anything we figured out above.
         */
        if (pci_probe & PCI_ROOT_NO_CRS)
                pci_use_crs = false;
        else if (pci_probe & PCI_USE__CRS)
                pci_use_crs = true;

        printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
               "if necessary, use \"pci=%s\" and report a bug\n",
               pci_use_crs ? "Using" : "Ignoring",
               pci_use_crs ? "nocrs" : "use_crs");
}

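/*
 * Convert an ACPI resource descriptor into a uniform
 * struct acpi_resource_address64.  24-bit, 32-bit and fixed 32-bit memory
 * descriptors are translated by hand; 16/32/64-bit address space
 * descriptors go through acpi_resource_to_address64() and are accepted
 * only if they describe a non-empty memory or I/O range.
 */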
static acpi_status
resource_to_addr(struct acpi_resource *resource,
                        struct acpi_resource_address64 *addr)
{
        acpi_status status;
        struct acpi_resource_memory24 *memory24;
        struct acpi_resource_memory32 *memory32;
        struct acpi_resource_fixed_memory32 *fixed_memory32;

        memset(addr, 0, sizeof(*addr));
        switch (resource->type) {
        case ACPI_RESOURCE_TYPE_MEMORY24:
                memory24 = &resource->data.memory24;
                addr->resource_type = ACPI_MEMORY_RANGE;
                addr->minimum = memory24->minimum;
                addr->address_length = memory24->address_length;
                addr->maximum = addr->minimum + addr->address_length - 1;
                return AE_OK;
        case ACPI_RESOURCE_TYPE_MEMORY32:
                memory32 = &resource->data.memory32;
                addr->resource_type = ACPI_MEMORY_RANGE;
                addr->minimum = memory32->minimum;
                addr->address_length = memory32->address_length;
                addr->maximum = addr->minimum + addr->address_length - 1;
                return AE_OK;
        case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
                fixed_memory32 = &resource->data.fixed_memory32;
                addr->resource_type = ACPI_MEMORY_RANGE;
                addr->minimum = fixed_memory32->address;
                addr->address_length = fixed_memory32->address_length;
                addr->maximum = addr->minimum + addr->address_length - 1;
                return AE_OK;
        case ACPI_RESOURCE_TYPE_ADDRESS16:
        case ACPI_RESOURCE_TYPE_ADDRESS32:
        case ACPI_RESOURCE_TYPE_ADDRESS64:
                status = acpi_resource_to_address64(resource, addr);
                if (ACPI_SUCCESS(status) &&
                    (addr->resource_type == ACPI_MEMORY_RANGE ||
                    addr->resource_type == ACPI_IO_RANGE) &&
                    addr->address_length > 0) {
                        return AE_OK;
                }
                break;
        }
        return AE_ERROR;
}

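/*
 * First-pass _CRS walk callback: count the descriptors that
 * resource_to_addr() can handle so that info->res can be sized before the
 * second pass in setup_resource() fills it in.
 */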
static acpi_status
count_resource(struct acpi_resource *acpi_res, void *data)
{
        struct pci_root_info *info = data;
        struct acpi_resource_address64 addr;
        acpi_status status;

        status = resource_to_addr(acpi_res, &addr);
        if (ACPI_SUCCESS(status))
                info->res_num++;
        return AE_OK;
}

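/*
 * Expand a window with a misaligned start or end out to the 16-byte
 * (memory) or 4-byte (I/O) granularity that PCI-side decoders can
 * implement.
 */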
static void
align_resource(struct acpi_device *bridge, struct resource *res)
{
        int align = (res->flags & IORESOURCE_MEM) ? 16 : 4;

        /*
         * Host bridge windows are not BARs, but the decoders on the PCI side
         * that claim this address space have starting alignment and length
         * constraints, so fix any obvious BIOS goofs.
         */
        if (!IS_ALIGNED(res->start, align)) {
                dev_printk(KERN_DEBUG, &bridge->dev,
                           "host bridge window %pR invalid; "
                           "aligning start to %d-byte boundary\n", res, align);
                res->start &= ~(align - 1);
        }
        if (!IS_ALIGNED(res->end + 1, align)) {
                dev_printk(KERN_DEBUG, &bridge->dev,
                           "host bridge window %pR invalid; "
                           "aligning end to %d-byte boundary\n", res, align);
                res->end = ALIGN(res->end, align) - 1;
        }
}

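/*
 * Second-pass _CRS walk callback: convert one descriptor into the next free
 * slot of info->res, trim windows whose length exceeds their address range,
 * and, when pci_use_crs is set, insert the window into the iomem/ioport
 * resource tree and attach it to the root bus.
 */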
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
        struct pci_root_info *info = data;
        struct resource *res;
        struct acpi_resource_address64 addr;
        acpi_status status;
        unsigned long flags;
        struct resource *root, *conflict;
        u64 start, end, max_len;

        status = resource_to_addr(acpi_res, &addr);
        if (!ACPI_SUCCESS(status))
                return AE_OK;

        if (addr.resource_type == ACPI_MEMORY_RANGE) {
                root = &iomem_resource;
                flags = IORESOURCE_MEM;
                if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
                        flags |= IORESOURCE_PREFETCH;
        } else if (addr.resource_type == ACPI_IO_RANGE) {
                root = &ioport_resource;
                flags = IORESOURCE_IO;
        } else
                return AE_OK;

        max_len = addr.maximum - addr.minimum + 1;
        if (addr.address_length > max_len) {
                dev_printk(KERN_DEBUG, &info->bridge->dev,
                           "host bridge window length %#llx doesn't fit in "
                           "%#llx-%#llx, trimming\n",
                           (unsigned long long) addr.address_length,
                           (unsigned long long) addr.minimum,
                           (unsigned long long) addr.maximum);
                addr.address_length = max_len;
        }

        start = addr.minimum + addr.translation_offset;
        end = start + addr.address_length - 1;

        res = &info->res[info->res_num];
        res->name = info->name;
        res->flags = flags;
        res->start = start;
        res->end = end;
        res->child = NULL;
        align_resource(info->bridge, res);

        if (!pci_use_crs) {
                dev_printk(KERN_DEBUG, &info->bridge->dev,
                           "host bridge window %pR (ignored)\n", res);
                return AE_OK;
        }

        conflict = insert_resource_conflict(root, res);
        if (conflict) {
                dev_err(&info->bridge->dev,
                        "address space collision: host bridge window %pR "
                        "conflicts with %s %pR\n",
                        res, conflict->name, conflict);
        } else {
                pci_bus_add_resource(info->bus, res, 0);
                info->res_num++;
                if (addr.translation_offset)
                        dev_info(&info->bridge->dev, "host bridge window %pR "
                                 "(PCI address [%#llx-%#llx])\n",
                                 res, res->start - addr.translation_offset,
                                 res->end - addr.translation_offset);
                else
                        dev_info(&info->bridge->dev,
                                 "host bridge window %pR\n", res);
        }
        return AE_OK;
}

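/*
 * Read the host bridge windows from the bridge's _CRS method and make them
 * the resources of the root bus.  The _CRS buffer is walked twice: once
 * with count_resource() to size the info.res array, then with
 * setup_resource() to fill it in.  If pci_use_crs is clear the walk still
 * runs so the windows can be logged, but the bus resources are left alone.
 */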
static void
get_current_resources(struct acpi_device *device, int busnum,
                        int domain, struct pci_bus *bus)
{
        struct pci_root_info info;
        size_t size;

        if (pci_use_crs)
                pci_bus_remove_resources(bus);

        info.bridge = device;
        info.bus = bus;
        info.res_num = 0;
        acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
                                &info);
        if (!info.res_num)
                return;

        size = sizeof(*info.res) * info.res_num;
        info.res = kmalloc(size, GFP_KERNEL);
        if (!info.res)
                goto res_alloc_fail;

        info.name = kmalloc(16, GFP_KERNEL);
        if (!info.name)
                goto name_alloc_fail;
        sprintf(info.name, "PCI Bus %04x:%02x", domain, busnum);

        info.res_num = 0;
        acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
                                &info);

        return;

name_alloc_fail:
        kfree(info.res);
res_alloc_fail:
        return;
}

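/*
 * Create and scan the root bus described by an ACPI PCI root bridge device.
 * The bus is bound to a NUMA node via _PXM when available, gets its
 * pci_sysdata allocated here, and takes its resources from _CRS via
 * get_current_resources().  If a bus with this domain/number has already
 * been scanned, only its sysdata is refreshed.
 */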
struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int domain, int busnum)
{
        struct pci_bus *bus;
        struct pci_sysdata *sd;
        int node;
#ifdef CONFIG_ACPI_NUMA
        int pxm;
#endif

        if (domain && !pci_domains_supported) {
                printk(KERN_WARNING "pci_bus %04x:%02x: "
                       "ignored (multiple domains not supported)\n",
                       domain, busnum);
                return NULL;
        }

        node = -1;
#ifdef CONFIG_ACPI_NUMA
        pxm = acpi_get_pxm(device->handle);
        if (pxm >= 0)
                node = pxm_to_node(pxm);
        if (node != -1)
                set_mp_bus_to_node(busnum, node);
        else
#endif
                node = get_mp_bus_to_node(busnum);

        if (node != -1 && !node_online(node))
                node = -1;

        /* Allocate per-root-bus (not per bus) arch-specific data.
         * TODO: leak; this memory is never freed.
         * It's arguable whether it's worth the trouble to care.
         */
        sd = kzalloc(sizeof(*sd), GFP_KERNEL);
        if (!sd) {
                printk(KERN_WARNING "pci_bus %04x:%02x: "
                       "ignored (out of memory)\n", domain, busnum);
                return NULL;
        }

        sd->domain = domain;
        sd->node = node;
        /*
         * The desired PCI bus may already have been scanned; in that case
         * there is no need to scan it again for this domain and bus number.
         */
        bus = pci_find_bus(domain, busnum);
        if (bus) {
                /*
                 * If the desired bus already exists, the content of
                 * bus->sysdata will be replaced by sd.
                 */
                memcpy(bus->sysdata, sd, sizeof(*sd));
                kfree(sd);
        } else {
                bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
                if (bus) {
                        get_current_resources(device, busnum, domain, bus);
                        bus->subordinate = pci_scan_child_bus(bus);
                }
        }

        if (!bus)
                kfree(sd);

        if (bus && node != -1) {
#ifdef CONFIG_ACPI_NUMA
                if (pxm >= 0)
                        dev_printk(KERN_DEBUG, &bus->dev,
                                   "on NUMA node %d (pxm %d)\n", node, pxm);
#else
                dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
#endif
        }

        return bus;
}

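/*
 * Switch x86 interrupt setup over to ACPI: unless ACPI IRQ routing was
 * disabled on the command line (acpi_noirq), initialize the IRQ penalty
 * table, route IRQs through the ACPI _PRT when devices are enabled or
 * disabled, and stub out the legacy x86 IRQ init hook.  With
 * "pci=routeirq", IRQs are routed for every device up front as a safety
 * net for drivers that never call pci_enable_device().
 */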
int __init pci_acpi_init(void)
{
        struct pci_dev *dev = NULL;

        if (acpi_noirq)
                return -ENODEV;

        printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
        acpi_irq_penalty_init();
        pcibios_enable_irq = acpi_pci_irq_enable;
        pcibios_disable_irq = acpi_pci_irq_disable;
        x86_init.pci.init_irq = x86_init_noop;

        if (pci_routeirq) {
                /*
                 * PCI IRQ routing is set up by pci_enable_device(), but we
                 * also do it here in case there are still broken drivers that
                 * don't use pci_enable_device().
                 */
                printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
                for_each_pci_dev(dev)
                        acpi_pci_irq_enable(dev);
        }

        return 0;
}