Revert "PCI: update bridge resources to get more big ranges in PCI assign unssigned"
[pandora-kernel.git] drivers/pci/intr_remapping.c
#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include "pci.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;

static int disable_intremap;
static __init int setup_nointremap(char *str)
{
        disable_intremap = 1;
        return 0;
}
early_param("nointremap", setup_nointremap);

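/*
 * Per-IRQ remapping state: the IOMMU that remaps this interrupt and the
 * location (index, sub-handle, block size) of its entries in that
 * IOMMU's interrupt remapping table.
 */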
struct irq_2_iommu {
        struct intel_iommu *iommu;
        u16 irte_index;
        u16 sub_handle;
        u8  irte_mask;
};

#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
{
        struct irq_2_iommu *iommu;

        iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
        printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);

        return iommu;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);

        if (WARN_ON_ONCE(!desc))
                return NULL;

        return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        struct irq_desc *desc;
        struct irq_2_iommu *irq_iommu;

        desc = irq_to_desc(irq);
        if (!desc) {
                printk(KERN_INFO "cannot get irq_desc for %d\n", irq);
                return NULL;
        }

        irq_iommu = desc->irq_2_iommu;

        if (!irq_iommu)
                desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));

        return desc->irq_2_iommu;
}

#else /* !CONFIG_GENERIC_HARDIRQS */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        if (irq < nr_irqs)
                return &irq_2_iommuX[irq];

        return NULL;
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu(irq);
}
#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);

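/*
 * Look up the irq_2_iommu mapping for @irq and return it only if the IRQ
 * is actually bound to an IOMMU, i.e. it is currently remapped.
 */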
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
        struct irq_2_iommu *irq_iommu;

        irq_iommu = irq_2_iommu(irq);

        if (!irq_iommu)
                return NULL;

        if (!irq_iommu->iommu)
                return NULL;

        return irq_iommu;
}

int irq_remapped(int irq)
{
        return valid_irq_2_iommu(irq) != NULL;
}

int get_irte(int irq, struct irte *entry)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        if (!entry)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return 0;
}

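/*
 * Allocate @count consecutive free entries in the IOMMU's interrupt
 * remapping table for @irq.  @count is rounded up to a power of two and
 * remembered as a mask so the whole block can later be flushed and freed
 * in one go.  Returns the index of the first entry, or -1 on failure.
 */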
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu;
        u16 index, start_index;
        unsigned int mask = 0;
        unsigned long flags;
        int i;

        if (!count)
                return -1;

#ifndef CONFIG_SPARSE_IRQ
        /* protect irq_2_iommu_alloc later */
        if (irq >= nr_irqs)
                return -1;
#endif

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu = irq_2_iommu_alloc(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return index;
}

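/*
 * Queue an Interrupt Entry Cache invalidation for the 2^mask entries
 * starting at @index and wait for it to complete.
 */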
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        return qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return index;
}

int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        irq_iommu = irq_2_iommu_alloc(irq);

        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

int modify_irte(int irq, struct irte *irte_modified)
{
        int rc;
        int index;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit((unsigned long *)&irte->low, irte_modified->low);
        set_64bit((unsigned long *)&irte->high, irte_modified->high);
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        rc = qi_flush_iec(iommu, index, 0);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

int flush_irte(int irq)
{
        int rc;
        int index;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
        int i;

        for (i = 0; i < MAX_HPET_TBS; i++)
                if (ir_hpet[i].id == hpet_id)
                        return ir_hpet[i].iommu;
        return NULL;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

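/*
 * Zero the block of IRTEs owned by this mapping and flush it from the
 * interrupt entry cache.  Sub-handle users share the block allocated by
 * the sub_handle 0 mapping, so only that mapping clears anything.
 */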
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
        struct irte *start, *entry, *end;
        struct intel_iommu *iommu;
        int index;

        if (irq_iommu->sub_handle)
                return 0;

        iommu = irq_iommu->iommu;
        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        start = iommu->ir_table->base + index;
        end = start + (1 << irq_iommu->irte_mask);

        for (entry = start; entry < end; entry++) {
                set_64bit((unsigned long *)&entry->low, 0);
                set_64bit((unsigned long *)&entry->high, 0);
        }

        return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

int free_irte(int irq)
{
        int rc = 0;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        rc = clear_entries(irq_iommu);

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY           0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ       0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS          0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16       0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1  0x1  /* verify most significant 13 bits, ignore
                              * the third least significant bit
                              */
#define SQ_13_IGNORE_2  0x2  /* verify most significant 13 bits, ignore
                              * the second and third least significant bits
                              */
#define SQ_13_IGNORE_3  0x3  /* verify most significant 13 bits, ignore
                              * the three least significant bits
                              */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
                         unsigned int sq, unsigned int sid)
{
        irte->svt = svt;
        irte->sq = sq;
        irte->sid = sid;
}

int set_ioapic_sid(struct irte *irte, int apic)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_IO_APICS; i++) {
                if (ir_ioapic[i].id == apic) {
                        sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
                return -1;
        }

        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

        return 0;
}

int set_hpet_sid(struct irte *irte, u8 id)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_HPET_TBS; i++) {
                if (ir_hpet[i].id == id) {
                        sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of HPET block (%d)\n", id);
                return -1;
        }

        /*
         * Should really use SQ_ALL_16. Some platforms are broken.
         * While we figure out the right quirks for these broken platforms, use
         * SQ_13_IGNORE_3 for now.
         */
        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

        return 0;
}

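/*
 * Set the source-id of an MSI originating from @dev.  PCIe and Root
 * Complex integrated devices are verified against their own requester-id;
 * devices behind a PCIe-to-PCI/PCI-X bridge are verified by bus number,
 * and devices behind a legacy PCI bridge by the bridge's requester-id.
 */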
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
        struct pci_dev *bridge;

        if (!irte || !dev)
                return -1;

        /* PCIe device or Root Complex integrated PCI device */
        if (pci_is_pcie(dev) || !dev->bus->parent) {
                set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                             (dev->bus->number << 8) | dev->devfn);
                return 0;
        }

        bridge = pci_find_upstream_pcie_bridge(dev);
        if (bridge) {
                if (pci_is_pcie(bridge)) /* this is a PCIe-to-PCI/PCIX bridge */
                        set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
                                (bridge->bus->number << 8) | dev->bus->number);
                else /* this is a legacy PCI bridge */
                        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                                (bridge->bus->number << 8) | bridge->devfn);
        }

        return 0;
}

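/*
 * Program the remapping table address into the IOMMU and make the
 * hardware load it (SIRTP), globally invalidate the interrupt entry
 * cache, then enable interrupt remapping (IRE).
 */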
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        iommu->gcmd |= DMA_GCMD_SIRTP;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

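/*
 * Allocate the ir_table descriptor and the pages backing the interrupt
 * remapping table on the IOMMU's node, then hand the table to the
 * hardware and enable remapping.
 */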
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_ATOMIC);

        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
                                 INTR_REMAP_PAGE_ORDER);

        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);
        return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;

        if (!ecap_ir_support(iommu->ecap))
                return;

        /*
         * global invalidation of interrupt entry cache before disabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_IRES))
                goto end;

        iommu->gcmd &= ~DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, !(sts & DMA_GSTS_IRES), sts);

end:
        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

int __init intr_remapping_supported(void)
{
        struct dmar_drhd_unit *drhd;

        if (disable_intremap)
                return 0;

        if (!dmar_ir_support())
                return 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        return 0;
        }

        return 1;
}

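/*
 * Enable interrupt remapping on all DRHDs: verify that every IOAPIC is
 * covered by remapping hardware, quiesce any remapping and queued
 * invalidation state left over from before OS handover, check EIM support
 * when extended (x2apic) mode is requested, enable queued invalidation,
 * and finally set up the remapping tables.
 */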
int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        if (parse_ioapics_under_ir() != 1) {
                printk(KERN_INFO "Not enabling interrupt remapping\n");
                return -1;
        }

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                /*
                 * If queued invalidation is already initialized,
                 * don't disable it.
                 */
                if (iommu->qi)
                        continue;

                /*
                 * Clear previous faults.
                 */
                dmar_fault(-1, iommu);

                /*
                 * Disable intr remapping and queued invalidation, if already
                 * enabled prior to OS handover.
                 */
                iommu_disable_intr_remapping(iommu);

                dmar_disable_qi(iommu);
        }

        /*
         * Check for Interrupt-remapping support.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
                               "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHDs.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued "
                               "invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Set up Interrupt-remapping for all the DRHDs now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}

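/*
 * Walk the PCI path of an ACPI HPET device-scope entry (via direct
 * config-space accesses, since the PCI core is not up yet) to find the
 * final bus/devfn, and record which IOMMU covers this HPET block.
 */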
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }
        ir_hpet[ir_hpet_num].bus   = bus;
        ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_hpet[ir_hpet_num].iommu = iommu;
        ir_hpet[ir_hpet_num].id    = scope->enumeration_id;
        ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }

        ir_ioapic[ir_ioapic_num].bus   = bus;
        ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_ioapic[ir_ioapic_num].iommu = iommu;
        ir_ioapic[ir_ioapic_num].id    = scope->enumeration_id;
        ir_ioapic_num++;
}

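/*
 * Scan the device scopes of one DRHD and record every IOAPIC and HPET
 * block that this remapping hardware unit covers.
 */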
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_parse_one_ioapic_scope(scope, iommu);
                } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
                        if (ir_hpet_num == MAX_HPET_TBS) {
                                printk(KERN_WARNING "Exceeded Max HPET blocks\n");
                                return -1;
                        }

                        printk(KERN_INFO "HPET id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_parse_one_hpet_scope(scope, iommu);
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Find the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APICs listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}

void disable_intr_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;

        /*
         * Disable Interrupt-remapping for all the DRHDs now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                iommu_disable_intr_remapping(iommu);
        }
}

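/*
 * Re-enable interrupt remapping using the tables that were set up
 * earlier: re-enable queued invalidation where it was already
 * initialized, then reprogram and enable remapping on each IOMMU.
 */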
int reenable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;
        struct intel_iommu *iommu = NULL;

        for_each_iommu(iommu, drhd)
                if (iommu->qi)
                        dmar_reenable_qi(iommu);

        /*
         * Set up Interrupt-remapping for all the DRHDs now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                /* Set up interrupt remapping for iommu. */
                iommu_set_intr_remapping(iommu, eim);
                setup = 1;
        }

        if (!setup)
                goto error;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}