Intel-IOMMU, intr-remap: set the whole 128 bits of the IRTE when modifying/freeing it
[pandora-kernel.git] drivers/pci/intr_remapping.c
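
/*
 * Intel VT-d interrupt-remapping support: allocates and manages the
 * per-IOMMU interrupt remap table (IRTEs), keeps the IRQ-to-IRTE
 * bookkeeping, and drives the hardware enable/disable sequences.
 * IRTEs are 128 bits wide; the modify and free paths below write both
 * 64-bit halves so no stale half survives an update.
 */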
#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

static int disable_intremap;
static __init int setup_nointremap(char *str)
{
        disable_intremap = 1;
        return 0;
}
early_param("nointremap", setup_nointremap);

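/*
 * Per-IRQ bookkeeping: which IOMMU owns the IRQ's remap table entry,
 * the base index of that entry, the sub-handle used for multi-vector
 * (MSI-style) allocations, and the log2 mask covering the allocated
 * block of entries.
 */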
struct irq_2_iommu {
        struct intel_iommu *iommu;
        u16 irte_index;
        u16 sub_handle;
        u8  irte_mask;
};

#ifdef CONFIG_SPARSE_IRQ
static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
{
        struct irq_2_iommu *iommu;

        iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
        printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);

        return iommu;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);

        if (WARN_ON_ONCE(!desc))
                return NULL;

        return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
{
        struct irq_desc *desc;
        struct irq_2_iommu *irq_iommu;

        /*
         * alloc irq desc if not allocated already.
         */
        desc = irq_to_desc_alloc_node(irq, node);
        if (!desc) {
                printk(KERN_INFO "can not get irq_desc for %d\n", irq);
                return NULL;
        }

        irq_iommu = desc->irq_2_iommu;

        if (!irq_iommu)
                desc->irq_2_iommu = get_one_free_irq_2_iommu(node);

        return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id));
}

#else /* !CONFIG_SPARSE_IRQ */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        if (irq < nr_irqs)
                return &irq_2_iommuX[irq];

        return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu(irq);
}
#endif /* CONFIG_SPARSE_IRQ */

static DEFINE_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
        struct irq_2_iommu *irq_iommu;

        irq_iommu = irq_2_iommu(irq);

        if (!irq_iommu)
                return NULL;

        if (!irq_iommu->iommu)
                return NULL;

        return irq_iommu;
}

int irq_remapped(int irq)
{
        return valid_irq_2_iommu(irq) != NULL;
}

int get_irte(int irq, struct irte *entry)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        if (!entry)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return 0;
}

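/*
 * Allocate 'count' consecutive IRTEs for an IRQ.  Requests for more
 * than one entry are rounded up to a power of two and recorded as a
 * log2 'mask', the granularity the invalidation hardware understands.
 * The table is scanned linearly from index 0 until a free run is found
 * or the search wraps back to its starting point.
 *
 * Sketch of a typical caller (illustrative only, not a verbatim caller
 * from this tree):
 *
 *      index = alloc_irte(iommu, irq, nvec);   - reserve a block
 *      ... fill in a struct irte, then modify_irte(irq, &irte) ...
 *      free_irte(irq);                         - on teardown
 */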
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu;
        u16 index, start_index;
        unsigned int mask = 0;
        unsigned long flags;
        int i;

        if (!count)
                return -1;

#ifndef CONFIG_SPARSE_IRQ
        /* protect irq_2_iommu_alloc later */
        if (irq >= nr_irqs)
                return -1;
#endif

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu = irq_2_iommu_alloc(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return index;
}

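/*
 * Queue a selective Interrupt Entry Cache invalidation for the block
 * of IRTEs starting at 'index' (block size encoded by 'mask') and wait
 * for the hardware to complete it.
 */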
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        return qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return index;
}

int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        irq_iommu = irq_2_iommu_alloc(irq);

        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

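/*
 * Update an IRTE in place.  The entry is 128 bits wide, so both the
 * low and the high 64-bit halves are written (this is the point of the
 * "set the whole 128 bits" fix: updating only the low half could leave
 * stale data in the high half).  The cache flush plus IEC invalidation
 * then make the new entry visible to the hardware.
 */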
int modify_irte(int irq, struct irte *irte_modified)
{
        int rc;
        int index;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit((unsigned long *)&irte->low, irte_modified->low);
        set_64bit((unsigned long *)&irte->high, irte_modified->high);
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        rc = qi_flush_iec(iommu, index, 0);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

int flush_irte(int irq)
{
        int rc;
        int index;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

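/*
 * Zero every IRTE in the block owned by this IRQ and invalidate the
 * interrupt entry cache for it.  Both 64-bit halves of each entry are
 * cleared, so a freed slot never keeps a stale high half.  Sub-handle
 * users share their block with the base IRQ, so only the owner with
 * sub_handle == 0 actually clears the entries.
 */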
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
        struct irte *start, *entry, *end;
        struct intel_iommu *iommu;
        int index;

        if (irq_iommu->sub_handle)
                return 0;

        iommu = irq_iommu->iommu;
        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        start = iommu->ir_table->base + index;
        end = start + (1 << irq_iommu->irte_mask);

        for (entry = start; entry < end; entry++) {
                set_64bit((unsigned long *)&entry->low, 0);
                set_64bit((unsigned long *)&entry->high, 0);
        }

        return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

int free_irte(int irq)
{
        int rc = 0;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        rc = clear_entries(irq_iommu);

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

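/*
 * Program the hardware with the remap table and switch it on: write
 * the table's physical address and size to DMAR_IRTA_REG, latch it
 * with the SIRTP command, globally invalidate the interrupt entry
 * cache, and finally set the IRE bit.  Each gcmd write is polled via
 * the global status register until the hardware acknowledges it.
 */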
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        iommu->gcmd |= DMA_GCMD_SIRTP;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

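/*
 * Allocate the interrupt remap table (a zeroed block of pages of order
 * INTR_REMAP_PAGE_ORDER) and hand it to the hardware.
 */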
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_ATOMIC);

        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);
        return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;

        if (!ecap_ir_support(iommu->ecap))
                return;

        /*
         * global invalidation of interrupt entry cache before disabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_IRES))
                goto end;

        iommu->gcmd &= ~DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, !(sts & DMA_GSTS_IRES), sts);

end:
        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

int __init intr_remapping_supported(void)
{
        struct dmar_drhd_unit *drhd;

        if (disable_intremap)
                return 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        return 0;
        }

        return 1;
}

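/*
 * Bring up interrupt remapping across all DRHDs: first quiesce any
 * state left over from the firmware (clear faults, disable remapping
 * and queued invalidation unless QI is already ours), verify EIM
 * support if extended (x2APIC) mode was requested, enable queued
 * invalidation everywhere, and only then install a remap table on
 * each capable unit.
 */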
int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                /*
                 * If queued invalidation is already initialized,
                 * we shouldn't disable it.
                 */
                if (iommu->qi)
                        continue;

                /*
                 * Clear previous faults.
                 */
                dmar_fault(-1, iommu);

                /*
                 * Disable intr remapping and queued invalidation, if already
                 * enabled prior to OS handover.
                 */
                iommu_disable_intr_remapping(iommu);

                dmar_disable_qi(iommu);
        }

        /*
         * check for the Interrupt-remapping support
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
                               " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHD's.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued"
                               " invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}

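/*
 * Walk the device scope entries of one DRHD and record every IO-APIC
 * it covers in the ir_ioapic[] map, so interrupts from that IO-APIC
 * can be routed through the owning IOMMU.
 */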
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
                                 struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_ioapic[ir_ioapic_num].iommu = iommu;
                        ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
                        ir_ioapic_num++;
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Finds the association between IO-APICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APIC's listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}

void disable_intr_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;

        /*
         * Disable Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                iommu_disable_intr_remapping(iommu);
        }
}

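/*
 * Re-enable interrupt remapping after it has been disabled (typically
 * across suspend/resume): restart queued invalidation where it was
 * initialized, then reprogram every capable unit with its still
 * allocated remap table.
 */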
int reenable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;
        struct intel_iommu *iommu = NULL;

        for_each_iommu(iommu, drhd)
                if (iommu->qi)
                        dmar_reenable_qi(iommu);

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                /* Set up interrupt remapping for iommu. */
                iommu_set_intr_remapping(iommu, eim);
                setup = 1;
        }

        if (!setup)
                goto error;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}