/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_BRIDGE_HOST_DEVICE(pdev) \
			    ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
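/*
 * Worked example (illustration only): for gaw == 48,
 * __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1, the last 4KiB page frame
 * addressable with 48 bits; on a 32-bit kernel DOMAIN_MAX_PFN(48)
 * clamps this to ULONG_MAX so PFN arithmetic stays in 'unsigned long'.
 */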
/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << ((lvl - 1) * LEVEL_STRIDE);
}
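/*
 * Worked example (illustration only): with LEVEL_STRIDE == 9 the
 * helpers above give, for a 48-bit adjusted guest address width:
 *	width_to_agaw(48)	 == 2
 *	agaw_to_level(2)	 == 4	(4-level page table)
 *	level_to_offset_bits(4)	 == 27
 *	pfn_level_offset(pfn, 4) == (pfn >> 27) & 0x1ff
 *	lvl_to_nr_pages(2)	 == 512	(one 2MiB superpage)
 */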
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
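/*
 * Example (illustration only): with 4KiB MM pages, PAGE_SHIFT ==
 * VTD_PAGE_SHIFT == 12 and both conversions are the identity; with
 * 16KiB MM pages (e.g. some IA-64 configurations) the shift is 2,
 * so one MM pfn spans four consecutive VT-d pfns.
 */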
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
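/*
 * Note (illustration only): one page-table page holds
 * VTD_PAGE_SIZE / sizeof(struct dma_pte) == 512 entries, so
 * first_pte_in_page() -- a pte pointer whose low 12 bits are clear --
 * is how the loops below detect that they have walked off the end of
 * one table page and must re-derive 'pte' for the next one.
 */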
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw, and
 * get a supported lesser agaw for iommus that don't support the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
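/*
 * Worked example (illustration only): if cap_sagaw() reports only
 * bit 2 set (4-level tables), then width_to_agaw(MAX_AGAW_WIDTH)
 * == width_to_agaw(64) == 3, test_bit(3) fails, and the loop above
 * settles on agaw == 2, i.e. a supported 48-bit AGAW.
 */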
/* This function only returns one iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);

	domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask)
			break;
	}
	domain->iommu_superpage = fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}
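/*
 * Note (illustration only): the root table is indexed by bus number
 * (256 entries) and each context table by devfn (256 entries), so
 * (bus, devfn) selects exactly one context entry; context tables are
 * allocated lazily above, the first time a device on that bus is
 * mapped.
 */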
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static int dma_pte_clear_range(struct dmar_domain *domain,
			       unsigned long start_pfn,
			       unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;
	int order;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	order = (large_page - 1) * 9;
	return order;
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;
	int large_page = 2;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		do {
			large_page = level;
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
			if (large_page > level)
				level = large_page + 1;
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);

		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain selective flush if no PSI support or the size
	 * is too big.
	 * PSI requires page size to be 2 ^ x, and the base address is
	 * naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present
	 * require flush. However, device IOTLB doesn't need to be flushed
	 * in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
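/*
 * Worked example (illustration only): flushing 512 pages gives
 * mask == ilog2(__roundup_pow_of_two(512)) == 9, i.e. one naturally
 * aligned 2MiB region; for 513 pages the mask rounds up to 10 (4MiB),
 * since PSI can only invalidate power-of-two, size-aligned ranges.
 */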
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
			ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		return -ENOMEM;
	}

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}


static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		irq_set_handler_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
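/*
 * Worked example (illustration only): guestwidth_to_adjustwidth(48)
 * returns 48 ((48 - 12) % 9 == 0), while a 40-bit guest width is
 * rounded up to 48 so the width is always 12 offset bits plus a
 * whole number of 9-bit levels, as the page-table helpers require.
 */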
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	domain->iommu_count = 1;
	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
						  pci_domain_nr(tmp->bus),
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp))
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
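/*
 * Worked example (illustration only): host_addr == 0x1234 and
 * size == 0x2000 leave an in-page offset of 0x234, and
 * PAGE_ALIGN(0x2234) >> VTD_PAGE_SHIFT == 3: the buffer straddles
 * three 4KiB VT-d pages even though it is only two pages long.
 */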
/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}
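/*
 * Worked example (illustration only): with one level of superpage
 * support, an iov_pfn and phy_pfn that are both 512-page aligned and
 * a run of at least 512 pages yield level == 2, letting
 * __domain_mapping() below set DMA_PTE_LARGE_PAGE (a 2MiB mapping);
 * a misaligned low bit in either pfn keeps the mapping at level 1.
 */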
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				/* Ensure that old small page tables are removed to make room
				   for superpage, if they exist. */
				dma_pte_clear_range(domain, iov_pfn,
						    iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}

		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: we use struct pci_dev->dev.archdata.iommu to store the domain info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (pci_is_pcie(dev_tmp)) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, uses it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		free_domain_mem(domain);
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		free_domain_mem(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, uses it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			spin_unlock_irqrestore(&device_domain_lock, flags);
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
			spin_unlock_irqrestore(&device_domain_lock, flags);
		}
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody is fast */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}

static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}
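/*
 * Note (illustration only): an identity map is simply iova ==
 * physical address, which is why first_vpfn is passed to
 * domain_pfn_mapping() as both the IO virtual and the physical
 * start pfn.
 */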
static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, so which didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       pci_name(pdev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
	struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
		rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");

}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_work_fn(unsigned long start_pfn,
				    unsigned long end_pfn, void *datax)
{
	int *ret = datax;

	*ret = iommu_domain_identity_map(si_domain,
					 (uint64_t)start_pfn << PAGE_SHIFT,
					 (uint64_t)end_pfn << PAGE_SHIFT);
	return *ret;

}

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	if (hw)
		return 0;

	for_each_online_node(nid) {
		work_with_active_regions(nid, si_domain_work_fn, &ret);
		if (ret)
			return ret;
	}

	return 0;
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = pdev->dev.archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev,
			       int translation)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	ret = domain_context_mapping(domain, pdev, translation);
	if (ret) {
		spin_lock_irqsave(&device_domain_lock, flags);
		list_del(&info->link);
		list_del(&info->global);
		pdev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);
		free_devinfo_mem(info);
		return ret;
	}

	return 0;
}

static bool device_has_rmrr(struct pci_dev *dev)
{
	struct dmar_rmrr_unit *rmrr;
	int i;

	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			/*
			 * Return TRUE if this RMRR contains the device that
			 * is passed in.
			 */
			if (rmrr->devices[i] == dev)
				return true;
		}
	}
	return false;
}

static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{

	/*
	 * We want to prevent any device associated with an RMRR from
	 * getting placed into the SI Domain. This is done because
	 * problems exist when devices are moved in and out of domains
	 * and their respective RMRR info is lost. We exempt USB devices
	 * from this process due to their usage of RMRRs that are known
	 * to not be needed after BIOS hand-off to OS.
	 */
	if (device_has_rmrr(pdev) &&
	    (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
		return 0;

	if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
		return 1;

	if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
		return 1;

	if (!(iommu_identity_mapping & IDENTMAP_ALL))
		return 0;

	/*
	 * We want to start off with all devices in the 1:1 domain, and
	 * take them out later if we find they can't access all of memory.
	 *
	 * However, we can't do this for PCI devices behind bridges,
	 * because all PCI devices behind the same bridge will end up
	 * with the same source-id on their transactions.
	 *
	 * Practically speaking, we can't change things around for these
	 * devices at run-time, because we can't be sure there'll be no
	 * DMA transactions in flight for any of their siblings.
	 *
	 * So PCI devices (unless they're on the root bus) as well as
	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
	 * the 1:1 domain, just in _case_ one of their siblings turns out
	 * not to be able to map all of memory.
	 */
	if (!pci_is_pcie(pdev)) {
		if (!pci_is_root_bus(pdev->bus))
			return 0;
		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
			return 0;
	} else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
		return 0;

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = pdev->dma_mask;

		if (pdev->dev.coherent_dma_mask &&
		    pdev->dev.coherent_dma_mask < dma_mask)
			dma_mask = pdev->dev.coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(&pdev->dev);
	}

	return 1;
}
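/*
 * Worked example (illustration only): a device limited to 32-bit DMA
 * (dma_mask == DMA_BIT_MASK(32)) on a machine with 8GiB of RAM fails
 * the dma_get_required_mask() test above, so it is kept out of (or
 * later removed from) the 1:1 domain and gets normal remapping.
 */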
static int __init iommu_prepare_static_identity_mapping(int hw)
{
        struct pci_dev *pdev = NULL;
        int ret;

        ret = si_domain_init(hw);
        if (ret)
                return -EFAULT;

        for_each_pci_dev(pdev) {
                /* Skip Host/PCI Bridge devices */
                if (IS_BRIDGE_HOST_DEVICE(pdev))
                        continue;
                if (iommu_should_identity_map(pdev, 1)) {
                        printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
                               hw ? "hardware" : "software", pci_name(pdev));

                        ret = domain_add_dev_info(si_domain, pdev,
                                                  hw ? CONTEXT_TT_PASS_THROUGH :
                                                       CONTEXT_TT_MULTI_LEVEL);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int __init init_dmars(void)
{
        struct dmar_drhd_unit *drhd;
        struct dmar_rmrr_unit *rmrr;
        struct pci_dev *pdev;
        struct intel_iommu *iommu;
        int i, ret;

        /*
         * for each drhd
         *      allocate root
         *      initialize and program root entry to not present
         * endfor
         */
        for_each_drhd_unit(drhd) {
                g_num_of_iommus++;
                /*
                 * Lock not needed: this is only incremented in the
                 * single-threaded kernel __init code path; all other
                 * accesses are read-only.
                 */
        }

        g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
                           GFP_KERNEL);
        if (!g_iommus) {
                printk(KERN_ERR "Allocating global iommu array failed\n");
                ret = -ENOMEM;
                goto error;
        }

        deferred_flush = kzalloc(g_num_of_iommus *
                sizeof(struct deferred_flush_tables), GFP_KERNEL);
        if (!deferred_flush) {
                ret = -ENOMEM;
                goto error;
        }

        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;

                iommu = drhd->iommu;
                g_iommus[iommu->seq_id] = iommu;

                ret = iommu_init_domains(iommu);
                if (ret)
                        goto error;

                /*
                 * TBD: we could share the same root & context tables
                 * among all IOMMUs; need to split it later.
                 */
                ret = iommu_alloc_root_entry(iommu);
                if (ret) {
                        printk(KERN_ERR "IOMMU: allocate root entry failed\n");
                        goto error;
                }
                if (!ecap_pass_through(iommu->ecap))
                        hw_pass_through = 0;
        }

        /*
         * Start from a sane IOMMU hardware state.
         */
        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;

                iommu = drhd->iommu;

                /*
                 * If queued invalidation was already initialized by us
                 * (for example, while enabling interrupt remapping),
                 * then things are already rolling from a sane state.
                 */
                if (iommu->qi)
                        continue;

                /*
                 * Clear any previous faults.
                 */
                dmar_fault(-1, iommu);
                /*
                 * Disable queued invalidation if supported and already
                 * enabled before OS handover.
                 */
                dmar_disable_qi(iommu);
        }

        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;

                iommu = drhd->iommu;

                if (dmar_enable_qi(iommu)) {
                        /*
                         * Queued invalidation not enabled; use
                         * register-based invalidation.
                         */
                        iommu->flush.flush_context = __iommu_flush_context;
                        iommu->flush.flush_iotlb = __iommu_flush_iotlb;
                        printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
                               "invalidation\n",
                               iommu->seq_id,
                               (unsigned long long)drhd->reg_base_addr);
                } else {
                        iommu->flush.flush_context = qi_flush_context;
                        iommu->flush.flush_iotlb = qi_flush_iotlb;
                        printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
                               "invalidation\n",
                               iommu->seq_id,
                               (unsigned long long)drhd->reg_base_addr);
                }
        }

        if (iommu_pass_through)
                iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
        iommu_identity_mapping |= IDENTMAP_GFX;
#endif

        check_tylersburg_isoch();

        /*
         * If pass-through is not set or not enabled, set up context entries
         * for identity mappings for RMRR, GFX and ISA, and possibly fall
         * back to static identity mapping if iommu_identity_mapping is set.
         */
        if (iommu_identity_mapping) {
                ret = iommu_prepare_static_identity_mapping(hw_pass_through);
                if (ret) {
                        printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
                        goto error;
                }
        }

        /*
         * For each rmrr
         *   for each dev attached to rmrr
         *   do
         *     locate drhd for dev, alloc domain for dev
         *     allocate free domain
         *     allocate page table entries for rmrr
         *     if context not allocated for bus
         *           allocate and init context
         *           set present in root table for this bus
         *     init context with domain, translation etc
         *   endfor
         * endfor
         */
        printk(KERN_INFO "IOMMU: Setting RMRR:\n");
        for_each_rmrr_units(rmrr) {
                for (i = 0; i < rmrr->devices_cnt; i++) {
                        pdev = rmrr->devices[i];
                        /*
                         * Some BIOSes list nonexistent devices in the
                         * DMAR table.
                         */
                        if (!pdev)
                                continue;
                        ret = iommu_prepare_rmrr_dev(rmrr, pdev);
                        if (ret)
                                printk(KERN_ERR
                                       "IOMMU: mapping reserved region failed\n");
                }
        }

        iommu_prepare_isa();

        /*
         * for each drhd
         *   enable fault log
         *   global invalidate context cache
         *   global invalidate iotlb
         *   enable translation
         */
        for_each_drhd_unit(drhd) {
                if (drhd->ignored) {
                        /*
                         * We always have to disable PMRs or DMA may fail
                         * on this device.
                         */
                        if (force_on)
                                iommu_disable_protect_mem_regions(drhd->iommu);
                        continue;
                }
                iommu = drhd->iommu;

                iommu_flush_write_buffer(iommu);

                ret = dmar_set_interrupt(iommu);
                if (ret)
                        goto error;

                iommu_set_root_entry(iommu);

                iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

                ret = iommu_enable_translation(iommu);
                if (ret)
                        goto error;

                iommu_disable_protect_mem_regions(iommu);
        }

        return 0;
error:
        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;
                iommu = drhd->iommu;
                free_iommu(iommu);
        }
        kfree(g_iommus);
        return ret;
}
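/*
 * Editorial summary (not in the original source): in outline, init_dmars()
 * performs the following steps:
 *
 *      count DRHD units and allocate g_iommus[] and deferred_flush[]
 *      per IOMMU: init domains, alloc root entry, probe pass-through support
 *      reset to a sane state: clear stale faults, disable leftover QI
 *      choose queued vs. register-based invalidation per IOMMU
 *      set up identity/RMRR/ISA mappings as configured
 *      per IOMMU: program root entry, global flush, enable translation
 */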
/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
                                     struct dmar_domain *domain,
                                     unsigned long nrpages, uint64_t dma_mask)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct iova *iova = NULL;

        /* Restrict dma_mask to the width that the iommu can handle */
        dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

        if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
                /*
                 * First try to allocate an io virtual address in
                 * DMA_BIT_MASK(32), and if that fails then try
                 * allocating from the higher range.
                 */
                iova = alloc_iova(&domain->iovad, nrpages,
                                  IOVA_PFN(DMA_BIT_MASK(32)), 1);
                if (iova)
                        return iova;
        }
        iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
        if (unlikely(!iova)) {
                printk(KERN_ERR "Allocating %ld-page iova for %s failed",
                       nrpages, pci_name(pdev));
                return NULL;
        }

        return iova;
}
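/*
 * Editorial note (illustrative usage, assuming the signature above): the
 * two-pass strategy keeps 64-bit-capable devices in low IOVA space while
 * it lasts.  A hypothetical call
 *
 *      iova = intel_alloc_iova(dev, domain, 16, DMA_BIT_MASK(64));
 *
 * first tries below IOVA_PFN(DMA_BIT_MASK(32)) and only then falls back
 * to the full range permitted by dma_mask; booting with dmar_forcedac
 * skips the low-range attempt entirely.
 */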
static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
        struct dmar_domain *domain;
        int ret;

        domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
        if (!domain) {
                printk(KERN_ERR
                       "Allocating domain for %s failed", pci_name(pdev));
                return NULL;
        }

        /* make sure the context mapping is ok */
        if (unlikely(!domain_context_mapped(pdev))) {
                ret = domain_context_mapping(domain, pdev,
                                             CONTEXT_TT_MULTI_LEVEL);
                if (ret) {
                        printk(KERN_ERR
                               "Domain context map for %s failed",
                               pci_name(pdev));
                        return NULL;
                }
        }

        return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
{
        struct device_domain_info *info;

        /* No lock here; assumes no domain exit in the normal case */
        info = dev->dev.archdata.iommu;
        if (info)
                return info->domain;

        return __get_valid_domain_for_dev(dev);
}

static int iommu_dummy(struct pci_dev *pdev)
{
        return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through the non-identity map and unmap process. */
static int iommu_no_mapping(struct device *dev)
{
        struct pci_dev *pdev;
        int found;

        if (unlikely(dev->bus != &pci_bus_type))
                return 1;

        pdev = to_pci_dev(dev);
        if (iommu_dummy(pdev))
                return 1;

        if (!iommu_identity_mapping)
                return 0;

        found = identity_mapping(pdev);
        if (found) {
                if (iommu_should_identity_map(pdev, 0))
                        return 1;
                /*
                 * The 32-bit device is removed from si_domain and falls
                 * back to non-identity mapping.
                 */
                domain_remove_one_dev_info(si_domain, pdev);
                printk(KERN_INFO "32bit %s uses non-identity mapping\n",
                       pci_name(pdev));
                return 0;
        }

        /*
         * When a 64-bit DMA device is detached from a VM, the device is
         * put back into si_domain for identity mapping.
         */
        if (iommu_should_identity_map(pdev, 0)) {
                int ret;
                ret = domain_add_dev_info(si_domain, pdev,
                                          hw_pass_through ?
                                          CONTEXT_TT_PASS_THROUGH :
                                          CONTEXT_TT_MULTI_LEVEL);
                if (!ret) {
                        printk(KERN_INFO "64bit %s uses identity mapping\n",
                               pci_name(pdev));
                        return 1;
                }
        }

        return 0;
}
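/*
 * Editorial note (illustrative): iommu_no_mapping() doubles as a lazy
 * state machine for identity-mapped devices.  If, say, a driver calls
 * dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) on a machine with more
 * than 4GB of memory, the next mapping request lands here, the
 * iommu_should_identity_map() check now fails, and the device is moved
 * out of si_domain into an ordinary DMA domain; conversely a 64-bit
 * device that lost its domain (e.g. after VM detach) is re-added to
 * si_domain above.
 */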
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
                                     size_t size, int dir, u64 dma_mask)
{
        struct pci_dev *pdev = to_pci_dev(hwdev);
        struct dmar_domain *domain;
        phys_addr_t start_paddr;
        struct iova *iova;
        int prot = 0;
        int ret;
        struct intel_iommu *iommu;
        unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

        BUG_ON(dir == DMA_NONE);

        if (iommu_no_mapping(hwdev))
                return paddr;

        domain = get_valid_domain_for_dev(pdev);
        if (!domain)
                return 0;

        iommu = domain_get_iommu(domain);
        size = aligned_nrpages(paddr, size);

        iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
        if (!iova)
                goto error;

        /*
         * Check if the DMAR supports zero-length reads on write-only
         * mappings.
         */
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
            !cap_zlr(iommu->cap))
                prot |= DMA_PTE_READ;
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                prot |= DMA_PTE_WRITE;
        /*
         * [paddr, paddr + size) might span a partial page, so we map the
         * whole page.  Note: if two parts of one page are mapped
         * separately, we might have two guest addresses mapping to the
         * same host paddr; this is not a big problem.
         */
        ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
                                 mm_to_dma_pfn(paddr_pfn), size, prot);
        if (ret)
                goto error;

        /* It's a non-present to present mapping. Only flush if caching mode. */
        if (cap_caching_mode(iommu->cap))
                iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
        else
                iommu_flush_write_buffer(iommu);

        start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
        start_paddr += paddr & ~PAGE_MASK;
        return start_paddr;

error:
        if (iova)
                __free_iova(&domain->iovad, iova);
        printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
               pci_name(pdev), size, (unsigned long long)paddr, dir);
        return 0;
}
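/*
 * Editorial worked example (illustrative numbers): mapping paddr
 * 0x12345678 with size 0x1000 touches two pages (offset 0x678 plus
 * 0x1000 crosses a page boundary), so aligned_nrpages() rounds the
 * request up to 2 pages.  The returned handle is the allocated IOVA
 * page plus the original sub-page offset:
 *
 *      (iova->pfn_lo << PAGE_SHIFT) + 0x678
 */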
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir,
                                 struct dma_attrs *attrs)
{
        return __intel_map_single(dev, page_to_phys(page) + offset, size,
                                  dir, to_pci_dev(dev)->dma_mask);
}

static void flush_unmaps(void)
{
        int i, j;

        timer_on = 0;

        /* just flush them all */
        for (i = 0; i < g_num_of_iommus; i++) {
                struct intel_iommu *iommu = g_iommus[i];
                if (!iommu)
                        continue;

                if (!deferred_flush[i].next)
                        continue;

                /* In caching mode, global flushes turn emulation expensive */
                if (!cap_caching_mode(iommu->cap))
                        iommu->flush.flush_iotlb(iommu, 0, 0, 0,
                                                 DMA_TLB_GLOBAL_FLUSH);
                for (j = 0; j < deferred_flush[i].next; j++) {
                        unsigned long mask;
                        struct iova *iova = deferred_flush[i].iova[j];
                        struct dmar_domain *domain = deferred_flush[i].domain[j];

                        /* On real hardware multiple invalidations are expensive */
                        if (cap_caching_mode(iommu->cap))
                                iommu_flush_iotlb_psi(iommu, domain->id,
                                        iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
                        else {
                                mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
                                iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
                                        (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
                        }
                        __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
                }
                deferred_flush[i].next = 0;
        }

        list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
        unsigned long flags;

        spin_lock_irqsave(&async_umap_flush_lock, flags);
        flush_unmaps();
        spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
        unsigned long flags;
        int next, iommu_id;
        struct intel_iommu *iommu;

        spin_lock_irqsave(&async_umap_flush_lock, flags);
        if (list_size == HIGH_WATER_MARK)
                flush_unmaps();

        iommu = domain_get_iommu(dom);
        iommu_id = iommu->seq_id;

        next = deferred_flush[iommu_id].next;
        deferred_flush[iommu_id].domain[next] = dom;
        deferred_flush[iommu_id].iova[next] = iova;
        deferred_flush[iommu_id].next++;

        if (!timer_on) {
                mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
                timer_on = 1;
        }
        list_size++;
        spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
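/*
 * Editorial note (illustrative): add_unmap() is the producer half of the
 * deferred-unmap scheme.  Each non-strict unmap parks its IOVA here
 * instead of flushing the IOTLB immediately; flush_unmaps() drains the
 * batch either when HIGH_WATER_MARK entries accumulate or when the 10ms
 * unmap_timer fires, so one flush pays for many unmaps.
 */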
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir,
                             struct dma_attrs *attrs)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct dmar_domain *domain;
        unsigned long start_pfn, last_pfn;
        struct iova *iova;
        struct intel_iommu *iommu;

        if (iommu_no_mapping(dev))
                return;

        domain = find_domain(pdev);
        BUG_ON(!domain);

        iommu = domain_get_iommu(domain);

        iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
        if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
                      (unsigned long long)dev_addr))
                return;

        start_pfn = mm_to_dma_pfn(iova->pfn_lo);
        last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

        pr_debug("Device %s unmapping: pfn %lx-%lx\n",
                 pci_name(pdev), start_pfn, last_pfn);

        /* clear the whole page */
        dma_pte_clear_range(domain, start_pfn, last_pfn);

        /* free page tables */
        dma_pte_free_pagetable(domain, start_pfn, last_pfn);

        if (intel_iommu_strict) {
                iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
                                      last_pfn - start_pfn + 1, 0);
                /* free iova */
                __free_iova(&domain->iovad, iova);
        } else {
                add_unmap(domain, iova);
                /*
                 * Queue up the release of the unmap to save the 1/6th of
                 * the CPU used up by the IOTLB flush operation...
                 */
        }
}

static void *intel_alloc_coherent(struct device *hwdev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags)
{
        void *vaddr;
        int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        if (!iommu_no_mapping(hwdev))
                flags &= ~(GFP_DMA | GFP_DMA32);
        else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
                if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
                        flags |= GFP_DMA;
                else
                        flags |= GFP_DMA32;
        }

        vaddr = (void *)__get_free_pages(flags, order);
        if (!vaddr)
                return NULL;
        memset(vaddr, 0, size);

        *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
                                         DMA_BIDIRECTIONAL,
                                         hwdev->coherent_dma_mask);
        if (*dma_handle)
                return vaddr;
        free_pages((unsigned long)vaddr, order);
        return NULL;
}
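/*
 * Editorial note (illustrative): when translation is active the GFP_DMA
 * and GFP_DMA32 hints are stripped above, because the IOMMU can remap
 * any physical page into the device's reachable range.  Only on the
 * no-translation path must the allocation itself honour the mask: a
 * coherent_dma_mask below DMA_BIT_MASK(32) forces GFP_DMA, while one of
 * at least 32 bits that is still below the required mask gets GFP_DMA32.
 */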
static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                                dma_addr_t dma_handle)
{
        int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
        free_pages((unsigned long)vaddr, order);
}

static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
                           int nelems, enum dma_data_direction dir,
                           struct dma_attrs *attrs)
{
        struct pci_dev *pdev = to_pci_dev(hwdev);
        struct dmar_domain *domain;
        unsigned long start_pfn, last_pfn;
        struct iova *iova;
        struct intel_iommu *iommu;

        if (iommu_no_mapping(hwdev))
                return;

        domain = find_domain(pdev);
        BUG_ON(!domain);

        iommu = domain_get_iommu(domain);

        iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
        if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
                      (unsigned long long)sglist[0].dma_address))
                return;

        start_pfn = mm_to_dma_pfn(iova->pfn_lo);
        last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

        /* clear the whole page */
        dma_pte_clear_range(domain, start_pfn, last_pfn);

        /* free page tables */
        dma_pte_free_pagetable(domain, start_pfn, last_pfn);

        if (intel_iommu_strict) {
                iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
                                      last_pfn - start_pfn + 1, 0);
                /* free iova */
                __free_iova(&domain->iovad, iova);
        } else {
                add_unmap(domain, iova);
                /*
                 * Queue up the release of the unmap to save the 1/6th of
                 * the CPU used up by the IOTLB flush operation...
                 */
        }
}

static int intel_nontranslate_map_sg(struct device *hddev,
                                     struct scatterlist *sglist, int nelems, int dir)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nelems, i) {
                BUG_ON(!sg_page(sg));
                sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
                sg->dma_length = sg->length;
        }
        return nelems;
}

static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
                        enum dma_data_direction dir, struct dma_attrs *attrs)
{
        int i;
        struct pci_dev *pdev = to_pci_dev(hwdev);
        struct dmar_domain *domain;
        size_t size = 0;
        int prot = 0;
        struct iova *iova = NULL;
        int ret;
        struct scatterlist *sg;
        unsigned long start_vpfn;
        struct intel_iommu *iommu;

        BUG_ON(dir == DMA_NONE);
        if (iommu_no_mapping(hwdev))
                return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

        domain = get_valid_domain_for_dev(pdev);
        if (!domain)
                return 0;

        iommu = domain_get_iommu(domain);

        for_each_sg(sglist, sg, nelems, i)
                size += aligned_nrpages(sg->offset, sg->length);

        iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
                                pdev->dma_mask);
        if (!iova) {
                sglist->dma_length = 0;
                return 0;
        }

        /*
         * Check if the DMAR supports zero-length reads on write-only
         * mappings.
         */
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
            !cap_zlr(iommu->cap))
                prot |= DMA_PTE_READ;
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                prot |= DMA_PTE_WRITE;

        start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

        ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
        if (unlikely(ret)) {
                /* clear the page */
                dma_pte_clear_range(domain, start_vpfn,
                                    start_vpfn + size - 1);
                /* free page tables */
                dma_pte_free_pagetable(domain, start_vpfn,
                                       start_vpfn + size - 1);
                /* free iova */
                __free_iova(&domain->iovad, iova);
                return 0;
        }

        /* It's a non-present to present mapping. Only flush if caching mode. */
        if (cap_caching_mode(iommu->cap))
                iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
        else
                iommu_flush_write_buffer(iommu);

        return nelems;
}

static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
        .alloc_coherent = intel_alloc_coherent,
        .free_coherent = intel_free_coherent,
        .map_sg = intel_map_sg,
        .unmap_sg = intel_unmap_sg,
        .map_page = intel_map_page,
        .unmap_page = intel_unmap_page,
        .mapping_error = intel_mapping_error,
};
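/*
 * Editorial note (illustrative usage): once dma_ops points at
 * intel_dma_ops, unmodified driver code is routed through this table.
 * For example, a hypothetical driver call such as
 *
 *      dma_addr_t dma = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
 *                                    DMA_TO_DEVICE);
 *
 * dispatches to intel_map_page(), and the matching dma_unmap_page() to
 * intel_unmap_page().  Note that no .sync_* handlers are installed in
 * this version: DMA on these platforms is cache-coherent, so only
 * map/unmap need IOMMU work.
 */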
static inline int iommu_domain_cache_init(void)
{
        int ret = 0;

        iommu_domain_cache = kmem_cache_create("iommu_domain",
                                               sizeof(struct dmar_domain),
                                               0, SLAB_HWCACHE_ALIGN, NULL);
        if (!iommu_domain_cache) {
                printk(KERN_ERR "Couldn't create iommu_domain cache\n");
                ret = -ENOMEM;
        }

        return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
        int ret = 0;

        iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
                                                sizeof(struct device_domain_info),
                                                0, SLAB_HWCACHE_ALIGN, NULL);
        if (!iommu_devinfo_cache) {
                printk(KERN_ERR "Couldn't create devinfo cache\n");
                ret = -ENOMEM;
        }

        return ret;
}

static inline int iommu_iova_cache_init(void)
{
        int ret = 0;

        iommu_iova_cache = kmem_cache_create("iommu_iova",
                                             sizeof(struct iova),
                                             0, SLAB_HWCACHE_ALIGN, NULL);
        if (!iommu_iova_cache) {
                printk(KERN_ERR "Couldn't create iova cache\n");
                ret = -ENOMEM;
        }

        return ret;
}

static int __init iommu_init_mempool(void)
{
        int ret;

        ret = iommu_iova_cache_init();
        if (ret)
                return ret;

        ret = iommu_domain_cache_init();
        if (ret)
                goto domain_error;

        ret = iommu_devinfo_cache_init();
        if (!ret)
                return ret;

        kmem_cache_destroy(iommu_domain_cache);
domain_error:
        kmem_cache_destroy(iommu_iova_cache);

        return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
        kmem_cache_destroy(iommu_devinfo_cache);
        kmem_cache_destroy(iommu_domain_cache);
        kmem_cache_destroy(iommu_iova_cache);
}

static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
        struct dmar_drhd_unit *drhd;
        u32 vtbar;
        int rc;

        /*
         * We know that this device on this chipset has its own IOMMU.
         * If we find it under a different IOMMU, then the BIOS is lying
         * to us. Hope that the IOMMU for this device is actually
         * disabled, and it needs no translation...
         */
        rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
        if (rc) {
                /* "can't" happen */
                dev_info(&pdev->dev, "failed to run vt-d quirk\n");
                return;
        }
        vtbar &= 0xffff0000;

        /* We know that this IOMMU should be at offset 0xa000 from vtbar */
        drhd = dmar_find_matched_drhd_unit(pdev);
        if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
                            TAINT_FIRMWARE_WORKAROUND,
                            "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
                pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
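/*
 * Editorial note (illustrative arithmetic): the quirk reads the chipset
 * VTBAR from device 00.0 config space at offset 0xb0, masks it down to a
 * 64KB-aligned base (vtbar &= 0xffff0000), and requires that the DRHD
 * unit matched to this IOAT device sit exactly at vtbar + 0xa000.  With
 * a hypothetical vtbar of 0xfed90000 the expected reg_base_addr is
 * 0xfed9a000; anything else means the BIOS pointed the device at the
 * wrong VT-d unit, and the device is marked DUMMY (left untranslated).
 */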
static void __init init_no_remapping_devices(void)
{
        struct dmar_drhd_unit *drhd;

        for_each_drhd_unit(drhd) {
                if (!drhd->include_all) {
                        int i;
                        for (i = 0; i < drhd->devices_cnt; i++)
                                if (drhd->devices[i] != NULL)
                                        break;
                        /* ignore DMAR unit if no pci devices exist */
                        if (i == drhd->devices_cnt)
                                drhd->ignored = 1;
                }
        }

        for_each_drhd_unit(drhd) {
                int i;
                if (drhd->ignored || drhd->include_all)
                        continue;

                for (i = 0; i < drhd->devices_cnt; i++)
                        if (drhd->devices[i] &&
                            !IS_GFX_DEVICE(drhd->devices[i]))
                                break;

                if (i < drhd->devices_cnt)
                        continue;

                /*
                 * This IOMMU has *only* gfx devices.  Either bypass it or
                 * set the gfx_mapped flag, as appropriate.
                 */
                if (dmar_map_gfx) {
                        intel_iommu_gfx_mapped = 1;
                } else {
                        drhd->ignored = 1;
                        for (i = 0; i < drhd->devices_cnt; i++) {
                                if (!drhd->devices[i])
                                        continue;
                                drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
                        }
                }
        }
}

#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;

        for_each_active_iommu(iommu, drhd)
                if (iommu->qi)
                        dmar_reenable_qi(iommu);

        for_each_iommu(iommu, drhd) {
                if (drhd->ignored) {
                        /*
                         * We always have to disable PMRs or DMA may fail
                         * on this device.
                         */
                        if (force_on)
                                iommu_disable_protect_mem_regions(iommu);
                        continue;
                }

                iommu_flush_write_buffer(iommu);

                iommu_set_root_entry(iommu);

                iommu->flush.flush_context(iommu, 0, 0, 0,
                                           DMA_CCMD_GLOBAL_INVL);
                iommu->flush.flush_iotlb(iommu, 0, 0, 0,
                                         DMA_TLB_GLOBAL_FLUSH);
                if (iommu_enable_translation(iommu))
                        return 1;
                iommu_disable_protect_mem_regions(iommu);
        }

        return 0;
}

static void iommu_flush_all(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        for_each_active_iommu(iommu, drhd) {
                iommu->flush.flush_context(iommu, 0, 0, 0,
                                           DMA_CCMD_GLOBAL_INVL);
                iommu->flush.flush_iotlb(iommu, 0, 0, 0,
                                         DMA_TLB_GLOBAL_FLUSH);
        }
}

static int iommu_suspend(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;
        unsigned long flag;

        for_each_active_iommu(iommu, drhd) {
                iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
                                             GFP_ATOMIC);
                if (!iommu->iommu_state)
                        goto nomem;
        }

        iommu_flush_all();

        for_each_active_iommu(iommu, drhd) {
                iommu_disable_translation(iommu);

                raw_spin_lock_irqsave(&iommu->register_lock, flag);

                iommu->iommu_state[SR_DMAR_FECTL_REG] =
                        readl(iommu->reg + DMAR_FECTL_REG);
                iommu->iommu_state[SR_DMAR_FEDATA_REG] =
                        readl(iommu->reg + DMAR_FEDATA_REG);
                iommu->iommu_state[SR_DMAR_FEADDR_REG] =
                        readl(iommu->reg + DMAR_FEADDR_REG);
                iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
                        readl(iommu->reg + DMAR_FEUADDR_REG);

                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
        }
        return 0;

nomem:
        for_each_active_iommu(iommu, drhd)
                kfree(iommu->iommu_state);

        return -ENOMEM;
}

static void iommu_resume(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;
        unsigned long flag;

        if (init_iommu_hw()) {
                if (force_on)
                        panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
                else
                        WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
                return;
        }

        for_each_active_iommu(iommu, drhd) {
                raw_spin_lock_irqsave(&iommu->register_lock, flag);

                writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
                       iommu->reg + DMAR_FECTL_REG);
                writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
                       iommu->reg + DMAR_FEDATA_REG);
                writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
                       iommu->reg + DMAR_FEADDR_REG);
                writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
                       iommu->reg + DMAR_FEUADDR_REG);

                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
        }

        for_each_active_iommu(iommu, drhd)
                kfree(iommu->iommu_state);
}

static struct syscore_ops iommu_syscore_ops = {
        .resume = iommu_resume,
        .suspend = iommu_suspend,
};

static void __init init_iommu_pm_ops(void)
{
        register_syscore_ops(&iommu_syscore_ops);
}

#else
static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_SUSPEND */
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
        list_add(&rmrr->list, &dmar_rmrr_units);
}

int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
        struct acpi_dmar_reserved_memory *rmrr;
        struct dmar_rmrr_unit *rmrru;

        rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
        if (!rmrru)
                return -ENOMEM;

        rmrru->hdr = header;
        rmrr = (struct acpi_dmar_reserved_memory *)header;
        rmrru->base_address = rmrr->base_address;
        rmrru->end_address = rmrr->end_address;

        dmar_register_rmrr_unit(rmrru);
        return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
        struct acpi_dmar_reserved_memory *rmrr;
        int ret;

        rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
        ret = dmar_parse_dev_scope((void *)(rmrr + 1),
                                   ((void *)rmrr) + rmrr->header.length,
                                   &rmrru->devices_cnt, &rmrru->devices,
                                   rmrr->segment);

        if (ret || (rmrru->devices_cnt == 0)) {
                list_del(&rmrru->list);
                kfree(rmrru);
        }
        return ret;
}

static LIST_HEAD(dmar_atsr_units);

int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
        struct acpi_dmar_atsr *atsr;
        struct dmar_atsr_unit *atsru;

        atsr = container_of(hdr, struct acpi_dmar_atsr, header);
        atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
        if (!atsru)
                return -ENOMEM;

        atsru->hdr = hdr;
        atsru->include_all = atsr->flags & 0x1;

        list_add(&atsru->list, &dmar_atsr_units);

        return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
        int rc;
        struct acpi_dmar_atsr *atsr;

        if (atsru->include_all)
                return 0;

        atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
        rc = dmar_parse_dev_scope((void *)(atsr + 1),
                                  (void *)atsr + atsr->header.length,
                                  &atsru->devices_cnt, &atsru->devices,
                                  atsr->segment);
        if (rc || !atsru->devices_cnt) {
                list_del(&atsru->list);
                kfree(atsru);
        }

        return rc;
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
        int i;
        struct pci_bus *bus;
        struct acpi_dmar_atsr *atsr;
        struct dmar_atsr_unit *atsru;

        dev = pci_physfn(dev);

        list_for_each_entry(atsru, &dmar_atsr_units, list) {
                atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
                if (atsr->segment == pci_domain_nr(dev->bus))
                        goto found;
        }

        return 0;

found:
        for (bus = dev->bus; bus; bus = bus->parent) {
                struct pci_dev *bridge = bus->self;

                if (!bridge || !pci_is_pcie(bridge) ||
                    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
                        return 0;

                if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
                        for (i = 0; i < atsru->devices_cnt; i++)
                                if (atsru->devices[i] == bridge)
                                        return 1;
                        break;
                }
        }

        if (atsru->include_all)
                return 1;

        return 0;
}

int __init dmar_parse_rmrr_atsr_dev(void)
{
        struct dmar_rmrr_unit *rmrr, *rmrr_n;
        struct dmar_atsr_unit *atsr, *atsr_n;
        int ret = 0;

        list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
                ret = rmrr_parse_dev(rmrr);
                if (ret)
                        return ret;
        }

        list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
                ret = atsr_parse_dev(atsr);
                if (ret)
                        return ret;
        }

        return ret;
}

/*
 * Here we only respond to the action of a device being unbound from its
 * driver.
 *
 * A newly added device is not attached to its DMAR domain here yet; that
 * happens when the device is first mapped to an iova.
 */
static int device_notifier(struct notifier_block *nb,
                           unsigned long action, void *data)
{
        struct device *dev = data;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct dmar_domain *domain;

        if (iommu_no_mapping(dev))
                return 0;

        domain = find_domain(pdev);
        if (!domain)
                return 0;

        if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
                domain_remove_one_dev_info(domain, pdev);

                if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
                    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
                    list_empty(&domain->devices))
                        domain_exit(domain);
        }

        return 0;
}

static struct notifier_block device_nb = {
        .notifier_call = device_notifier,
};
int __init intel_iommu_init(void)
{
        int ret = 0;

        /* VT-d is required for a TXT/tboot launch, so enforce that */
        force_on = tboot_force_iommu();

        if (dmar_table_init()) {
                if (force_on)
                        panic("tboot: Failed to initialize DMAR table\n");
                return -ENODEV;
        }

        if (dmar_dev_scope_init() < 0) {
                if (force_on)
                        panic("tboot: Failed to initialize DMAR device scope\n");
                return -ENODEV;
        }

        if (no_iommu || dmar_disabled)
                return -ENODEV;

        if (iommu_init_mempool()) {
                if (force_on)
                        panic("tboot: Failed to initialize iommu memory\n");
                return -ENODEV;
        }

        if (list_empty(&dmar_rmrr_units))
                printk(KERN_INFO "DMAR: No RMRR found\n");

        if (list_empty(&dmar_atsr_units))
                printk(KERN_INFO "DMAR: No ATSR found\n");

        if (dmar_init_reserved_ranges()) {
                if (force_on)
                        panic("tboot: Failed to reserve iommu ranges\n");
                return -ENODEV;
        }

        init_no_remapping_devices();

        ret = init_dmars();
        if (ret) {
                if (force_on)
                        panic("tboot: Failed to initialize DMARs\n");
                printk(KERN_ERR "IOMMU: dmar init failed\n");
                put_iova_domain(&reserved_iova_list);
                iommu_exit_mempool();
                return ret;
        }
        printk(KERN_INFO
               "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

        init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
        swiotlb = 0;
#endif
        dma_ops = &intel_dma_ops;

        init_iommu_pm_ops();

        bus_set_iommu(&pci_bus_type, &intel_iommu_ops);

        bus_register_notifier(&pci_bus_type, &device_nb);

        intel_iommu_enabled = 1;

        return 0;
}

static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
                                           struct pci_dev *pdev)
{
        struct pci_dev *tmp, *parent;

        if (!iommu || !pdev)
                return;

        /* dependent device detach */
        tmp = pci_find_upstream_pcie_bridge(pdev);
        /* Secondary interface's bus number and devfn 0 */
        if (tmp) {
                parent = pdev->bus->self;
                while (parent != tmp) {
                        iommu_detach_dev(iommu, parent->bus->number,
                                         parent->devfn);
                        parent = parent->bus->self;
                }
                if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
                        iommu_detach_dev(iommu,
                                         tmp->subordinate->number, 0);
                else /* this is a legacy PCI bridge */
                        iommu_detach_dev(iommu, tmp->bus->number,
                                         tmp->devfn);
        }
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
                                       struct pci_dev *pdev)
{
        struct device_domain_info *info;
        struct intel_iommu *iommu;
        unsigned long flags;
        int found = 0;
        struct list_head *entry, *tmp;

        iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
                                pdev->devfn);
        if (!iommu)
                return;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_safe(entry, tmp, &domain->devices) {
                info = list_entry(entry, struct device_domain_info, link);
                if (info->segment == pci_domain_nr(pdev->bus) &&
                    info->bus == pdev->bus->number &&
                    info->devfn == pdev->devfn) {
                        list_del(&info->link);
                        list_del(&info->global);
                        if (info->dev)
                                info->dev->dev.archdata.iommu = NULL;
                        spin_unlock_irqrestore(&device_domain_lock, flags);

                        iommu_disable_dev_iotlb(info);
                        iommu_detach_dev(iommu, info->bus, info->devfn);
                        iommu_detach_dependent_devices(iommu, pdev);
                        free_devinfo_mem(info);

                        spin_lock_irqsave(&device_domain_lock, flags);
                        continue;
                }

                /*
                 * If there are no other devices under the same IOMMU owned
                 * by this domain, clear this IOMMU from iommu_bmp and
                 * update the IOMMU count and coherency.
                 */
                if (iommu == device_to_iommu(info->segment, info->bus,
                                             info->devfn))
                        found = 1;
        }

        spin_unlock_irqrestore(&device_domain_lock, flags);

        if (found == 0) {
                unsigned long tmp_flags;
                spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
                clear_bit(iommu->seq_id, &domain->iommu_bmp);
                domain->iommu_count--;
                domain_update_iommu_cap(domain);
                spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);

                if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
                    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
                        spin_lock_irqsave(&iommu->lock, tmp_flags);
                        clear_bit(domain->id, iommu->domain_ids);
                        iommu->domains[domain->id] = NULL;
                        spin_unlock_irqrestore(&iommu->lock, tmp_flags);
                }
        }
}
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
        struct device_domain_info *info;
        struct intel_iommu *iommu;
        unsigned long flags1, flags2;

        spin_lock_irqsave(&device_domain_lock, flags1);
        while (!list_empty(&domain->devices)) {
                info = list_entry(domain->devices.next,
                                  struct device_domain_info, link);
                list_del(&info->link);
                list_del(&info->global);
                if (info->dev)
                        info->dev->dev.archdata.iommu = NULL;

                spin_unlock_irqrestore(&device_domain_lock, flags1);

                iommu_disable_dev_iotlb(info);
                iommu = device_to_iommu(info->segment, info->bus, info->devfn);
                iommu_detach_dev(iommu, info->bus, info->devfn);
                iommu_detach_dependent_devices(iommu, info->dev);

                /*
                 * Clear this IOMMU in iommu_bmp; update the IOMMU count
                 * and capabilities.
                 */
                spin_lock_irqsave(&domain->iommu_lock, flags2);
                if (test_and_clear_bit(iommu->seq_id,
                                       &domain->iommu_bmp)) {
                        domain->iommu_count--;
                        domain_update_iommu_cap(domain);
                }
                spin_unlock_irqrestore(&domain->iommu_lock, flags2);

                free_devinfo_mem(info);
                spin_lock_irqsave(&device_domain_lock, flags1);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags1);
}

/* Domain IDs for virtual machines; they are never set in a context entry. */
static unsigned long vm_domid;

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
        struct dmar_domain *domain;

        domain = alloc_domain_mem();
        if (!domain)
                return NULL;

        domain->id = vm_domid++;
        domain->nid = -1;
        memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
        domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

        return domain;
}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
        int adjust_width;

        init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
        spin_lock_init(&domain->iommu_lock);

        domain_reserve_special_ranges(domain);

        /* calculate AGAW */
        domain->gaw = guest_width;
        adjust_width = guestwidth_to_adjustwidth(guest_width);
        domain->agaw = width_to_agaw(adjust_width);

        INIT_LIST_HEAD(&domain->devices);

        domain->iommu_count = 0;
        domain->iommu_coherency = 0;
        domain->iommu_snooping = 0;
        domain->iommu_superpage = 0;
        domain->max_addr = 0;
        domain->nid = -1;

        /* always allocate the top pgd */
        domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
        if (!domain->pgd)
                return -ENOMEM;
        domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
        return 0;
}
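/*
 * Editorial worked example (illustrative): with the usual guest_width of
 * DEFAULT_DOMAIN_ADDRESS_WIDTH (48), guestwidth_to_adjustwidth(48)
 * returns 48, and width_to_agaw(48) = (48 - 30) / 9 = 2, i.e. a 4-level
 * page table; agaw_to_width(2) maps back to 30 + 2 * 9 = 48 bits.
 */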
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
        unsigned long flags;
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        unsigned long i;
        unsigned long ndomains;

        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;
                iommu = drhd->iommu;

                ndomains = cap_ndoms(iommu->cap);
                for_each_set_bit(i, iommu->domain_ids, ndomains) {
                        if (iommu->domains[i] == domain) {
                                spin_lock_irqsave(&iommu->lock, flags);
                                clear_bit(i, iommu->domain_ids);
                                iommu->domains[i] = NULL;
                                spin_unlock_irqrestore(&iommu->lock, flags);
                                break;
                        }
                }
        }
}

static void vm_domain_exit(struct dmar_domain *domain)
{
        /* Domain 0 is reserved, so don't process it */
        if (!domain)
                return;

        vm_domain_remove_all_dev_info(domain);
        /* destroy iovas */
        put_iova_domain(&domain->iovad);

        /* clear ptes */
        dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

        /* free page tables */
        dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

        iommu_free_vm_domain(domain);
        free_domain_mem(domain);
}

static int intel_iommu_domain_init(struct iommu_domain *domain)
{
        struct dmar_domain *dmar_domain;

        dmar_domain = iommu_alloc_vm_domain();
        if (!dmar_domain) {
                printk(KERN_ERR
                       "intel_iommu_domain_init: dmar_domain == NULL\n");
                return -ENOMEM;
        }
        if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
                printk(KERN_ERR
                       "intel_iommu_domain_init() failed\n");
                vm_domain_exit(dmar_domain);
                return -ENOMEM;
        }
        domain_update_iommu_cap(dmar_domain);
        domain->priv = dmar_domain;

        return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
        struct dmar_domain *dmar_domain = domain->priv;

        domain->priv = NULL;
        vm_domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
                                     struct device *dev)
{
        struct dmar_domain *dmar_domain = domain->priv;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct intel_iommu *iommu;
        int addr_width;

        /* normally pdev is not mapped */
        if (unlikely(domain_context_mapped(pdev))) {
                struct dmar_domain *old_domain;

                old_domain = find_domain(pdev);
                if (old_domain) {
                        if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
                            dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
                                domain_remove_one_dev_info(old_domain, pdev);
                        else
                                domain_remove_dev_info(old_domain);
                }
        }

        iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
                                pdev->devfn);
        if (!iommu)
                return -ENODEV;

        /* check if this iommu agaw is sufficient for max mapped address */
        addr_width = agaw_to_width(iommu->agaw);
        if (addr_width > cap_mgaw(iommu->cap))
                addr_width = cap_mgaw(iommu->cap);

        if (dmar_domain->max_addr > (1LL << addr_width)) {
                printk(KERN_ERR "%s: iommu width (%d) is not "
                       "sufficient for the mapped address (%llx)\n",
                       __func__, addr_width, dmar_domain->max_addr);
                return -EFAULT;
        }
        dmar_domain->gaw = addr_width;

        /*
         * Knock out extra levels of page tables if necessary
         */
        while (iommu->agaw < dmar_domain->agaw) {
                struct dma_pte *pte;

                pte = dmar_domain->pgd;
                if (dma_pte_present(pte)) {
                        dmar_domain->pgd = (struct dma_pte *)
                                phys_to_virt(dma_pte_addr(pte));
                        free_pgtable_page(pte);
                }
                dmar_domain->agaw--;
        }

        return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}
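/*
 * Editorial note (illustrative): the while loop above trims the domain's
 * page-table depth to what the IOMMU supports.  A domain built with
 * agaw 3 (57-bit, 5 levels) attaching to an IOMMU with agaw 2 (48-bit,
 * 4 levels) pops the top level once: the old root is freed and its
 * first (and, after the max_addr check above, only populated) entry
 * becomes the new pgd.
 */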
static void intel_iommu_detach_device(struct iommu_domain *domain,
                                      struct device *dev)
{
        struct dmar_domain *dmar_domain = domain->priv;
        struct pci_dev *pdev = to_pci_dev(dev);

        domain_remove_one_dev_info(dmar_domain, pdev);
}

static int intel_iommu_map(struct iommu_domain *domain,
                           unsigned long iova, phys_addr_t hpa,
                           int gfp_order, int iommu_prot)
{
        struct dmar_domain *dmar_domain = domain->priv;
        u64 max_addr;
        int prot = 0;
        size_t size;
        int ret;

        if (iommu_prot & IOMMU_READ)
                prot |= DMA_PTE_READ;
        if (iommu_prot & IOMMU_WRITE)
                prot |= DMA_PTE_WRITE;
        if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
                prot |= DMA_PTE_SNP;

        size = PAGE_SIZE << gfp_order;
        max_addr = iova + size;
        if (dmar_domain->max_addr < max_addr) {
                u64 end;

                /* check if minimum agaw is sufficient for mapped address */
                end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
                if (end < max_addr) {
                        printk(KERN_ERR "%s: iommu width (%d) is not "
                               "sufficient for the mapped address (%llx)\n",
                               __func__, dmar_domain->gaw, max_addr);
                        return -EFAULT;
                }
                dmar_domain->max_addr = max_addr;
        }
        /*
         * Round up the size to the next multiple of PAGE_SIZE, if it and
         * the low bits of hpa would take us onto the next page.
         */
        size = aligned_nrpages(hpa, size);
        ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
                                 hpa >> VTD_PAGE_SHIFT, size, prot);
        return ret;
}
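/*
 * Editorial worked example (illustrative): with gfp_order = 0 the size
 * starts as PAGE_SIZE (0x1000), but a hypothetical hpa of 0x1800 spans
 * bytes 0x1800-0x27ff, i.e. parts of two 4KB pages, so
 * aligned_nrpages(0x1800, 0x1000) returns 2 and both pages get mapped.
 */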
static int intel_iommu_unmap(struct iommu_domain *domain,
                             unsigned long iova, int gfp_order)
{
        struct dmar_domain *dmar_domain = domain->priv;
        size_t size = PAGE_SIZE << gfp_order;
        int order;

        order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
                                    (iova + size - 1) >> VTD_PAGE_SHIFT);

        if (dmar_domain->max_addr == iova + size)
                dmar_domain->max_addr = iova;

        return order;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
                                            unsigned long iova)
{
        struct dmar_domain *dmar_domain = domain->priv;
        struct dma_pte *pte;
        u64 phys = 0;

        pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
        if (pte)
                phys = dma_pte_addr(pte);

        return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
                                      unsigned long cap)
{
        struct dmar_domain *dmar_domain = domain->priv;

        if (cap == IOMMU_CAP_CACHE_COHERENCY)
                return dmar_domain->iommu_snooping;
        if (cap == IOMMU_CAP_INTR_REMAP)
                return intr_remapping_enabled;

        return 0;
}

static struct iommu_ops intel_iommu_ops = {
        .domain_init = intel_iommu_domain_init,
        .domain_destroy = intel_iommu_domain_destroy,
        .attach_dev = intel_iommu_attach_device,
        .detach_dev = intel_iommu_detach_device,
        .map = intel_iommu_map,
        .unmap = intel_iommu_unmap,
        .iova_to_phys = intel_iommu_iova_to_phys,
        .domain_has_cap = intel_iommu_domain_has_cap,
};

static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
        /* G4x/GM45 integrated gfx dmar support is totally busted. */
        printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
        dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
        /*
         * Mobile 4 Series Chipset neglects to set RWBF capability,
         * but needs it.
         */
        printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
        rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK    (0xf << 8)
#define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
#define GGC_MEMORY_SIZE_1M      (0x1 << 8)
#define GGC_MEMORY_SIZE_2M      (0x3 << 8)
#define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)
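/*
 * Editorial note (hedged interpretation): bits 11:8 of the GGC word
 * appear to encode the graphics stolen-memory configuration and whether
 * a VT-enabled variant was chosen.  E.g. a read of 0x0b00 matches
 * GGC_MEMORY_SIZE_4M_VT (GGC_MEMORY_VT_ENABLED set), while 0x0100
 * (GGC_MEMORY_SIZE_1M) has the VT bit clear and trips the quirk below.
 */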
static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
        unsigned short ggc;

        if (pci_read_config_word(dev, GGC, &ggc))
                return;

        if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
                printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
                dmar_map_gfx = 0;
        } else if (dmar_map_gfx) {
                /* we have to ensure the gfx device is idle before we flush */
                printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
                intel_iommu_strict = 1;
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/*
 * On Tylersburg chipsets, some BIOSes have been known to enable the
 * ISOCH DMAR unit for the Azalia sound device, but not give it any
 * TLB entries, which causes it to deadlock. Check for that. We do
 * this in a function called from init_dmars(), instead of in a PCI
 * quirk, because we don't want to print the obnoxious "BIOS broken"
 * message if VT-d is actually disabled.
 */
static void __init check_tylersburg_isoch(void)
{
        struct pci_dev *pdev;
        uint32_t vtisochctrl;

        /* If there's no Azalia in the system anyway, forget it. */
        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
        if (!pdev)
                return;
        pci_dev_put(pdev);

        /*
         * System Management Registers. Might be hidden, in which case
         * we can't do the sanity check. But that's OK, because the
         * known-broken BIOSes _don't_ actually hide it, so far.
         */
        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
        if (!pdev)
                return;

        if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
                pci_dev_put(pdev);
                return;
        }

        pci_dev_put(pdev);

        /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
        if (vtisochctrl & 1)
                return;

        /* Drop all bits other than the number of TLB entries */
        vtisochctrl &= 0x1c;

        /* If we have the recommended number of TLB entries (16), fine. */
        if (vtisochctrl == 0x10)
                return;

        /* Zero TLB entries? You get to ride the short bus to school. */
        if (!vtisochctrl) {
                WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                iommu_identity_mapping |= IDENTMAP_AZALIA;
                return;
        }

        printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
               vtisochctrl);
}