/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_BRIDGE_HOST_DEVICE(pdev) \
	((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
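
/*
 * Worked example (editor's illustration, not part of the original file):
 * with the default 48-bit guest address width and 4KiB VT-d pages
 * (VTD_PAGE_SHIFT == 12), __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1.
 * On a 64-bit kernel that already fits in an unsigned long; on a 32-bit
 * kernel DOMAIN_MAX_PFN(48) is clamped to 0xffffffff so PFN arithmetic
 * done in unsigned long cannot overflow.  DOMAIN_MAX_ADDR(48) is the
 * corresponding byte address, ((1ULL << 36) - 1) << 12.
 */
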
#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)

static inline int agaw_to_width(int agaw)
	return 30 + agaw * LEVEL_STRIDE;

static inline int width_to_agaw(int width)
	return (width - 30) / LEVEL_STRIDE;
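
/*
 * Editor's note (illustration, not in the original source): address width
 * and AGAW are related by width = 30 + 9 * agaw, and the page-table depth
 * is agaw + 2 levels.  So a 39-bit width is agaw 1 (3-level table), 48 bits
 * is agaw 2 (4 levels), and 57 bits would be agaw 3 (5 levels).
 */
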
static inline unsigned int level_to_offset_bits(int level)
	return (level - 1) * LEVEL_STRIDE;

static inline int pfn_level_offset(unsigned long pfn, int level)
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;

static inline unsigned long level_mask(int level)
	return -1UL << level_to_offset_bits(level);

static inline unsigned long level_size(int level)
	return 1UL << level_to_offset_bits(level);

static inline unsigned long align_to_level(unsigned long pfn, int level)
	return (pfn + level_size(level) - 1) & level_mask(level);

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
	return 1 << ((lvl - 1) * LEVEL_STRIDE);
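
/*
 * Worked example (editor's illustration): for level 2,
 * level_to_offset_bits(2) == 9, so level_size(2) == 512 PFNs (2MiB with
 * 4KiB VT-d pages), level_mask(2) clears the low 9 PFN bits, and
 * pfn_level_offset(pfn, 2) selects PFN bits 9..17 as the index into the
 * level-2 table.  lvl_to_nr_pages(2) == 512 is likewise the number of base
 * pages covered by a single level-2 (2MiB) entry.
 */
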
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);

static inline unsigned long page_to_dma_pfn(struct page *pg)
	return mm_to_dma_pfn(page_to_pfn(pg));

static inline unsigned long virt_to_dma_pfn(void *p)
	return page_to_dma_pfn(virt_to_page(p));
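
/*
 * Editor's illustration: on x86, PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the
 * conversions above are shifts by zero and MM and DMA PFNs coincide.  On a
 * hypothetical kernel with 64KiB MM pages the shift would be 4, and one MM
 * PFN would correspond to 16 consecutive VT-d PFNs - which is why VT-d
 * pages must never be larger than MM pages for these helpers to be
 * meaningful.
 */
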
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 12-63: Context Ptr (12 - (haw-1))
 */
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

static inline bool root_present(struct root_entry *root)
	return (root->val & 1);

static inline void set_root_present(struct root_entry *root)

static inline void set_root_value(struct root_entry *root, unsigned long value)
	root->val |= value & VTD_PAGE_MASK;

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);

/*
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 */
struct context_entry {

static inline bool context_present(struct context_entry *context)
	return (context->lo & 1);

static inline void context_set_present(struct context_entry *context)

static inline void context_set_fault_enable(struct context_entry *context)
	context->lo &= (((u64)-1) << 2) | 1;

static inline void context_set_translation_type(struct context_entry *context,
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;

static inline void context_set_address_root(struct context_entry *context,
	context->lo |= value & VTD_PAGE_MASK;

static inline void context_set_address_width(struct context_entry *context,
	context->hi |= value & 7;

static inline void context_set_domain_id(struct context_entry *context,
	context->hi |= (value & ((1 << 16) - 1)) << 8;

static inline void context_clear_entry(struct context_entry *context)

/*
 * 12-63: Host physical address
 */
static inline void dma_clear_pte(struct dma_pte *pte)

static inline void dma_set_pte_readable(struct dma_pte *pte)
	pte->val |= DMA_PTE_READ;

static inline void dma_set_pte_writable(struct dma_pte *pte)
	pte->val |= DMA_PTE_WRITE;

static inline void dma_set_pte_snp(struct dma_pte *pte)
	pte->val |= DMA_PTE_SNP;

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
	pte->val = (pte->val & ~3) | (prot & 3);

static inline u64 dma_pte_addr(struct dma_pte *pte)
	return pte->val & VTD_PAGE_MASK;
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;

static inline bool dma_pte_present(struct dma_pte *pte)
	return (pte->val & 3) != 0;

static inline bool dma_pte_superpage(struct dma_pte *pte)
	return (pte->val & (1 << 7));

static inline int first_pte_in_page(struct dma_pte *pte)
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
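
/*
 * Minimal sketch (editor's illustration, kept disabled) of how the helpers
 * above compose a leaf PTE.  The helpers are real, but the example function
 * below is hypothetical and not part of the driver.
 */
#if 0
static void example_build_leaf_pte(struct dma_pte *pte, unsigned long pfn)
{
	dma_clear_pte(pte);		/* start from val == 0 */
	dma_set_pte_pfn(pte, pfn);	/* bits 12-63: host physical address */
	dma_set_pte_readable(pte);	/* bit 0: allow DMA reads */
	dma_set_pte_writable(pte);	/* bit 1: allow DMA writes */
	/* pte->val == ((u64)pfn << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE */
}
#endif
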
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)

	int	id;			/* domain id */
	int	nid;			/* node id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping;	/* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
		if (!strncmp(str, "on", 2)) {
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
		} else if (!strncmp(str, "strict", 6)) {
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;

		str += strcspn(str, ",");
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
		vaddr = page_address(page);

static inline void free_pgtable_page(void *vaddr)
	free_page((unsigned long)vaddr);

static inline void *alloc_domain_mem(void)
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);

static void free_domain_mem(void *vaddr)
	kmem_cache_free(iommu_domain_cache, vaddr);

static inline void *alloc_devinfo_mem(void)
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);

static inline void free_devinfo_mem(void *vaddr)
	kmem_cache_free(iommu_devinfo_cache, vaddr);

struct iova *alloc_iova_mem(void)
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);

void free_iova_mem(struct iova *iova)
	kmem_cache_free(iommu_iova_cache, iova);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
		if (test_bit(agaw, &sagaw))

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and fall back
 * to a smaller supported agaw for iommus that don't support the default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)

	return g_iommus[iommu_id];

static void domain_update_iommu_coherency(struct dmar_domain *domain)
	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);

	domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;

static void domain_update_iommu_snooping(struct dmar_domain *domain)
	domain->iommu_snooping = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;

static void domain_update_iommu_superpage(struct dmar_domain *domain)
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
	domain->iommu_superpage = fls(mask);
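
/*
 * Editor's illustration: cap_super_page_val() is a bitmask of supported
 * superpage sizes (bit 0 = 2MiB, bit 1 = 1GiB, ...).  If one active IOMMU
 * reports 0x3 (2MiB and 1GiB) and another only 0x1, the loop above ANDs
 * them down to 0x1 and fls(0x1) == 1, so the domain is limited to 2MiB
 * superpages - the smallest common denominator.
 */
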
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
	struct dmar_drhd_unit *drhd = NULL;

	for_each_drhd_unit(drhd) {
		if (segment != drhd->segment)

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)

		if (drhd->include_all)

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
		context = (struct context_entry *)
			alloc_pgtable_page(iommu->node);
			spin_unlock_irqrestore(&iommu->lock, flags);
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
	struct root_entry *root;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	ret = context_present(&context[devfn]);
	spin_unlock_irqrestore(&iommu->lock, flags);

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
	struct root_entry *root;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
	spin_unlock_irqrestore(&iommu->lock, flags);

static void free_context_table(struct intel_iommu *iommu)
	struct root_entry *root;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
			free_pgtable_page(context);
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
	spin_unlock_irqrestore(&iommu->lock, flags);
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
		if (level == target_level)

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page(domain->nid);

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
				domain_flush_cache(domain, pte, sizeof(*pte));
		parent = phys_to_virt(dma_pte_addr(pte));

/* return the address's pte at a specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 int level, int *large_page)
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];

		if (!dma_pte_present(pte)) {
		if (pte->val & DMA_PTE_LARGE_PAGE) {

		parent = phys_to_virt(dma_pte_addr(pte));

/* clear last level pte; a TLB flush should follow */
static int dma_pte_clear_range(struct dmar_domain *domain,
			       unsigned long start_pfn,
			       unsigned long last_pfn)
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			start_pfn += lvl_to_nr_pages(large_page);
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	order = (large_page - 1) * 9;

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level) - 1)) {
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);

		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
	struct root_entry *root;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

static void iommu_set_root_entry(struct intel_iommu *iommu)
	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
	if (!rwbf_quirk && !cap_rwbf(iommu->cap))

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;

	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;

	/* Note: set drain read/write */
	/*
	 * This is probably just to be extra safe; it looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;

	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			 (unsigned long long)DMA_TLB_IIRG(type),
			 (unsigned long long)DMA_TLB_IAIG(val));
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))

	if (!dmar_find_matched_atsr_unit(info->dev))

	info->iommu = iommu;

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
	if (!info->dev || !pci_ats_enabled(info->dev))

	pci_disable_ats(info->dev);

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	spin_unlock_irqrestore(&device_domain_lock, flags);

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	/*
	 * Fall back to domain-selective flush if there is no PSI support or
	 * the size is too big.
	 * PSI requires the region size to be a power of two (2 ^ x pages) and
	 * the base address to be naturally aligned to that size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
		iommu->flush.flush_iotlb(iommu, did, addr, mask,

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
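
/*
 * Worked example (editor's illustration): flushing pages == 5 gives
 * mask = ilog2(__roundup_pow_of_two(5)) = ilog2(8) = 3, i.e. a
 * page-selective invalidation of 2^3 = 8 naturally aligned 4KiB pages.
 * If the IOMMU lacks PSI support, or 3 > cap_max_amask_val(), the code
 * above falls back to a domain-selective flush (DMA_TLB_DSI_FLUSH).
 */
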
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

static int iommu_enable_translation(struct intel_iommu *iommu)
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

static int iommu_disable_translation(struct intel_iommu *iommu)
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

static int iommu_init_domains(struct intel_iommu *iommu)
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
	struct dmar_domain *domain;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
					domain_exit(domain);
			spin_unlock_irqrestore(&domain->iommu_lock, flags);

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

		irq_set_handler_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
	if (i == g_num_of_iommus)

	/* free context mapping */
	free_context_table(iommu);

static struct dmar_domain *alloc_domain(void)
	struct dmar_domain *domain;

	domain = alloc_domain_mem();

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");

	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {

	clear_bit(num, iommu->domain_ids);
	clear_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = NULL;
	spin_unlock_irqrestore(&iommu->lock, flags);

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
	struct pci_dev *pdev = NULL;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
			  &reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
			    IOVA_PFN(IOAPIC_RANGE_END));
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
			iova = reserve_iova(&reserved_iova_list,
				printk(KERN_ERR "Reserve iova failed\n");

static void domain_reserve_special_ranges(struct dmar_domain *domain)
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);

static inline int guestwidth_to_adjustwidth(int gaw)
	int r = (gaw - 12) % 9;
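
/*
 * Worked example (editor's illustration): the page table covers 12 bits of
 * page offset plus a multiple of 9 bits per level, so the usable width is
 * rounded up accordingly.  A guest width of 48 has (48 - 12) % 9 == 0 and
 * is kept as-is; a guest width of 40 has r == 1 and is rounded up to
 * 40 + (9 - 1) = 48 bits.
 */
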
static int domain_init(struct dmar_domain *domain, int guest_width)
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
	domain->agaw = agaw;

	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
		domain->iommu_snooping = 0;

	domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	domain->iommu_count = 1;
	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);

	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);

static void domain_exit(struct dmar_domain *domain)
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	domain_remove_dev_info(domain);

	put_iova_domain(&domain->iovad);

	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long ndomains;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);

	context = device_to_context_entry(iommu, bus, devfn);

	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {

			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If the
	 * hardware _does_ cache non-present entries, then it does so in the
	 * special domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
		iommu_flush_write_buffer(iommu);
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);

	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		parent = parent->bus->self;
	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->bus),

static int domain_context_mapped(struct pci_dev *pdev)
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);

	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
		parent = parent->bus->self;
	if (pci_is_pcie(tmp))
		return device_context_mapped(iommu, tmp->subordinate->number,
	return device_context_mapped(iommu, tmp->bus->number,

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
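
/*
 * Worked example (editor's illustration): with 4KiB pages, host_addr ==
 * 0x1200 and size == 0x2200 gives an in-page offset of 0x200, and
 * PAGE_ALIGN(0x200 + 0x2200) == 0x3000, i.e. 3 VT-d pages.  Rounding to
 * the MM page size ensures a full MM page worth of IOVA space is consumed
 * even where VT-d pages could be smaller.
 */
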
/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		pfnmerge >>= VTD_STRIDE_SHIFT;
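
/*
 * Worked example (editor's illustration): mapping 1024 pages at
 * iov_pfn == 0x400 to phys_pfn == 0x80400 - both PFNs are 512-page
 * (2MiB) aligned, so pfnmerge has no bits below VTD_STRIDE_SHIFT set and
 * pages >= 512, allowing level 2 (2MiB) entries if the domain's
 * iommu_superpage level permits it.  If either address were only 4KiB
 * aligned, the merged PFN would force level 1 (4KiB) mappings.
 */
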
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res = 0;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;

	while (nr_pages > 0) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;

			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);

			/* It is a large page */
			if (largepage_lvl > 1) {
				pteval |= DMA_PTE_LARGE_PAGE;
				/* Ensure that old small page tables are removed to make room
				   for superpage, if they exist. */
				dma_pte_clear_range(domain, iov_pfn,
						    iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
				dma_pte_free_pagetable(domain, iov_pfn,
						       iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;

		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
				debug_dma_dump_mappings(NULL);

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);

		if (!sg_res && nr_pages)

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
static void domain_remove_dev_info(struct dmar_domain *domain)
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	spin_unlock_irqrestore(&device_domain_lock, flags);

/*
 * Note: struct pci_dev->dev.archdata.iommu stores the info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
		return info->domain;

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;

	domain = find_domain(pdev);

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
		if (pci_is_pcie(dev_tmp)) {
			bus = dev_tmp->subordinate->number;
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, use it */

	domain = alloc_domain();

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
		free_domain_mem(domain);

	if (domain_init(domain, gaw)) {
		domain_exit(domain);

	/* register pcie-to-pci device */
		info = alloc_devinfo_mem();
			domain_exit(domain);
		info->segment = segment;
		info->devfn = devfn;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, use it */
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
			spin_unlock_irqrestore(&device_domain_lock, flags);
			free_devinfo_mem(info);
			domain_exit(domain);
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
			spin_unlock_irqrestore(&device_domain_lock, flags);

	info = alloc_devinfo_mem();
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody is fast */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
		free_devinfo_mem(info);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	/* recheck it here, maybe others set it */
	return find_domain(pdev);
static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);

	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
	struct dmar_domain *domain;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, so which didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       pci_name(pdev), start, end);

	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));

	ret = iommu_domain_identity_map(domain, start, end);

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);

	domain_exit(domain);

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
	return iommu_prepare_identity_map(pdev, rmrr->base_address,

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
	struct pci_dev *pdev;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);

		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
#else
static inline void iommu_prepare_isa(void)
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);
2196 static int __init si_domain_work_fn(unsigned long start_pfn,
2197 unsigned long end_pfn, void *datax)
2201 *ret = iommu_domain_identity_map(si_domain,
2202 (uint64_t)start_pfn << PAGE_SHIFT,
2203 (uint64_t)end_pfn << PAGE_SHIFT);
2208 static int __init si_domain_init(int hw)
2210 struct dmar_drhd_unit *drhd;
2211 struct intel_iommu *iommu;
2214 si_domain = alloc_domain();
2218 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2220 for_each_active_iommu(iommu, drhd) {
2221 ret = iommu_attach_domain(si_domain, iommu);
2223 domain_exit(si_domain);
2228 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2229 domain_exit(si_domain);
2233 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2238 for_each_online_node(nid) {
2239 work_with_active_regions(nid, si_domain_work_fn, &ret);
2247 static void domain_remove_one_dev_info(struct dmar_domain *domain,
2248 struct pci_dev *pdev);
2249 static int identity_mapping(struct pci_dev *pdev)
2251 struct device_domain_info *info;
2253 if (likely(!iommu_identity_mapping))
2256 info = pdev->dev.archdata.iommu;
2257 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2258 return (info->domain == si_domain);
2263 static int domain_add_dev_info(struct dmar_domain *domain,
2264 struct pci_dev *pdev,
2267 struct device_domain_info *info;
2268 unsigned long flags;
2271 info = alloc_devinfo_mem();
2275 info->segment = pci_domain_nr(pdev->bus);
2276 info->bus = pdev->bus->number;
2277 info->devfn = pdev->devfn;
2279 info->domain = domain;
2281 spin_lock_irqsave(&device_domain_lock, flags);
2282 list_add(&info->link, &domain->devices);
2283 list_add(&info->global, &device_domain_list);
2284 pdev->dev.archdata.iommu = info;
2285 spin_unlock_irqrestore(&device_domain_lock, flags);
2287 ret = domain_context_mapping(domain, pdev, translation);
2289 spin_lock_irqsave(&device_domain_lock, flags);
2290 list_del(&info->link);
2291 list_del(&info->global);
2292 pdev->dev.archdata.iommu = NULL;
2293 spin_unlock_irqrestore(&device_domain_lock, flags);
2294 free_devinfo_mem(info);
2301 static bool device_has_rmrr(struct pci_dev *dev)
2303 struct dmar_rmrr_unit *rmrr;
2306 for_each_rmrr_units(rmrr) {
2307 for (i = 0; i < rmrr->devices_cnt; i++) {
2309 * Return TRUE if this RMRR contains the device that
2312 if (rmrr->devices[i] == dev)
2320 * There are a couple cases where we need to restrict the functionality of
2321 * devices associated with RMRRs. The first is when evaluating a device for
2322 * identity mapping because problems exist when devices are moved in and out
2323 * of domains and their respective RMRR information is lost. This means that
2324 * a device with associated RMRRs will never be in a "passthrough" domain.
2325 * The second is use of the device through the IOMMU API. This interface
2326 * expects to have full control of the IOVA space for the device. We cannot
2327 * satisfy both the requirement that RMRR access is maintained and have an
2328 * unencumbered IOVA space. We also have no ability to quiesce the device's
2329 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2330 * We therefore prevent devices associated with an RMRR from participating in
2331 * the IOMMU API, which eliminates them from device assignment.
2333 * In both cases we assume that PCI USB devices with RMRRs have them largely
2334 * for historical reasons and that the RMRR space is not actively used post
2335 * boot. This exclusion may change if vendors begin to abuse it.
2337 static bool device_is_rmrr_locked(struct pci_dev *pdev)
2339 return device_has_rmrr(pdev) &&
2340 (pdev->class >> 8) != PCI_CLASS_SERIAL_USB;
2343 static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2346 if (device_is_rmrr_locked(pdev))
2349 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2352 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2355 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2359 * We want to start off with all devices in the 1:1 domain, and
2360 * take them out later if we find they can't access all of memory.
2362 * However, we can't do this for PCI devices behind bridges,
2363 * because all PCI devices behind the same bridge will end up
2364 * with the same source-id on their transactions.
2366 * Practically speaking, we can't change things around for these
2367 * devices at run-time, because we can't be sure there'll be no
2368 * DMA transactions in flight for any of their siblings.
2370 * So PCI devices (unless they're on the root bus) as well as
2371 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2372 * the 1:1 domain, just in _case_ one of their siblings turns out
2373 * not to be able to map all of memory.
2375 if (!pci_is_pcie(pdev)) {
2376 if (!pci_is_root_bus(pdev->bus))
2378 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2380 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2384 * At boot time, we don't yet know if devices will be 64-bit capable.
2385 * Assume that they will -- if they turn out not to be, then we can
2386 * take them out of the 1:1 domain later.
2390 * If the device's dma_mask is less than the system's memory
2391 * size then this is not a candidate for identity mapping.
2393 u64 dma_mask = pdev->dma_mask;
2395 if (pdev->dev.coherent_dma_mask &&
2396 pdev->dev.coherent_dma_mask < dma_mask)
2397 dma_mask = pdev->dev.coherent_dma_mask;
2399 return dma_mask >= dma_get_required_mask(&pdev->dev);
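	/*
	 * For example, on a box with more than 4GiB of RAM,
	 * dma_get_required_mask() exceeds DMA_BIT_MASK(32), so a device
	 * advertising only a 32-bit dma_mask fails the check above and is
	 * left out of the 1:1 domain, while a 64-bit capable device stays in.
	 */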
2405 static int __init iommu_prepare_static_identity_mapping(int hw)
2407 struct pci_dev *pdev = NULL;
2410 ret = si_domain_init(hw);
2414 for_each_pci_dev(pdev) {
2415 /* Skip Host/PCI Bridge devices */
2416 if (IS_BRIDGE_HOST_DEVICE(pdev))
2418 if (iommu_should_identity_map(pdev, 1)) {
2419 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2420 hw ? "hardware" : "software", pci_name(pdev));
2422 ret = domain_add_dev_info(si_domain, pdev,
2423 hw ? CONTEXT_TT_PASS_THROUGH :
2424 CONTEXT_TT_MULTI_LEVEL);
2433 static int __init init_dmars(void)
2435 struct dmar_drhd_unit *drhd;
2436 struct dmar_rmrr_unit *rmrr;
2437 struct pci_dev *pdev;
2438 struct intel_iommu *iommu;
2444 * initialize and program root entry to not present
2447 for_each_drhd_unit(drhd) {
2450 * lock not needed as this is only incremented in the single-
2451 * threaded kernel __init code path; all other accesses are read-only
2456 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2459 printk(KERN_ERR "Allocating global iommu array failed\n");
2464 deferred_flush = kzalloc(g_num_of_iommus *
2465 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2466 if (!deferred_flush) {
2471 for_each_drhd_unit(drhd) {
2475 iommu = drhd->iommu;
2476 g_iommus[iommu->seq_id] = iommu;
2478 ret = iommu_init_domains(iommu);
2484 * we could share the same root & context tables
2485 * among all IOMMUs; this needs to be split out later.
2487 ret = iommu_alloc_root_entry(iommu);
2489 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2492 if (!ecap_pass_through(iommu->ecap))
2493 hw_pass_through = 0;
2497 * Start from a sane iommu hardware state.
2499 for_each_drhd_unit(drhd) {
2503 iommu = drhd->iommu;
2506 * If the queued invalidation is already initialized by us
2507 * (for example, while enabling interrupt-remapping) then
2508 * things are already rolling from a sane state.
2514 * Clear any previous faults.
2516 dmar_fault(-1, iommu);
2518 * Disable queued invalidation if supported and already enabled
2519 * before OS handover.
2521 dmar_disable_qi(iommu);
2524 for_each_drhd_unit(drhd) {
2528 iommu = drhd->iommu;
2530 if (dmar_enable_qi(iommu)) {
2532 * Queued Invalidation not enabled, use Register Based Invalidation
2535 iommu->flush.flush_context = __iommu_flush_context;
2536 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2537 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
2540 (unsigned long long)drhd->reg_base_addr);
2542 iommu->flush.flush_context = qi_flush_context;
2543 iommu->flush.flush_iotlb = qi_flush_iotlb;
2544 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
2547 (unsigned long long)drhd->reg_base_addr);
2551 if (iommu_pass_through)
2552 iommu_identity_mapping |= IDENTMAP_ALL;
2554 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
2555 iommu_identity_mapping |= IDENTMAP_GFX;
2558 check_tylersburg_isoch();
2561 * If pass-through is not set or not enabled, set up context entries for
2562 * identity mappings for rmrr, gfx, and isa, and possibly fall back to
2563 * static identity mapping if iommu_identity_mapping is set.
2565 if (iommu_identity_mapping) {
2566 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2568 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2574 * for each dev attached to rmrr
2576 * locate drhd for dev, alloc domain for dev
2577 * allocate free domain
2578 * allocate page table entries for rmrr
2579 * if context not allocated for bus
2580 * allocate and init context
2581 * set present in root table for this bus
2582 * init context with domain, translation etc
2586 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2587 for_each_rmrr_units(rmrr) {
2588 for (i = 0; i < rmrr->devices_cnt; i++) {
2589 pdev = rmrr->devices[i];
2591 * some BIOSes list non-existent devices in the DMAR table
2596 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2599 "IOMMU: mapping reserved region failed\n");
2603 iommu_prepare_isa();
2608 * global invalidate context cache
2609 * global invalidate iotlb
2610 * enable translation
2612 for_each_drhd_unit(drhd) {
2613 if (drhd->ignored) {
2615 * we always have to disable PMRs or DMA may fail on this iommu
2619 iommu_disable_protect_mem_regions(drhd->iommu);
2622 iommu = drhd->iommu;
2624 iommu_flush_write_buffer(iommu);
2626 ret = dmar_set_interrupt(iommu);
2630 iommu_set_root_entry(iommu);
2632 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2633 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2635 ret = iommu_enable_translation(iommu);
2639 iommu_disable_protect_mem_regions(iommu);
2644 for_each_drhd_unit(drhd) {
2647 iommu = drhd->iommu;
2654 /* This takes a number of _MM_ pages, not VTD pages */
2655 static struct iova *intel_alloc_iova(struct device *dev,
2656 struct dmar_domain *domain,
2657 unsigned long nrpages, uint64_t dma_mask)
2659 struct pci_dev *pdev = to_pci_dev(dev);
2660 struct iova *iova = NULL;
2662 /* Restrict dma_mask to the width that the iommu can handle */
2663 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2665 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2667 * First try to allocate an io virtual address in
2668 * DMA_BIT_MASK(32) and if that fails then try allocating from the higher range
2671 iova = alloc_iova(&domain->iovad, nrpages,
2672 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2676 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2677 if (unlikely(!iova)) {
2678 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2679 nrpages, pci_name(pdev));
2686 static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
2688 struct dmar_domain *domain;
2691 domain = get_domain_for_dev(pdev,
2692 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2695 "Allocating domain for %s failed", pci_name(pdev));
2699 /* make sure context mapping is ok */
2700 if (unlikely(!domain_context_mapped(pdev))) {
2701 ret = domain_context_mapping(domain, pdev,
2702 CONTEXT_TT_MULTI_LEVEL);
2705 "Domain context map for %s failed",
2714 static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2716 struct device_domain_info *info;
2718 /* No lock here, assumes no domain exit in normal case */
2719 info = dev->dev.archdata.iommu;
2721 return info->domain;
2723 return __get_valid_domain_for_dev(dev);
2726 static int iommu_dummy(struct pci_dev *pdev)
2728 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
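/*
 * iommu_no_mapping() below returns non-zero when a device needs no DMA
 * remapping at all: either it is a dummy device, or it is (still)
 * identity-mapped in si_domain.  The identity-map decision is re-evaluated
 * on each call, so a device may be dropped from si_domain (e.g. when its
 * 32-bit mask cannot cover all of memory) or added back into it.
 */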
2731 /* Check if the pdev needs to go through the non-identity map and unmap process. */
2732 static int iommu_no_mapping(struct device *dev)
2734 struct pci_dev *pdev;
2737 if (unlikely(dev->bus != &pci_bus_type))
2740 pdev = to_pci_dev(dev);
2741 if (iommu_dummy(pdev))
2744 if (!iommu_identity_mapping)
2747 found = identity_mapping(pdev);
2749 if (iommu_should_identity_map(pdev, 0))
2753 * The 32-bit device is removed from si_domain and falls back
2754 * to non-identity mapping.
2756 domain_remove_one_dev_info(si_domain, pdev);
2757 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2763 * A 64-bit DMA device that was detached from a VM is put back
2764 * into si_domain for identity mapping.
2766 if (iommu_should_identity_map(pdev, 0)) {
2768 ret = domain_add_dev_info(si_domain, pdev,
2770 CONTEXT_TT_PASS_THROUGH :
2771 CONTEXT_TT_MULTI_LEVEL);
2773 printk(KERN_INFO "64bit %s uses identity mapping\n",
2783 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2784 size_t size, int dir, u64 dma_mask)
2786 struct pci_dev *pdev = to_pci_dev(hwdev);
2787 struct dmar_domain *domain;
2788 phys_addr_t start_paddr;
2792 struct intel_iommu *iommu;
2793 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
2795 BUG_ON(dir == DMA_NONE);
2797 if (iommu_no_mapping(hwdev))
2800 domain = get_valid_domain_for_dev(pdev);
2804 iommu = domain_get_iommu(domain);
2805 size = aligned_nrpages(paddr, size);
2807 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
2812 * Check if DMAR supports zero-length reads on write-only mappings
2815 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2816 !cap_zlr(iommu->cap))
2817 prot |= DMA_PTE_READ;
2818 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2819 prot |= DMA_PTE_WRITE;
2821 * paddr .. (paddr + size) might span only a partial page, so we map the whole
2822 * page. Note: if two parts of one page are separately mapped, we
2823 * might have two guest addresses mapping to the same host paddr, but this
2824 * is not a big problem
2826 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2827 mm_to_dma_pfn(paddr_pfn), size, prot);
2831 /* it's a non-present to present mapping. Only flush if caching mode */
2832 if (cap_caching_mode(iommu->cap))
2833 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
2835 iommu_flush_write_buffer(iommu);
2837 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2838 start_paddr += paddr & ~PAGE_MASK;
2843 __free_iova(&domain->iovad, iova);
2844 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
2845 pci_name(pdev), size, (unsigned long long)paddr, dir);
2849 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2850 unsigned long offset, size_t size,
2851 enum dma_data_direction dir,
2852 struct dma_attrs *attrs)
2854 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2855 dir, to_pci_dev(dev)->dma_mask);
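/*
 * A minimal usage sketch (compiled out): how a driver typically reaches
 * intel_map_page() through the generic DMA API once intel_dma_ops is
 * installed.  "my_pdev" and "my_page" are hypothetical names, not part of
 * this driver.
 */
#if 0
static int example_map_one_page(struct pci_dev *my_pdev, struct page *my_page)
{
	dma_addr_t handle;

	/* dispatches to intel_map_page() via dma_ops */
	handle = dma_map_page(&my_pdev->dev, my_page, 0, PAGE_SIZE,
			      DMA_TO_DEVICE);
	if (dma_mapping_error(&my_pdev->dev, handle))
		return -ENOMEM;

	/* ... device performs DMA against "handle" here ... */

	dma_unmap_page(&my_pdev->dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}
#endif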
2858 static void flush_unmaps(void)
2864 /* just flush them all */
2865 for (i = 0; i < g_num_of_iommus; i++) {
2866 struct intel_iommu *iommu = g_iommus[i];
2870 if (!deferred_flush[i].next)
2873 /* In caching mode, global flushes make emulation expensive */
2874 if (!cap_caching_mode(iommu->cap))
2875 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2876 DMA_TLB_GLOBAL_FLUSH);
2877 for (j = 0; j < deferred_flush[i].next; j++) {
2879 struct iova *iova = deferred_flush[i].iova[j];
2880 struct dmar_domain *domain = deferred_flush[i].domain[j];
2882 /* On real hardware multiple invalidations are expensive */
2883 if (cap_caching_mode(iommu->cap))
2884 iommu_flush_iotlb_psi(iommu, domain->id,
2885 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2887 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2888 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2889 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2891 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2893 deferred_flush[i].next = 0;
2899 static void flush_unmaps_timeout(unsigned long data)
2901 unsigned long flags;
2903 spin_lock_irqsave(&async_umap_flush_lock, flags);
2905 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2908 static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2910 unsigned long flags;
2912 struct intel_iommu *iommu;
2914 spin_lock_irqsave(&async_umap_flush_lock, flags);
2915 if (list_size == HIGH_WATER_MARK)
2918 iommu = domain_get_iommu(dom);
2919 iommu_id = iommu->seq_id;
2921 next = deferred_flush[iommu_id].next;
2922 deferred_flush[iommu_id].domain[next] = dom;
2923 deferred_flush[iommu_id].iova[next] = iova;
2924 deferred_flush[iommu_id].next++;
2927 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2931 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
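/*
 * The deferred-unmap machinery above batches IOTLB invalidations on the
 * non-strict unmap path: add_unmap() parks the IOVA in the per-IOMMU
 * deferred_flush table and arms a 10ms timer, and flush_unmaps() then
 * invalidates and frees the whole batch, either from the timer or
 * immediately once HIGH_WATER_MARK entries have accumulated.
 */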
2934 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2935 size_t size, enum dma_data_direction dir,
2936 struct dma_attrs *attrs)
2938 struct pci_dev *pdev = to_pci_dev(dev);
2939 struct dmar_domain *domain;
2940 unsigned long start_pfn, last_pfn;
2942 struct intel_iommu *iommu;
2944 if (iommu_no_mapping(dev))
2947 domain = find_domain(pdev);
2950 iommu = domain_get_iommu(domain);
2952 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2953 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2954 (unsigned long long)dev_addr))
2957 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2958 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2960 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2961 pci_name(pdev), start_pfn, last_pfn);
2963 /* clear the whole page */
2964 dma_pte_clear_range(domain, start_pfn, last_pfn);
2966 /* free page tables */
2967 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2969 if (intel_iommu_strict) {
2970 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2971 last_pfn - start_pfn + 1, 0);
2973 __free_iova(&domain->iovad, iova);
2975 add_unmap(domain, iova);
2977 * queue up the release of the unmap to save the roughly 1/6th of
2978 * the cpu time otherwise used up by the iotlb flush operation...
2983 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2984 dma_addr_t *dma_handle, gfp_t flags)
2989 size = PAGE_ALIGN(size);
2990 order = get_order(size);
2992 if (!iommu_no_mapping(hwdev))
2993 flags &= ~(GFP_DMA | GFP_DMA32);
2994 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2995 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
3001 vaddr = (void *)__get_free_pages(flags, order);
3004 memset(vaddr, 0, size);
3006 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
3008 hwdev->coherent_dma_mask);
3011 free_pages((unsigned long)vaddr, order);
3015 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
3016 dma_addr_t dma_handle)
3020 size = PAGE_ALIGN(size);
3021 order = get_order(size);
3023 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
3024 free_pages((unsigned long)vaddr, order);
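/*
 * A minimal usage sketch (compiled out): allocating and freeing a DMA
 * buffer through the coherent API, which lands in intel_alloc_coherent()
 * and intel_free_coherent() above.  "my_dev" is a hypothetical device.
 */
#if 0
static int example_coherent_buffer(struct device *my_dev, size_t len)
{
	dma_addr_t handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(my_dev, len, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program the device with "handle", touch cpu_addr from the CPU ... */

	dma_free_coherent(my_dev, len, cpu_addr, handle);
	return 0;
}
#endif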
3027 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
3028 int nelems, enum dma_data_direction dir,
3029 struct dma_attrs *attrs)
3031 struct pci_dev *pdev = to_pci_dev(hwdev);
3032 struct dmar_domain *domain;
3033 unsigned long start_pfn, last_pfn;
3035 struct intel_iommu *iommu;
3037 if (iommu_no_mapping(hwdev))
3040 domain = find_domain(pdev);
3043 iommu = domain_get_iommu(domain);
3045 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
3046 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3047 (unsigned long long)sglist[0].dma_address))
3050 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3051 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3053 /* clear the whole page */
3054 dma_pte_clear_range(domain, start_pfn, last_pfn);
3056 /* free page tables */
3057 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
3059 if (intel_iommu_strict) {
3060 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3061 last_pfn - start_pfn + 1, 0);
3063 __free_iova(&domain->iovad, iova);
3065 add_unmap(domain, iova);
3067 * queue up the release of the unmap to save the roughly 1/6th of
3068 * the cpu time otherwise used up by the iotlb flush operation...
3073 static int intel_nontranslate_map_sg(struct device *hddev,
3074 struct scatterlist *sglist, int nelems, int dir)
3077 struct scatterlist *sg;
3079 for_each_sg(sglist, sg, nelems, i) {
3080 BUG_ON(!sg_page(sg));
3081 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3082 sg->dma_length = sg->length;
3087 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3088 enum dma_data_direction dir, struct dma_attrs *attrs)
3091 struct pci_dev *pdev = to_pci_dev(hwdev);
3092 struct dmar_domain *domain;
3095 struct iova *iova = NULL;
3097 struct scatterlist *sg;
3098 unsigned long start_vpfn;
3099 struct intel_iommu *iommu;
3101 BUG_ON(dir == DMA_NONE);
3102 if (iommu_no_mapping(hwdev))
3103 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
3105 domain = get_valid_domain_for_dev(pdev);
3109 iommu = domain_get_iommu(domain);
3111 for_each_sg(sglist, sg, nelems, i)
3112 size += aligned_nrpages(sg->offset, sg->length);
3114 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3117 sglist->dma_length = 0;
3122 * Check if DMAR supports zero-length reads on write-only mappings
3125 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3126 !cap_zlr(iommu->cap))
3127 prot |= DMA_PTE_READ;
3128 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3129 prot |= DMA_PTE_WRITE;
3131 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3133 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3134 if (unlikely(ret)) {
3135 /* clear the page */
3136 dma_pte_clear_range(domain, start_vpfn,
3137 start_vpfn + size - 1);
3138 /* free page tables */
3139 dma_pte_free_pagetable(domain, start_vpfn,
3140 start_vpfn + size - 1);
3142 __free_iova(&domain->iovad, iova);
3146 /* it's a non-present to present mapping. Only flush if caching mode */
3147 if (cap_caching_mode(iommu->cap))
3148 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
3150 iommu_flush_write_buffer(iommu);
3155 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3160 struct dma_map_ops intel_dma_ops = {
3161 .alloc_coherent = intel_alloc_coherent,
3162 .free_coherent = intel_free_coherent,
3163 .map_sg = intel_map_sg,
3164 .unmap_sg = intel_unmap_sg,
3165 .map_page = intel_map_page,
3166 .unmap_page = intel_unmap_page,
3167 .mapping_error = intel_mapping_error,
3170 static inline int iommu_domain_cache_init(void)
3174 iommu_domain_cache = kmem_cache_create("iommu_domain",
3175 sizeof(struct dmar_domain),
3180 if (!iommu_domain_cache) {
3181 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3188 static inline int iommu_devinfo_cache_init(void)
3192 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3193 sizeof(struct device_domain_info),
3197 if (!iommu_devinfo_cache) {
3198 printk(KERN_ERR "Couldn't create devinfo cache\n");
3205 static inline int iommu_iova_cache_init(void)
3209 iommu_iova_cache = kmem_cache_create("iommu_iova",
3210 sizeof(struct iova),
3214 if (!iommu_iova_cache) {
3215 printk(KERN_ERR "Couldn't create iova cache\n");
3222 static int __init iommu_init_mempool(void)
3225 ret = iommu_iova_cache_init();
3229 ret = iommu_domain_cache_init();
3233 ret = iommu_devinfo_cache_init();
3237 kmem_cache_destroy(iommu_domain_cache);
3239 kmem_cache_destroy(iommu_iova_cache);
3244 static void __init iommu_exit_mempool(void)
3246 kmem_cache_destroy(iommu_devinfo_cache);
3247 kmem_cache_destroy(iommu_domain_cache);
3248 kmem_cache_destroy(iommu_iova_cache);
3252 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3254 struct dmar_drhd_unit *drhd;
3258 /* We know that this device on this chipset has its own IOMMU.
3259 * If we find it under a different IOMMU, then the BIOS is lying
3260 * to us. Hope that the IOMMU for this device is actually
3261 * disabled, and it needs no translation...
3263 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3265 /* "can't" happen */
3266 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3269 vtbar &= 0xffff0000;
3271 /* we know that this iommu should be at offset 0xa000 from vtbar */
3272 drhd = dmar_find_matched_drhd_unit(pdev);
3273 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3274 TAINT_FIRMWARE_WORKAROUND,
3275 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3276 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3278 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
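/*
 * For example, if the config dword at offset 0xb0 reads back as 0xfedb100c,
 * vtbar is 0xfedb0000 after masking and the DRHD that really owns this
 * QuickData device must report reg_base_addr 0xfedba000; any other value
 * means the BIOS pointed us at the wrong VT-d unit, so the device is marked
 * with DUMMY_DEVICE_DOMAIN_INFO and bypasses translation.
 */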
3280 static void __init init_no_remapping_devices(void)
3282 struct dmar_drhd_unit *drhd;
3284 for_each_drhd_unit(drhd) {
3285 if (!drhd->include_all) {
3287 for (i = 0; i < drhd->devices_cnt; i++)
3288 if (drhd->devices[i] != NULL)
3290 /* ignore this DMAR unit if no PCI devices exist under it */
3291 if (i == drhd->devices_cnt)
3296 for_each_drhd_unit(drhd) {
3298 if (drhd->ignored || drhd->include_all)
3301 for (i = 0; i < drhd->devices_cnt; i++)
3302 if (drhd->devices[i] &&
3303 !IS_GFX_DEVICE(drhd->devices[i]))
3306 if (i < drhd->devices_cnt)
3309 /* This IOMMU has *only* gfx devices. Either bypass it or
3310 set the gfx_mapped flag, as appropriate */
3312 intel_iommu_gfx_mapped = 1;
3315 for (i = 0; i < drhd->devices_cnt; i++) {
3316 if (!drhd->devices[i])
3318 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3324 #ifdef CONFIG_SUSPEND
3325 static int init_iommu_hw(void)
3327 struct dmar_drhd_unit *drhd;
3328 struct intel_iommu *iommu = NULL;
3330 for_each_active_iommu(iommu, drhd)
3332 dmar_reenable_qi(iommu);
3334 for_each_iommu(iommu, drhd) {
3335 if (drhd->ignored) {
3337 * we always have to disable PMRs or DMA may fail on
3341 iommu_disable_protect_mem_regions(iommu);
3345 iommu_flush_write_buffer(iommu);
3347 iommu_set_root_entry(iommu);
3349 iommu->flush.flush_context(iommu, 0, 0, 0,
3350 DMA_CCMD_GLOBAL_INVL);
3351 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3352 DMA_TLB_GLOBAL_FLUSH);
3353 if (iommu_enable_translation(iommu))
3355 iommu_disable_protect_mem_regions(iommu);
3361 static void iommu_flush_all(void)
3363 struct dmar_drhd_unit *drhd;
3364 struct intel_iommu *iommu;
3366 for_each_active_iommu(iommu, drhd) {
3367 iommu->flush.flush_context(iommu, 0, 0, 0,
3368 DMA_CCMD_GLOBAL_INVL);
3369 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3370 DMA_TLB_GLOBAL_FLUSH);
3374 static int iommu_suspend(void)
3376 struct dmar_drhd_unit *drhd;
3377 struct intel_iommu *iommu = NULL;
3380 for_each_active_iommu(iommu, drhd) {
3381 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3383 if (!iommu->iommu_state)
3389 for_each_active_iommu(iommu, drhd) {
3390 iommu_disable_translation(iommu);
3392 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3394 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3395 readl(iommu->reg + DMAR_FECTL_REG);
3396 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3397 readl(iommu->reg + DMAR_FEDATA_REG);
3398 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3399 readl(iommu->reg + DMAR_FEADDR_REG);
3400 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3401 readl(iommu->reg + DMAR_FEUADDR_REG);
3403 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3408 for_each_active_iommu(iommu, drhd)
3409 kfree(iommu->iommu_state);
3414 static void iommu_resume(void)
3416 struct dmar_drhd_unit *drhd;
3417 struct intel_iommu *iommu = NULL;
3420 if (init_iommu_hw()) {
3422 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3424 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3428 for_each_active_iommu(iommu, drhd) {
3430 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3432 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3433 iommu->reg + DMAR_FECTL_REG);
3434 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3435 iommu->reg + DMAR_FEDATA_REG);
3436 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3437 iommu->reg + DMAR_FEADDR_REG);
3438 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3439 iommu->reg + DMAR_FEUADDR_REG);
3441 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3444 for_each_active_iommu(iommu, drhd)
3445 kfree(iommu->iommu_state);
3448 static struct syscore_ops iommu_syscore_ops = {
3449 .resume = iommu_resume,
3450 .suspend = iommu_suspend,
3453 static void __init init_iommu_pm_ops(void)
3455 register_syscore_ops(&iommu_syscore_ops);
3459 static inline void init_iommu_pm_ops(void) {}
3460 #endif /* CONFIG_PM */
3462 LIST_HEAD(dmar_rmrr_units);
3464 static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3466 list_add(&rmrr->list, &dmar_rmrr_units);
3470 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3472 struct acpi_dmar_reserved_memory *rmrr;
3473 struct dmar_rmrr_unit *rmrru;
3475 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3479 rmrru->hdr = header;
3480 rmrr = (struct acpi_dmar_reserved_memory *)header;
3481 rmrru->base_address = rmrr->base_address;
3482 rmrru->end_address = rmrr->end_address;
3484 dmar_register_rmrr_unit(rmrru);
3489 rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3491 struct acpi_dmar_reserved_memory *rmrr;
3494 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
3495 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
3496 ((void *)rmrr) + rmrr->header.length,
3497 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
3499 if (ret || (rmrru->devices_cnt == 0)) {
3500 list_del(&rmrru->list);
3506 static LIST_HEAD(dmar_atsr_units);
3508 int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3510 struct acpi_dmar_atsr *atsr;
3511 struct dmar_atsr_unit *atsru;
3513 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3514 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3519 atsru->include_all = atsr->flags & 0x1;
3521 list_add(&atsru->list, &dmar_atsr_units);
3526 static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3529 struct acpi_dmar_atsr *atsr;
3531 if (atsru->include_all)
3534 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3535 rc = dmar_parse_dev_scope((void *)(atsr + 1),
3536 (void *)atsr + atsr->header.length,
3537 &atsru->devices_cnt, &atsru->devices,
3539 if (rc || !atsru->devices_cnt) {
3540 list_del(&atsru->list);
3547 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3550 struct pci_bus *bus;
3551 struct acpi_dmar_atsr *atsr;
3552 struct dmar_atsr_unit *atsru;
3554 dev = pci_physfn(dev);
3556 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3557 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3558 if (atsr->segment == pci_domain_nr(dev->bus))
3565 for (bus = dev->bus; bus; bus = bus->parent) {
3566 struct pci_dev *bridge = bus->self;
3568 if (!bridge || !pci_is_pcie(bridge) ||
3569 bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
3572 if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
3573 for (i = 0; i < atsru->devices_cnt; i++)
3574 if (atsru->devices[i] == bridge)
3580 if (atsru->include_all)
3586 int __init dmar_parse_rmrr_atsr_dev(void)
3588 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3589 struct dmar_atsr_unit *atsr, *atsr_n;
3592 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
3593 ret = rmrr_parse_dev(rmrr);
3598 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
3599 ret = atsr_parse_dev(atsr);
3608 * Here we only respond to the action of a device being unbound from its driver.
3610 * A newly added device is not attached to its DMAR domain here yet; that happens
3611 * when the device is first mapped to an iova.
3613 static int device_notifier(struct notifier_block *nb,
3614 unsigned long action, void *data)
3616 struct device *dev = data;
3617 struct pci_dev *pdev = to_pci_dev(dev);
3618 struct dmar_domain *domain;
3620 if (iommu_no_mapping(dev))
3623 domain = find_domain(pdev);
3627 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
3628 domain_remove_one_dev_info(domain, pdev);
3630 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3631 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3632 list_empty(&domain->devices))
3633 domain_exit(domain);
3639 static struct notifier_block device_nb = {
3640 .notifier_call = device_notifier,
3643 int __init intel_iommu_init(void)
3646 struct dmar_drhd_unit *drhd;
3648 /* VT-d is required for a TXT/tboot launch, so enforce that */
3649 force_on = tboot_force_iommu();
3651 if (dmar_table_init()) {
3653 panic("tboot: Failed to initialize DMAR table\n");
3658 * Disable translation if already enabled prior to OS handover.
3660 for_each_drhd_unit(drhd) {
3661 struct intel_iommu *iommu;
3666 iommu = drhd->iommu;
3667 if (iommu->gcmd & DMA_GCMD_TE)
3668 iommu_disable_translation(iommu);
3671 if (dmar_dev_scope_init() < 0) {
3673 panic("tboot: Failed to initialize DMAR device scope\n");
3677 if (no_iommu || dmar_disabled)
3680 if (iommu_init_mempool()) {
3682 panic("tboot: Failed to initialize iommu memory\n");
3686 if (list_empty(&dmar_rmrr_units))
3687 printk(KERN_INFO "DMAR: No RMRR found\n");
3689 if (list_empty(&dmar_atsr_units))
3690 printk(KERN_INFO "DMAR: No ATSR found\n");
3692 if (dmar_init_reserved_ranges()) {
3694 panic("tboot: Failed to reserve iommu ranges\n");
3698 init_no_remapping_devices();
3703 panic("tboot: Failed to initialize DMARs\n");
3704 printk(KERN_ERR "IOMMU: dmar init failed\n");
3705 put_iova_domain(&reserved_iova_list);
3706 iommu_exit_mempool();
3710 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3712 init_timer(&unmap_timer);
3713 #ifdef CONFIG_SWIOTLB
3716 dma_ops = &intel_dma_ops;
3718 init_iommu_pm_ops();
3720 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
3722 bus_register_notifier(&pci_bus_type, &device_nb);
3724 intel_iommu_enabled = 1;
3729 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3730 struct pci_dev *pdev)
3732 struct pci_dev *tmp, *parent;
3734 if (!iommu || !pdev)
3737 /* dependent device detach */
3738 tmp = pci_find_upstream_pcie_bridge(pdev);
3739 /* Secondary interface's bus number and devfn 0 */
3741 parent = pdev->bus->self;
3742 while (parent != tmp) {
3743 iommu_detach_dev(iommu, parent->bus->number,
3745 parent = parent->bus->self;
3747 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
3748 iommu_detach_dev(iommu,
3749 tmp->subordinate->number, 0);
3750 else /* this is a legacy PCI bridge */
3751 iommu_detach_dev(iommu, tmp->bus->number,
3756 static void domain_remove_one_dev_info(struct dmar_domain *domain,
3757 struct pci_dev *pdev)
3759 struct device_domain_info *info;
3760 struct intel_iommu *iommu;
3761 unsigned long flags;
3763 struct list_head *entry, *tmp;
3765 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3770 spin_lock_irqsave(&device_domain_lock, flags);
3771 list_for_each_safe(entry, tmp, &domain->devices) {
3772 info = list_entry(entry, struct device_domain_info, link);
3773 if (info->segment == pci_domain_nr(pdev->bus) &&
3774 info->bus == pdev->bus->number &&
3775 info->devfn == pdev->devfn) {
3776 list_del(&info->link);
3777 list_del(&info->global);
3779 info->dev->dev.archdata.iommu = NULL;
3780 spin_unlock_irqrestore(&device_domain_lock, flags);
3782 iommu_disable_dev_iotlb(info);
3783 iommu_detach_dev(iommu, info->bus, info->devfn);
3784 iommu_detach_dependent_devices(iommu, pdev);
3785 free_devinfo_mem(info);
3787 spin_lock_irqsave(&device_domain_lock, flags);
3795 /* if there are no other devices under the same iommu
3796 * owned by this domain, clear this iommu in iommu_bmp and
3797 * update the iommu count and coherency
3799 if (iommu == device_to_iommu(info->segment, info->bus,
3804 spin_unlock_irqrestore(&device_domain_lock, flags);
3807 unsigned long tmp_flags;
3808 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3809 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3810 domain->iommu_count--;
3811 domain_update_iommu_cap(domain);
3812 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3814 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3815 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3816 spin_lock_irqsave(&iommu->lock, tmp_flags);
3817 clear_bit(domain->id, iommu->domain_ids);
3818 iommu->domains[domain->id] = NULL;
3819 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3824 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3826 struct device_domain_info *info;
3827 struct intel_iommu *iommu;
3828 unsigned long flags1, flags2;
3830 spin_lock_irqsave(&device_domain_lock, flags1);
3831 while (!list_empty(&domain->devices)) {
3832 info = list_entry(domain->devices.next,
3833 struct device_domain_info, link);
3834 list_del(&info->link);
3835 list_del(&info->global);
3837 info->dev->dev.archdata.iommu = NULL;
3839 spin_unlock_irqrestore(&device_domain_lock, flags1);
3841 iommu_disable_dev_iotlb(info);
3842 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
3843 iommu_detach_dev(iommu, info->bus, info->devfn);
3844 iommu_detach_dependent_devices(iommu, info->dev);
3846 /* clear this iommu in iommu_bmp, update iommu count and coherency
3849 spin_lock_irqsave(&domain->iommu_lock, flags2);
3850 if (test_and_clear_bit(iommu->seq_id,
3851 &domain->iommu_bmp)) {
3852 domain->iommu_count--;
3853 domain_update_iommu_cap(domain);
3855 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3857 free_devinfo_mem(info);
3858 spin_lock_irqsave(&device_domain_lock, flags1);
3860 spin_unlock_irqrestore(&device_domain_lock, flags1);
3863 /* domain id for virtual machines; it won't be set in the context entry */
3864 static unsigned long vm_domid;
3866 static struct dmar_domain *iommu_alloc_vm_domain(void)
3868 struct dmar_domain *domain;
3870 domain = alloc_domain_mem();
3874 domain->id = vm_domid++;
3876 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3877 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3882 static int md_domain_init(struct dmar_domain *domain, int guest_width)
3886 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3887 spin_lock_init(&domain->iommu_lock);
3889 domain_reserve_special_ranges(domain);
3891 /* calculate AGAW */
3892 domain->gaw = guest_width;
3893 adjust_width = guestwidth_to_adjustwidth(guest_width);
3894 domain->agaw = width_to_agaw(adjust_width);
3896 INIT_LIST_HEAD(&domain->devices);
3898 domain->iommu_count = 0;
3899 domain->iommu_coherency = 0;
3900 domain->iommu_snooping = 0;
3901 domain->iommu_superpage = 0;
3902 domain->max_addr = 0;
3905 /* always allocate the top pgd */
3906 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
3909 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3913 static void iommu_free_vm_domain(struct dmar_domain *domain)
3915 unsigned long flags;
3916 struct dmar_drhd_unit *drhd;
3917 struct intel_iommu *iommu;
3919 unsigned long ndomains;
3921 for_each_drhd_unit(drhd) {
3924 iommu = drhd->iommu;
3926 ndomains = cap_ndoms(iommu->cap);
3927 for_each_set_bit(i, iommu->domain_ids, ndomains) {
3928 if (iommu->domains[i] == domain) {
3929 spin_lock_irqsave(&iommu->lock, flags);
3930 clear_bit(i, iommu->domain_ids);
3931 iommu->domains[i] = NULL;
3932 spin_unlock_irqrestore(&iommu->lock, flags);
3939 static void vm_domain_exit(struct dmar_domain *domain)
3941 /* Domain 0 is reserved, so don't process it */
3945 vm_domain_remove_all_dev_info(domain);
3947 put_iova_domain(&domain->iovad);
3950 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3952 /* free page tables */
3953 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3955 iommu_free_vm_domain(domain);
3956 free_domain_mem(domain);
3959 static int intel_iommu_domain_init(struct iommu_domain *domain)
3961 struct dmar_domain *dmar_domain;
3963 dmar_domain = iommu_alloc_vm_domain();
3966 "intel_iommu_domain_init: dmar_domain == NULL\n");
3969 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3971 "intel_iommu_domain_init() failed\n");
3972 vm_domain_exit(dmar_domain);
3975 domain_update_iommu_cap(dmar_domain);
3976 domain->priv = dmar_domain;
3981 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3983 struct dmar_domain *dmar_domain = domain->priv;
3985 domain->priv = NULL;
3986 vm_domain_exit(dmar_domain);
3989 static int intel_iommu_attach_device(struct iommu_domain *domain,
3992 struct dmar_domain *dmar_domain = domain->priv;
3993 struct pci_dev *pdev = to_pci_dev(dev);
3994 struct intel_iommu *iommu;
3997 if (device_is_rmrr_locked(pdev)) {
3998 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4002 /* normally pdev is not mapped */
4003 if (unlikely(domain_context_mapped(pdev))) {
4004 struct dmar_domain *old_domain;
4006 old_domain = find_domain(pdev);
4008 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
4009 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
4010 domain_remove_one_dev_info(old_domain, pdev);
4012 domain_remove_dev_info(old_domain);
4016 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
4021 /* check if this iommu agaw is sufficient for max mapped address */
4022 addr_width = agaw_to_width(iommu->agaw);
4023 if (addr_width > cap_mgaw(iommu->cap))
4024 addr_width = cap_mgaw(iommu->cap);
4026 if (dmar_domain->max_addr > (1LL << addr_width)) {
4027 printk(KERN_ERR "%s: iommu width (%d) is not "
4028 "sufficient for the mapped address (%llx)\n",
4029 __func__, addr_width, dmar_domain->max_addr);
4032 dmar_domain->gaw = addr_width;
4035 * Knock out extra levels of page tables if necessary
4037 while (iommu->agaw < dmar_domain->agaw) {
4038 struct dma_pte *pte;
4040 pte = dmar_domain->pgd;
4041 if (dma_pte_present(pte)) {
4042 dmar_domain->pgd = (struct dma_pte *)
4043 phys_to_virt(dma_pte_addr(pte));
4044 free_pgtable_page(pte);
4046 dmar_domain->agaw--;
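		/*
		 * For example, a domain built with 4-level (48-bit, agaw 2)
		 * page tables that is attached to an IOMMU supporting only
		 * 3 levels (39-bit, agaw 1) sheds one top-level page
		 * directory per pass through this loop.
		 */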
4049 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
4052 static void intel_iommu_detach_device(struct iommu_domain *domain,
4055 struct dmar_domain *dmar_domain = domain->priv;
4056 struct pci_dev *pdev = to_pci_dev(dev);
4058 domain_remove_one_dev_info(dmar_domain, pdev);
4061 static int intel_iommu_map(struct iommu_domain *domain,
4062 unsigned long iova, phys_addr_t hpa,
4063 int gfp_order, int iommu_prot)
4065 struct dmar_domain *dmar_domain = domain->priv;
4071 if (iommu_prot & IOMMU_READ)
4072 prot |= DMA_PTE_READ;
4073 if (iommu_prot & IOMMU_WRITE)
4074 prot |= DMA_PTE_WRITE;
4075 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4076 prot |= DMA_PTE_SNP;
4078 size = PAGE_SIZE << gfp_order;
4079 max_addr = iova + size;
4080 if (dmar_domain->max_addr < max_addr) {
4083 /* check if minimum agaw is sufficient for mapped address */
4084 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4085 if (end < max_addr) {
4086 printk(KERN_ERR "%s: iommu width (%d) is not "
4087 "sufficient for the mapped address (%llx)\n",
4088 __func__, dmar_domain->gaw, max_addr);
4091 dmar_domain->max_addr = max_addr;
4093 /* Round up size to next multiple of PAGE_SIZE, if it and
4094 the low bits of hpa would take us onto the next page */
4095 size = aligned_nrpages(hpa, size);
4096 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4097 hpa >> VTD_PAGE_SHIFT, size, prot);
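/*
 * A minimal usage sketch (compiled out) of the external IOMMU API that ends
 * up in intel_iommu_map()/intel_iommu_unmap() above, assuming the order-based
 * iommu_map() prototype of this kernel generation.  "my_dev", "my_iova" and
 * "my_phys" are hypothetical.
 */
#if 0
static int example_iommu_api_map(struct device *my_dev,
				 unsigned long my_iova, phys_addr_t my_phys)
{
	struct iommu_domain *dom;
	int ret;

	dom = iommu_domain_alloc(&pci_bus_type);
	if (!dom)
		return -ENOMEM;

	ret = iommu_attach_device(dom, my_dev);
	if (ret)
		goto out_free;

	/* map one page (order 0), readable and writable by the device */
	ret = iommu_map(dom, my_iova, my_phys, 0, IOMMU_READ | IOMMU_WRITE);
	if (!ret)
		iommu_unmap(dom, my_iova, 0);

	iommu_detach_device(dom, my_dev);
out_free:
	iommu_domain_free(dom);
	return ret;
}
#endif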
4101 static int intel_iommu_unmap(struct iommu_domain *domain,
4102 unsigned long iova, int gfp_order)
4104 struct dmar_domain *dmar_domain = domain->priv;
4105 size_t size = PAGE_SIZE << gfp_order;
4106 int order, iommu_id;
4108 order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
4109 (iova + size - 1) >> VTD_PAGE_SHIFT);
4111 if (dmar_domain->max_addr == iova + size)
4112 dmar_domain->max_addr = iova;
4114 for_each_set_bit(iommu_id, &dmar_domain->iommu_bmp, g_num_of_iommus) {
4115 struct intel_iommu *iommu = g_iommus[iommu_id];
4119 * find bit position of dmar_domain
4121 ndomains = cap_ndoms(iommu->cap);
4122 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4123 if (iommu->domains[num] == dmar_domain)
4124 iommu_flush_iotlb_psi(iommu, num,
4125 iova >> VTD_PAGE_SHIFT,
4133 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4136 struct dmar_domain *dmar_domain = domain->priv;
4137 struct dma_pte *pte;
4140 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
4142 phys = dma_pte_addr(pte);
4147 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4150 struct dmar_domain *dmar_domain = domain->priv;
4152 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4153 return dmar_domain->iommu_snooping;
4154 if (cap == IOMMU_CAP_INTR_REMAP)
4155 return intr_remapping_enabled;
4160 static struct iommu_ops intel_iommu_ops = {
4161 .domain_init = intel_iommu_domain_init,
4162 .domain_destroy = intel_iommu_domain_destroy,
4163 .attach_dev = intel_iommu_attach_device,
4164 .detach_dev = intel_iommu_detach_device,
4165 .map = intel_iommu_map,
4166 .unmap = intel_iommu_unmap,
4167 .iova_to_phys = intel_iommu_iova_to_phys,
4168 .domain_has_cap = intel_iommu_domain_has_cap,
4171 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4173 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4174 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4178 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4179 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4180 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4181 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4182 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4183 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4184 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4186 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
4189 * Mobile 4 Series Chipset neglects to set RWBF capability,
4190 * but needs it. Same seems to hold for the desktop versions.
4192 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4196 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4197 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4198 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4199 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4200 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4201 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4202 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
4205 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
4206 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4207 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
4208 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
4209 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4210 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4211 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4212 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4214 static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4218 if (pci_read_config_word(dev, GGC, &ggc))
4221 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4222 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4224 } else if (dmar_map_gfx) {
4225 /* we have to ensure the gfx device is idle before we flush */
4226 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4227 intel_iommu_strict = 1;
4230 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4231 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4232 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4233 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4235 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4236 ISOCH DMAR unit for the Azalia sound device, but not give it any
4237 TLB entries, which causes it to deadlock. Check for that. We do
4238 this in a function called from init_dmars(), instead of in a PCI
4239 quirk, because we don't want to print the obnoxious "BIOS broken"
4240 message if VT-d is actually disabled.
4242 static void __init check_tylersburg_isoch(void)
4244 struct pci_dev *pdev;
4245 uint32_t vtisochctrl;
4247 /* If there's no Azalia in the system anyway, forget it. */
4248 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4253 /* System Management Registers. Might be hidden, in which case
4254 we can't do the sanity check. But that's OK, because the
4255 known-broken BIOSes _don't_ actually hide it, so far. */
4256 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4260 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4267 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4268 if (vtisochctrl & 1)
4271 /* Drop all bits other than the number of TLB entries */
4272 vtisochctrl &= 0x1c;
4274 /* If we have the recommended number of TLB entries (16), fine. */
4275 if (vtisochctrl == 0x10)
4278 /* Zero TLB entries? You get to ride the short bus to school. */
4280 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4281 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4282 dmi_get_system_info(DMI_BIOS_VENDOR),
4283 dmi_get_system_info(DMI_BIOS_VERSION),
4284 dmi_get_system_info(DMI_PRODUCT_VERSION));
4285 iommu_identity_mapping |= IDENTMAP_AZALIA;
4289 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",