2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 * Author: Fenghua Yu <fenghua.yu@intel.com>
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/timer.h>
36 #include <linux/iova.h>
37 #include <linux/iommu.h>
38 #include <linux/intel-iommu.h>
39 #include <linux/sysdev.h>
40 #include <asm/cacheflush.h>
41 #include <asm/iommu.h>
44 #define ROOT_SIZE VTD_PAGE_SIZE
45 #define CONTEXT_SIZE VTD_PAGE_SIZE
47 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
48 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
50 #define IOAPIC_RANGE_START (0xfee00000)
51 #define IOAPIC_RANGE_END (0xfeefffff)
52 #define IOVA_START_ADDR (0x1000)
54 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
56 #define MAX_AGAW_WIDTH 64
58 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
60 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
61 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
62 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
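/*
 * Example (assuming 4KiB pages, i.e. PAGE_SHIFT == 12):
 * IOVA_PFN(DMA_BIT_MASK(32)) == 0xffffffff >> 12 == 0xfffff, so
 * DMA_32BIT_PFN is the last page frame reachable with a 32-bit DMA mask.
 */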
64 #ifndef PHYSICAL_PAGE_MASK
65 #define PHYSICAL_PAGE_MASK PAGE_MASK
66 #endif
68 /* global iommu list, set NULL for ignored DMAR units */
69 static struct intel_iommu **g_iommus;
71 static int rwbf_quirk;
76 * 12-63: Context Ptr (12 - (haw-1))
83 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
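/*
 * A root entry is 16 bytes (a u64 value plus a reserved u64), so one
 * VTD_PAGE_SIZE (4KiB) root table holds ROOT_ENTRY_NR == 256 entries,
 * one per PCI bus number.
 */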
84 static inline bool root_present(struct root_entry *root)
86 return (root->val & 1);
88 static inline void set_root_present(struct root_entry *root)
92 static inline void set_root_value(struct root_entry *root, unsigned long value)
94 root->val |= value & VTD_PAGE_MASK;
97 static inline struct context_entry *
98 get_context_addr_from_root(struct root_entry *root)
100 return (struct context_entry *)
101 (root_present(root)?phys_to_virt(
102 root->val & VTD_PAGE_MASK) :
109 * 1: fault processing disable
110 * 2-3: translation type
111 * 12-63: address space root
117 struct context_entry {
122 static inline bool context_present(struct context_entry *context)
124 return (context->lo & 1);
126 static inline void context_set_present(struct context_entry *context)
131 static inline void context_set_fault_enable(struct context_entry *context)
133 context->lo &= (((u64)-1) << 2) | 1;
136 static inline void context_set_translation_type(struct context_entry *context,
139 context->lo &= (((u64)-1) << 4) | 3;
140 context->lo |= (value & 3) << 2;
143 static inline void context_set_address_root(struct context_entry *context,
146 context->lo |= value & VTD_PAGE_MASK;
149 static inline void context_set_address_width(struct context_entry *context,
152 context->hi |= value & 7;
155 static inline void context_set_domain_id(struct context_entry *context,
158 context->hi |= (value & ((1 << 16) - 1)) << 8;
161 static inline void context_clear_entry(struct context_entry *context)
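/*
 * Taken together, the setters above encode the context entry layout:
 *   lo bit 0      : present
 *   lo bit 1      : fault processing disable
 *   lo bits 2-3   : translation type
 *   lo bits 12-63 : address space root (page-aligned pgd)
 *   hi bits 0-2   : address width (AGAW)
 *   hi bits 8-23  : domain id
 * e.g. context_set_domain_id(c, 5) ORs (5 & 0xffff) << 8 into c->hi.
 */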
174 * 12-63: Host physical address
180 static inline void dma_clear_pte(struct dma_pte *pte)
185 static inline void dma_set_pte_readable(struct dma_pte *pte)
187 pte->val |= DMA_PTE_READ;
190 static inline void dma_set_pte_writable(struct dma_pte *pte)
192 pte->val |= DMA_PTE_WRITE;
195 static inline void dma_set_pte_snp(struct dma_pte *pte)
197 pte->val |= DMA_PTE_SNP;
200 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
202 pte->val = (pte->val & ~3) | (prot & 3);
205 static inline u64 dma_pte_addr(struct dma_pte *pte)
207 return (pte->val & VTD_PAGE_MASK);
210 static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
212 pte->val |= (addr & VTD_PAGE_MASK);
215 static inline bool dma_pte_present(struct dma_pte *pte)
217 return (pte->val & 3) != 0;
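/*
 * Example: a leaf PTE for host page frame 0x12345 mapped read/write is
 * (0x12345 << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; it is
 * "present" because at least one of the two low permission bits is set.
 */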
220 /* devices under the same p2p bridge are owned in one domain */
221 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
223 /* domain represents a virtual machine: more than one device
224 * across iommus may be owned by one domain, e.g. a kvm guest.
226 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
229 int id; /* domain id */
230 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
232 struct list_head devices; /* all devices' list */
233 struct iova_domain iovad; /* iova's that belong to this domain */
235 struct dma_pte *pgd; /* virtual address */
236 spinlock_t mapping_lock; /* page table lock */
237 int gaw; /* max guest address width */
239 /* adjusted guest address width, 0 is level 2 30-bit */
242 int flags; /* flags to find out type of domain */
244 int iommu_coherency;/* indicate coherency of iommu access */
245 int iommu_snooping; /* indicate snooping control feature*/
246 int iommu_count; /* reference count of iommu */
247 spinlock_t iommu_lock; /* protect iommu set in domain */
248 u64 max_addr; /* maximum mapped address */
251 /* PCI domain-device relationship */
252 struct device_domain_info {
253 struct list_head link; /* link to domain siblings */
254 struct list_head global; /* link to global list */
255 int segment; /* PCI domain */
256 u8 bus; /* PCI bus number */
257 u8 devfn; /* PCI devfn number */
258 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
259 struct intel_iommu *iommu; /* IOMMU used by this device */
260 struct dmar_domain *domain; /* pointer to domain */
263 static void flush_unmaps_timeout(unsigned long data);
265 DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
267 #define HIGH_WATER_MARK 250
268 struct deferred_flush_tables {
270 struct iova *iova[HIGH_WATER_MARK];
271 struct dmar_domain *domain[HIGH_WATER_MARK];
274 static struct deferred_flush_tables *deferred_flush;
276 /* bitmap for indexing intel_iommus */
277 static int g_num_of_iommus;
279 static DEFINE_SPINLOCK(async_umap_flush_lock);
280 static LIST_HEAD(unmaps_to_do);
283 static long list_size;
285 static void domain_remove_dev_info(struct dmar_domain *domain);
287 #ifdef CONFIG_DMAR_DEFAULT_ON
288 int dmar_disabled = 0;
290 int dmar_disabled = 1;
291 #endif /*CONFIG_DMAR_DEFAULT_ON*/
293 static int __initdata dmar_map_gfx = 1;
294 static int dmar_forcedac;
295 static int intel_iommu_strict;
297 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
298 static DEFINE_SPINLOCK(device_domain_lock);
299 static LIST_HEAD(device_domain_list);
301 static struct iommu_ops intel_iommu_ops;
303 static int __init intel_iommu_setup(char *str)
308 if (!strncmp(str, "on", 2)) {
310 printk(KERN_INFO "Intel-IOMMU: enabled\n");
311 } else if (!strncmp(str, "off", 3)) {
313 printk(KERN_INFO "Intel-IOMMU: disabled\n");
314 } else if (!strncmp(str, "igfx_off", 8)) {
317 "Intel-IOMMU: disable GFX device mapping\n");
318 } else if (!strncmp(str, "forcedac", 8)) {
320 "Intel-IOMMU: Forcing DAC for PCI devices\n");
322 } else if (!strncmp(str, "strict", 6)) {
324 "Intel-IOMMU: disable batched IOTLB flush\n");
325 intel_iommu_strict = 1;
328 str += strcspn(str, ",");
334 __setup("intel_iommu=", intel_iommu_setup);
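/*
 * Example kernel command lines accepted by the parser above:
 *   intel_iommu=on
 *   intel_iommu=off
 *   intel_iommu=on,strict            (flush IOTLB on every unmap)
 *   intel_iommu=igfx_off,forcedac    (no gfx identity map, force 64-bit DAC)
 * Options are comma separated; unrecognized options are skipped.
 */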
336 static struct kmem_cache *iommu_domain_cache;
337 static struct kmem_cache *iommu_devinfo_cache;
338 static struct kmem_cache *iommu_iova_cache;
340 static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
345 /* trying to avoid low memory issues */
346 flags = current->flags & PF_MEMALLOC;
347 current->flags |= PF_MEMALLOC;
348 vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
349 current->flags &= (~PF_MEMALLOC | flags);
354 static inline void *alloc_pgtable_page(void)
359 /* trying to avoid low memory issues */
360 flags = current->flags & PF_MEMALLOC;
361 current->flags |= PF_MEMALLOC;
362 vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
363 current->flags &= (~PF_MEMALLOC | flags);
367 static inline void free_pgtable_page(void *vaddr)
369 free_page((unsigned long)vaddr);
372 static inline void *alloc_domain_mem(void)
374 return iommu_kmem_cache_alloc(iommu_domain_cache);
377 static void free_domain_mem(void *vaddr)
379 kmem_cache_free(iommu_domain_cache, vaddr);
382 static inline void * alloc_devinfo_mem(void)
384 return iommu_kmem_cache_alloc(iommu_devinfo_cache);
387 static inline void free_devinfo_mem(void *vaddr)
389 kmem_cache_free(iommu_devinfo_cache, vaddr);
392 struct iova *alloc_iova_mem(void)
394 return iommu_kmem_cache_alloc(iommu_iova_cache);
397 void free_iova_mem(struct iova *iova)
399 kmem_cache_free(iommu_iova_cache, iova);
403 static inline int width_to_agaw(int width);
405 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
410 sagaw = cap_sagaw(iommu->cap);
411 for (agaw = width_to_agaw(max_gaw);
413 if (test_bit(agaw, &sagaw))
421 * Calculate max SAGAW for each iommu.
423 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
425 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
429 * calculate agaw for each iommu.
430 * "SAGAW" may be different across iommus; use the default agaw, and
431 * fall back to a smaller supported agaw for iommus that don't support the default.
433 int iommu_calculate_agaw(struct intel_iommu *iommu)
435 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
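/*
 * Example: for the default 48-bit domain width, width_to_agaw(48) is
 * (48 - 30) / 9 == 2 (i.e. a 4-level page table). __iommu_calculate_agaw()
 * then searches cap_sagaw() downwards and returns the largest supported
 * AGAW not exceeding that value.
 */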
438 /* in native case, each domain is related to only one iommu */
439 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
443 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
445 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
446 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
449 return g_iommus[iommu_id];
452 static void domain_update_iommu_coherency(struct dmar_domain *domain)
456 domain->iommu_coherency = 1;
458 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
459 for (; i < g_num_of_iommus; ) {
460 if (!ecap_coherent(g_iommus[i]->ecap)) {
461 domain->iommu_coherency = 0;
464 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
468 static void domain_update_iommu_snooping(struct dmar_domain *domain)
472 domain->iommu_snooping = 1;
474 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
475 for (; i < g_num_of_iommus; ) {
476 if (!ecap_sc_support(g_iommus[i]->ecap)) {
477 domain->iommu_snooping = 0;
480 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
484 /* Some capabilities may be different across iommus */
485 static void domain_update_iommu_cap(struct dmar_domain *domain)
487 domain_update_iommu_coherency(domain);
488 domain_update_iommu_snooping(domain);
491 static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
493 struct dmar_drhd_unit *drhd = NULL;
496 for_each_drhd_unit(drhd) {
499 if (segment != drhd->segment)
502 for (i = 0; i < drhd->devices_cnt; i++) {
503 if (drhd->devices[i] &&
504 drhd->devices[i]->bus->number == bus &&
505 drhd->devices[i]->devfn == devfn)
507 if (drhd->devices[i] &&
508 drhd->devices[i]->subordinate &&
509 drhd->devices[i]->subordinate->number <= bus &&
510 drhd->devices[i]->subordinate->subordinate >= bus)
514 if (drhd->include_all)
521 static void domain_flush_cache(struct dmar_domain *domain,
522 void *addr, int size)
524 if (!domain->iommu_coherency)
525 clflush_cache_range(addr, size);
528 /* Gets context entry for a given bus and devfn */
529 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
532 struct root_entry *root;
533 struct context_entry *context;
534 unsigned long phy_addr;
537 spin_lock_irqsave(&iommu->lock, flags);
538 root = &iommu->root_entry[bus];
539 context = get_context_addr_from_root(root);
541 context = (struct context_entry *)alloc_pgtable_page();
543 spin_unlock_irqrestore(&iommu->lock, flags);
546 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
547 phy_addr = virt_to_phys((void *)context);
548 set_root_value(root, phy_addr);
549 set_root_present(root);
550 __iommu_flush_cache(iommu, root, sizeof(*root));
552 spin_unlock_irqrestore(&iommu->lock, flags);
553 return &context[devfn];
556 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
558 struct root_entry *root;
559 struct context_entry *context;
563 spin_lock_irqsave(&iommu->lock, flags);
564 root = &iommu->root_entry[bus];
565 context = get_context_addr_from_root(root);
570 ret = context_present(&context[devfn]);
572 spin_unlock_irqrestore(&iommu->lock, flags);
576 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
578 struct root_entry *root;
579 struct context_entry *context;
582 spin_lock_irqsave(&iommu->lock, flags);
583 root = &iommu->root_entry[bus];
584 context = get_context_addr_from_root(root);
586 context_clear_entry(&context[devfn]);
587 __iommu_flush_cache(iommu, &context[devfn], \
588 sizeof(*context));
590 spin_unlock_irqrestore(&iommu->lock, flags);
593 static void free_context_table(struct intel_iommu *iommu)
595 struct root_entry *root;
598 struct context_entry *context;
600 spin_lock_irqsave(&iommu->lock, flags);
601 if (!iommu->root_entry) {
604 for (i = 0; i < ROOT_ENTRY_NR; i++) {
605 root = &iommu->root_entry[i];
606 context = get_context_addr_from_root(root);
608 free_pgtable_page(context);
610 free_pgtable_page(iommu->root_entry);
611 iommu->root_entry = NULL;
613 spin_unlock_irqrestore(&iommu->lock, flags);
616 /* page table handling */
617 #define LEVEL_STRIDE (9)
618 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
620 static inline int agaw_to_level(int agaw)
625 static inline int agaw_to_width(int agaw)
627 return 30 + agaw * LEVEL_STRIDE;
631 static inline int width_to_agaw(int width)
633 return (width - 30) / LEVEL_STRIDE;
636 static inline unsigned int level_to_offset_bits(int level)
638 return (12 + (level - 1) * LEVEL_STRIDE);
641 static inline int address_level_offset(u64 addr, int level)
643 return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
646 static inline u64 level_mask(int level)
648 return ((u64)-1 << level_to_offset_bits(level));
651 static inline u64 level_size(int level)
653 return ((u64)1 << level_to_offset_bits(level));
656 static inline u64 align_to_level(u64 addr, int level)
658 return ((addr + level_size(level) - 1) & level_mask(level));
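/*
 * Worked example for the helpers above: level_to_offset_bits() yields
 * 12, 21 and 30 for levels 1, 2 and 3, so address_level_offset() picks
 * out successive 9-bit indexes; level_size() is 4KiB, 2MiB and 1GiB
 * respectively, and agaw_to_width(2) == 30 + 2 * 9 == 48.
 */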
661 static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
663 int addr_width = agaw_to_width(domain->agaw);
664 struct dma_pte *parent, *pte = NULL;
665 int level = agaw_to_level(domain->agaw);
669 BUG_ON(!domain->pgd);
671 addr &= (((u64)1) << addr_width) - 1;
672 parent = domain->pgd;
674 spin_lock_irqsave(&domain->mapping_lock, flags);
678 offset = address_level_offset(addr, level);
679 pte = &parent[offset];
683 if (!dma_pte_present(pte)) {
684 tmp_page = alloc_pgtable_page();
687 spin_unlock_irqrestore(&domain->mapping_lock,
691 domain_flush_cache(domain, tmp_page, PAGE_SIZE);
692 dma_set_pte_addr(pte, virt_to_phys(tmp_page));
694 * higher-level tables always set r/w; the last-level page
695 * table controls read/write
697 dma_set_pte_readable(pte);
698 dma_set_pte_writable(pte);
699 domain_flush_cache(domain, pte, sizeof(*pte));
701 parent = phys_to_virt(dma_pte_addr(pte));
705 spin_unlock_irqrestore(&domain->mapping_lock, flags);
709 /* return address's pte at specific level */
710 static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
713 struct dma_pte *parent, *pte = NULL;
714 int total = agaw_to_level(domain->agaw);
717 parent = domain->pgd;
718 while (level <= total) {
719 offset = address_level_offset(addr, total);
720 pte = &parent[offset];
724 if (!dma_pte_present(pte))
726 parent = phys_to_virt(dma_pte_addr(pte));
732 /* clear one page's page table */
733 static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
735 struct dma_pte *pte = NULL;
737 /* get last level pte */
738 pte = dma_addr_level_pte(domain, addr, 1);
742 domain_flush_cache(domain, pte, sizeof(*pte));
746 /* clear last level pte, a tlb flush should be followed */
747 static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
749 int addr_width = agaw_to_width(domain->agaw);
752 start &= (((u64)1) << addr_width) - 1;
753 end &= (((u64)1) << addr_width) - 1;
754 /* in case it's partial page */
756 end = PAGE_ALIGN(end);
757 npages = (end - start) / VTD_PAGE_SIZE;
759 /* we don't need lock here, nobody else touches the iova range */
761 dma_pte_clear_one(domain, start);
762 start += VTD_PAGE_SIZE;
766 /* free page table pages. last level pte should already be cleared */
767 static void dma_pte_free_pagetable(struct dmar_domain *domain,
770 int addr_width = agaw_to_width(domain->agaw);
772 int total = agaw_to_level(domain->agaw);
776 start &= (((u64)1) << addr_width) - 1;
777 end &= (((u64)1) << addr_width) - 1;
779 /* we don't need lock here, nobody else touches the iova range */
781 while (level <= total) {
782 tmp = align_to_level(start, level);
783 if (tmp >= end || (tmp + level_size(level) > end))
787 pte = dma_addr_level_pte(domain, tmp, level);
790 phys_to_virt(dma_pte_addr(pte)));
792 domain_flush_cache(domain, pte, sizeof(*pte));
794 tmp += level_size(level);
799 if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
800 free_pgtable_page(domain->pgd);
806 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
808 struct root_entry *root;
811 root = (struct root_entry *)alloc_pgtable_page();
815 __iommu_flush_cache(iommu, root, ROOT_SIZE);
817 spin_lock_irqsave(&iommu->lock, flags);
818 iommu->root_entry = root;
819 spin_unlock_irqrestore(&iommu->lock, flags);
824 static void iommu_set_root_entry(struct intel_iommu *iommu)
830 addr = iommu->root_entry;
832 spin_lock_irqsave(&iommu->register_lock, flag);
833 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
835 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
837 /* Make sure the hardware completes it */
838 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
839 readl, (sts & DMA_GSTS_RTPS), sts);
841 spin_unlock_irqrestore(&iommu->register_lock, flag);
844 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
849 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
852 spin_lock_irqsave(&iommu->register_lock, flag);
853 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
855 /* Make sure the hardware completes it */
856 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
857 readl, (!(val & DMA_GSTS_WBFS)), val);
859 spin_unlock_irqrestore(&iommu->register_lock, flag);
862 /* return value determines if we need a write buffer flush */
863 static void __iommu_flush_context(struct intel_iommu *iommu,
864 u16 did, u16 source_id, u8 function_mask,
871 case DMA_CCMD_GLOBAL_INVL:
872 val = DMA_CCMD_GLOBAL_INVL;
874 case DMA_CCMD_DOMAIN_INVL:
875 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
877 case DMA_CCMD_DEVICE_INVL:
878 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
879 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
886 spin_lock_irqsave(&iommu->register_lock, flag);
887 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
889 /* Make sure the hardware completes it */
890 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
891 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
893 spin_unlock_irqrestore(&iommu->register_lock, flag);
896 /* return value determines if we need a write buffer flush */
897 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
898 u64 addr, unsigned int size_order, u64 type)
900 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
901 u64 val = 0, val_iva = 0;
905 case DMA_TLB_GLOBAL_FLUSH:
906 /* global flush doesn't need to set IVA_REG */
907 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
909 case DMA_TLB_DSI_FLUSH:
910 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
912 case DMA_TLB_PSI_FLUSH:
913 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
914 /* Note: always flush non-leaf currently */
915 val_iva = size_order | addr;
920 /* Note: set drain read/write */
923 * This is probably meant to be super secure. Looks like we can
924 * ignore it without any impact.
926 if (cap_read_drain(iommu->cap))
927 val |= DMA_TLB_READ_DRAIN;
929 if (cap_write_drain(iommu->cap))
930 val |= DMA_TLB_WRITE_DRAIN;
932 spin_lock_irqsave(&iommu->register_lock, flag);
933 /* Note: Only uses first TLB reg currently */
935 dmar_writeq(iommu->reg + tlb_offset, val_iva);
936 dmar_writeq(iommu->reg + tlb_offset + 8, val);
938 /* Make sure the hardware completes it */
939 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
940 dmar_readq, (!(val & DMA_TLB_IVT)), val);
942 spin_unlock_irqrestore(&iommu->register_lock, flag);
944 /* check IOTLB invalidation granularity */
945 if (DMA_TLB_IAIG(val) == 0)
946 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
947 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
948 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
949 (unsigned long long)DMA_TLB_IIRG(type),
950 (unsigned long long)DMA_TLB_IAIG(val));
953 static struct device_domain_info *iommu_support_dev_iotlb(
954 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
958 struct device_domain_info *info;
959 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
961 if (!ecap_dev_iotlb_support(iommu->ecap))
967 spin_lock_irqsave(&device_domain_lock, flags);
968 list_for_each_entry(info, &domain->devices, link)
969 if (info->bus == bus && info->devfn == devfn) {
973 spin_unlock_irqrestore(&device_domain_lock, flags);
975 if (!found || !info->dev)
978 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
981 if (!dmar_find_matched_atsr_unit(info->dev))
989 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
994 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
997 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
999 if (!info->dev || !pci_ats_enabled(info->dev))
1002 pci_disable_ats(info->dev);
1005 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1006 u64 addr, unsigned mask)
1009 unsigned long flags;
1010 struct device_domain_info *info;
1012 spin_lock_irqsave(&device_domain_lock, flags);
1013 list_for_each_entry(info, &domain->devices, link) {
1014 if (!info->dev || !pci_ats_enabled(info->dev))
1017 sid = info->bus << 8 | info->devfn;
1018 qdep = pci_ats_queue_depth(info->dev);
1019 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1021 spin_unlock_irqrestore(&device_domain_lock, flags);
1024 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1025 u64 addr, unsigned int pages)
1027 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1029 BUG_ON(addr & (~VTD_PAGE_MASK));
1033 * Fallback to domain selective flush if no PSI support or the size is
1034 * too big.
1035 * PSI requires page size to be 2 ^ x, and the base address is naturally
1036 * aligned to the size
1038 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1039 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1042 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1045 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
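/*
 * Example: invalidating 9 pages rounds up to 16, giving mask == 4, i.e.
 * a naturally aligned 16-page PSI; if mask exceeds cap_max_amask_val()
 * the code above already fell back to a domain-selective flush.
 */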
1048 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1051 unsigned long flags;
1053 spin_lock_irqsave(&iommu->register_lock, flags);
1054 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1055 pmen &= ~DMA_PMEN_EPM;
1056 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1058 /* wait for the protected region status bit to clear */
1059 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1060 readl, !(pmen & DMA_PMEN_PRS), pmen);
1062 spin_unlock_irqrestore(&iommu->register_lock, flags);
1065 static int iommu_enable_translation(struct intel_iommu *iommu)
1068 unsigned long flags;
1070 spin_lock_irqsave(&iommu->register_lock, flags);
1071 iommu->gcmd |= DMA_GCMD_TE;
1072 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1074 /* Make sure the hardware completes it */
1075 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1076 readl, (sts & DMA_GSTS_TES), sts);
1078 spin_unlock_irqrestore(&iommu->register_lock, flags);
1082 static int iommu_disable_translation(struct intel_iommu *iommu)
1087 spin_lock_irqsave(&iommu->register_lock, flag);
1088 iommu->gcmd &= ~DMA_GCMD_TE;
1089 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1091 /* Make sure the hardware completes it */
1092 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1093 readl, (!(sts & DMA_GSTS_TES)), sts);
1095 spin_unlock_irqrestore(&iommu->register_lock, flag);
1100 static int iommu_init_domains(struct intel_iommu *iommu)
1102 unsigned long ndomains;
1103 unsigned long nlongs;
1105 ndomains = cap_ndoms(iommu->cap);
1106 pr_debug("Number of Domains supported <%ld>\n", ndomains);
1107 nlongs = BITS_TO_LONGS(ndomains);
1109 /* TBD: there might be 64K domains,
1110 * consider other allocation for future chip
1111 */
1112 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1113 if (!iommu->domain_ids) {
1114 printk(KERN_ERR "Allocating domain id array failed\n");
1117 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1119 if (!iommu->domains) {
1120 printk(KERN_ERR "Allocating domain array failed\n");
1121 kfree(iommu->domain_ids);
1125 spin_lock_init(&iommu->lock);
1128 * if Caching mode is set, then invalid translations are tagged
1129 * with domainid 0. Hence we need to pre-allocate it.
1131 if (cap_caching_mode(iommu->cap))
1132 set_bit(0, iommu->domain_ids);
1137 static void domain_exit(struct dmar_domain *domain);
1138 static void vm_domain_exit(struct dmar_domain *domain);
1140 void free_dmar_iommu(struct intel_iommu *iommu)
1142 struct dmar_domain *domain;
1144 unsigned long flags;
1146 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
1147 for (; i < cap_ndoms(iommu->cap); ) {
1148 domain = iommu->domains[i];
1149 clear_bit(i, iommu->domain_ids);
1151 spin_lock_irqsave(&domain->iommu_lock, flags);
1152 if (--domain->iommu_count == 0) {
1153 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1154 vm_domain_exit(domain);
1156 domain_exit(domain);
1158 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1160 i = find_next_bit(iommu->domain_ids,
1161 cap_ndoms(iommu->cap), i+1);
1164 if (iommu->gcmd & DMA_GCMD_TE)
1165 iommu_disable_translation(iommu);
1168 set_irq_data(iommu->irq, NULL);
1169 /* This will mask the irq */
1170 free_irq(iommu->irq, iommu);
1171 destroy_irq(iommu->irq);
1174 kfree(iommu->domains);
1175 kfree(iommu->domain_ids);
1177 g_iommus[iommu->seq_id] = NULL;
1179 /* if all iommus are freed, free g_iommus */
1180 for (i = 0; i < g_num_of_iommus; i++) {
1185 if (i == g_num_of_iommus)
1188 /* free context mapping */
1189 free_context_table(iommu);
1192 static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
1195 unsigned long ndomains;
1196 struct dmar_domain *domain;
1197 unsigned long flags;
1199 domain = alloc_domain_mem();
1203 ndomains = cap_ndoms(iommu->cap);
1205 spin_lock_irqsave(&iommu->lock, flags);
1206 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1207 if (num >= ndomains) {
1208 spin_unlock_irqrestore(&iommu->lock, flags);
1209 free_domain_mem(domain);
1210 printk(KERN_ERR "IOMMU: no free domain ids\n");
1214 set_bit(num, iommu->domain_ids);
1216 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1217 set_bit(iommu->seq_id, &domain->iommu_bmp);
1219 iommu->domains[num] = domain;
1220 spin_unlock_irqrestore(&iommu->lock, flags);
1225 static void iommu_free_domain(struct dmar_domain *domain)
1227 unsigned long flags;
1228 struct intel_iommu *iommu;
1230 iommu = domain_get_iommu(domain);
1232 spin_lock_irqsave(&iommu->lock, flags);
1233 clear_bit(domain->id, iommu->domain_ids);
1234 spin_unlock_irqrestore(&iommu->lock, flags);
1237 static struct iova_domain reserved_iova_list;
1238 static struct lock_class_key reserved_alloc_key;
1239 static struct lock_class_key reserved_rbtree_key;
1241 static void dmar_init_reserved_ranges(void)
1243 struct pci_dev *pdev = NULL;
1248 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1250 lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
1251 &reserved_alloc_key);
1252 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1253 &reserved_rbtree_key);
1255 /* IOAPIC ranges shouldn't be accessed by DMA */
1256 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1257 IOVA_PFN(IOAPIC_RANGE_END));
1259 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1261 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1262 for_each_pci_dev(pdev) {
1265 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1266 r = &pdev->resource[i];
1267 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1270 addr &= PHYSICAL_PAGE_MASK;
1271 size = r->end - addr;
1272 size = PAGE_ALIGN(size);
1273 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
1274 IOVA_PFN(size + addr) - 1);
1276 printk(KERN_ERR "Reserve iova failed\n");
1282 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1284 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1287 static inline int guestwidth_to_adjustwidth(int gaw)
1290 int r = (gaw - 12) % 9;
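	/*
	 * r is used below to round gaw up so that (gaw - 12) becomes a
	 * multiple of LEVEL_STRIDE, e.g. a 36-bit guest width (r == 6) is
	 * adjusted to 39 bits while 48 (r == 0) is kept as-is.
	 */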
1301 static int domain_init(struct dmar_domain *domain, int guest_width)
1303 struct intel_iommu *iommu;
1304 int adjust_width, agaw;
1305 unsigned long sagaw;
1307 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1308 spin_lock_init(&domain->mapping_lock);
1309 spin_lock_init(&domain->iommu_lock);
1311 domain_reserve_special_ranges(domain);
1313 /* calculate AGAW */
1314 iommu = domain_get_iommu(domain);
1315 if (guest_width > cap_mgaw(iommu->cap))
1316 guest_width = cap_mgaw(iommu->cap);
1317 domain->gaw = guest_width;
1318 adjust_width = guestwidth_to_adjustwidth(guest_width);
1319 agaw = width_to_agaw(adjust_width);
1320 sagaw = cap_sagaw(iommu->cap);
1321 if (!test_bit(agaw, &sagaw)) {
1322 /* hardware doesn't support it, choose a bigger one */
1323 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1324 agaw = find_next_bit(&sagaw, 5, agaw);
1328 domain->agaw = agaw;
1329 INIT_LIST_HEAD(&domain->devices);
1331 if (ecap_coherent(iommu->ecap))
1332 domain->iommu_coherency = 1;
1334 domain->iommu_coherency = 0;
1336 if (ecap_sc_support(iommu->ecap))
1337 domain->iommu_snooping = 1;
1339 domain->iommu_snooping = 0;
1341 domain->iommu_count = 1;
1343 /* always allocate the top pgd */
1344 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
1347 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1351 static void domain_exit(struct dmar_domain *domain)
1355 /* Domain 0 is reserved, so don't process it */
1359 domain_remove_dev_info(domain);
1361 put_iova_domain(&domain->iovad);
1362 end = DOMAIN_MAX_ADDR(domain->gaw);
1363 end = end & (~PAGE_MASK);
1366 dma_pte_clear_range(domain, 0, end);
1368 /* free page tables */
1369 dma_pte_free_pagetable(domain, 0, end);
1371 iommu_free_domain(domain);
1372 free_domain_mem(domain);
1375 static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1376 u8 bus, u8 devfn, int translation)
1378 struct context_entry *context;
1379 unsigned long flags;
1380 struct intel_iommu *iommu;
1381 struct dma_pte *pgd;
1383 unsigned long ndomains;
1386 struct device_domain_info *info = NULL;
1388 pr_debug("Set context mapping for %02x:%02x.%d\n",
1389 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1391 BUG_ON(!domain->pgd);
1392 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1393 translation != CONTEXT_TT_MULTI_LEVEL);
1395 iommu = device_to_iommu(segment, bus, devfn);
1399 context = device_to_context_entry(iommu, bus, devfn);
1402 spin_lock_irqsave(&iommu->lock, flags);
1403 if (context_present(context)) {
1404 spin_unlock_irqrestore(&iommu->lock, flags);
1411 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
1414 /* find an available domain id for this device in iommu */
1415 ndomains = cap_ndoms(iommu->cap);
1416 num = find_first_bit(iommu->domain_ids, ndomains);
1417 for (; num < ndomains; ) {
1418 if (iommu->domains[num] == domain) {
1423 num = find_next_bit(iommu->domain_ids,
1424 cap_ndoms(iommu->cap), num+1);
1428 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1429 if (num >= ndomains) {
1430 spin_unlock_irqrestore(&iommu->lock, flags);
1431 printk(KERN_ERR "IOMMU: no free domain ids\n");
1435 set_bit(num, iommu->domain_ids);
1436 iommu->domains[num] = domain;
1440 /* Skip top levels of page tables for
1441 * iommus which have a smaller agaw than the default.
1442 */
1443 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1444 pgd = phys_to_virt(dma_pte_addr(pgd));
1445 if (!dma_pte_present(pgd)) {
1446 spin_unlock_irqrestore(&iommu->lock, flags);
1452 context_set_domain_id(context, id);
1454 if (translation != CONTEXT_TT_PASS_THROUGH) {
1455 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1456 translation = info ? CONTEXT_TT_DEV_IOTLB :
1457 CONTEXT_TT_MULTI_LEVEL;
1460 * In pass through mode, AW must be programmed to indicate the largest
1461 * AGAW value supported by hardware. And ASR is ignored by hardware.
1463 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1464 context_set_address_width(context, iommu->msagaw);
1466 context_set_address_root(context, virt_to_phys(pgd));
1467 context_set_address_width(context, iommu->agaw);
1470 context_set_translation_type(context, translation);
1471 context_set_fault_enable(context);
1472 context_set_present(context);
1473 domain_flush_cache(domain, context, sizeof(*context));
1476 * It's a non-present to present mapping. If the hardware doesn't cache
1477 * non-present entries we only need to flush the write-buffer. If it
1478 * _does_ cache non-present entries, then it does so in the special
1479 * domain #0, which we have to flush:
1481 if (cap_caching_mode(iommu->cap)) {
1482 iommu->flush.flush_context(iommu, 0,
1483 (((u16)bus) << 8) | devfn,
1484 DMA_CCMD_MASK_NOBIT,
1485 DMA_CCMD_DEVICE_INVL);
1486 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
1488 iommu_flush_write_buffer(iommu);
1490 iommu_enable_dev_iotlb(info);
1491 spin_unlock_irqrestore(&iommu->lock, flags);
1493 spin_lock_irqsave(&domain->iommu_lock, flags);
1494 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1495 domain->iommu_count++;
1496 domain_update_iommu_cap(domain);
1498 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1503 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1507 struct pci_dev *tmp, *parent;
1509 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1510 pdev->bus->number, pdev->devfn,
1515 /* dependent device mapping */
1516 tmp = pci_find_upstream_pcie_bridge(pdev);
1519 /* Secondary interface's bus number and devfn 0 */
1520 parent = pdev->bus->self;
1521 while (parent != tmp) {
1522 ret = domain_context_mapping_one(domain,
1523 pci_domain_nr(parent->bus),
1524 parent->bus->number,
1525 parent->devfn, translation);
1528 parent = parent->bus->self;
1530 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1531 return domain_context_mapping_one(domain,
1532 pci_domain_nr(tmp->subordinate),
1533 tmp->subordinate->number, 0,
1535 else /* this is a legacy PCI bridge */
1536 return domain_context_mapping_one(domain,
1537 pci_domain_nr(tmp->bus),
1543 static int domain_context_mapped(struct pci_dev *pdev)
1546 struct pci_dev *tmp, *parent;
1547 struct intel_iommu *iommu;
1549 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1554 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
1557 /* dependent device mapping */
1558 tmp = pci_find_upstream_pcie_bridge(pdev);
1561 /* Secondary interface's bus number and devfn 0 */
1562 parent = pdev->bus->self;
1563 while (parent != tmp) {
1564 ret = device_context_mapped(iommu, parent->bus->number,
1568 parent = parent->bus->self;
1571 return device_context_mapped(iommu, tmp->subordinate->number,
1574 return device_context_mapped(iommu, tmp->bus->number,
1579 domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1580 u64 hpa, size_t size, int prot)
1582 u64 start_pfn, end_pfn;
1583 struct dma_pte *pte;
1585 int addr_width = agaw_to_width(domain->agaw);
1587 hpa &= (((u64)1) << addr_width) - 1;
1589 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1592 start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
1593 end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
1595 while (start_pfn < end_pfn) {
1596 pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
1599 /* We don't need lock here, nobody else
1600 * touches the iova range
1601 */
1602 BUG_ON(dma_pte_addr(pte));
1603 dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
1604 dma_set_pte_prot(pte, prot);
1605 if (prot & DMA_PTE_SNP)
1606 dma_set_pte_snp(pte);
1607 domain_flush_cache(domain, pte, sizeof(*pte));
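/*
 * Example: mapping 6KiB at hpa 0x1000 gives start_pfn 1 and end_pfn 3,
 * so the loop above installs two leaf PTEs, for iova and
 * iova + VTD_PAGE_SIZE.
 */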
1614 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1619 clear_context_table(iommu, bus, devfn);
1620 iommu->flush.flush_context(iommu, 0, 0, 0,
1621 DMA_CCMD_GLOBAL_INVL);
1622 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1625 static void domain_remove_dev_info(struct dmar_domain *domain)
1627 struct device_domain_info *info;
1628 unsigned long flags;
1629 struct intel_iommu *iommu;
1631 spin_lock_irqsave(&device_domain_lock, flags);
1632 while (!list_empty(&domain->devices)) {
1633 info = list_entry(domain->devices.next,
1634 struct device_domain_info, link);
1635 list_del(&info->link);
1636 list_del(&info->global);
1638 info->dev->dev.archdata.iommu = NULL;
1639 spin_unlock_irqrestore(&device_domain_lock, flags);
1641 iommu_disable_dev_iotlb(info);
1642 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1643 iommu_detach_dev(iommu, info->bus, info->devfn);
1644 free_devinfo_mem(info);
1646 spin_lock_irqsave(&device_domain_lock, flags);
1648 spin_unlock_irqrestore(&device_domain_lock, flags);
1653 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
1655 static struct dmar_domain *
1656 find_domain(struct pci_dev *pdev)
1658 struct device_domain_info *info;
1660 /* No lock here, assumes no domain exit in normal case */
1661 info = pdev->dev.archdata.iommu;
1663 return info->domain;
1667 /* domain is initialized */
1668 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1670 struct dmar_domain *domain, *found = NULL;
1671 struct intel_iommu *iommu;
1672 struct dmar_drhd_unit *drhd;
1673 struct device_domain_info *info, *tmp;
1674 struct pci_dev *dev_tmp;
1675 unsigned long flags;
1676 int bus = 0, devfn = 0;
1679 domain = find_domain(pdev);
1683 segment = pci_domain_nr(pdev->bus);
1685 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1687 if (dev_tmp->is_pcie) {
1688 bus = dev_tmp->subordinate->number;
1691 bus = dev_tmp->bus->number;
1692 devfn = dev_tmp->devfn;
1694 spin_lock_irqsave(&device_domain_lock, flags);
1695 list_for_each_entry(info, &device_domain_list, global) {
1696 if (info->segment == segment &&
1697 info->bus == bus && info->devfn == devfn) {
1698 found = info->domain;
1702 spin_unlock_irqrestore(&device_domain_lock, flags);
1703 /* pcie-pci bridge already has a domain, use it */
1710 /* Allocate new domain for the device */
1711 drhd = dmar_find_matched_drhd_unit(pdev);
1713 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1717 iommu = drhd->iommu;
1719 domain = iommu_alloc_domain(iommu);
1723 if (domain_init(domain, gaw)) {
1724 domain_exit(domain);
1728 /* register pcie-to-pci device */
1730 info = alloc_devinfo_mem();
1732 domain_exit(domain);
1735 info->segment = segment;
1737 info->devfn = devfn;
1739 info->domain = domain;
1740 /* This domain is shared by devices under p2p bridge */
1741 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
1743 /* pcie-to-pci bridge already has a domain, use it */
1745 spin_lock_irqsave(&device_domain_lock, flags);
1746 list_for_each_entry(tmp, &device_domain_list, global) {
1747 if (tmp->segment == segment &&
1748 tmp->bus == bus && tmp->devfn == devfn) {
1749 found = tmp->domain;
1754 free_devinfo_mem(info);
1755 domain_exit(domain);
1758 list_add(&info->link, &domain->devices);
1759 list_add(&info->global, &device_domain_list);
1761 spin_unlock_irqrestore(&device_domain_lock, flags);
1765 info = alloc_devinfo_mem();
1768 info->segment = segment;
1769 info->bus = pdev->bus->number;
1770 info->devfn = pdev->devfn;
1772 info->domain = domain;
1773 spin_lock_irqsave(&device_domain_lock, flags);
1774 /* somebody is fast */
1775 found = find_domain(pdev);
1776 if (found != NULL) {
1777 spin_unlock_irqrestore(&device_domain_lock, flags);
1778 if (found != domain) {
1779 domain_exit(domain);
1782 free_devinfo_mem(info);
1785 list_add(&info->link, &domain->devices);
1786 list_add(&info->global, &device_domain_list);
1787 pdev->dev.archdata.iommu = info;
1788 spin_unlock_irqrestore(&device_domain_lock, flags);
1791 /* recheck it here, maybe others set it */
1792 return find_domain(pdev);
1795 static int iommu_prepare_identity_map(struct pci_dev *pdev,
1796 unsigned long long start,
1797 unsigned long long end)
1799 struct dmar_domain *domain;
1801 unsigned long long base;
1805 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1806 pci_name(pdev), start, end);
1807 /* page table init */
1808 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1812 /* The address might not be aligned */
1813 base = start & PAGE_MASK;
1814 size = end - base;
1815 size = PAGE_ALIGN(size);
1816 if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
1817 IOVA_PFN(base + size) - 1)) {
1818 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1823 pr_debug("Mapping reserved region %lx@%llx for %s\n",
1824 size, base, pci_name(pdev));
1826 * RMRR range might have overlap with physical memory range,
1827 * clear it first
1828 */
1829 dma_pte_clear_range(domain, base, base + size);
1831 ret = domain_page_mapping(domain, base, base, size,
1832 DMA_PTE_READ|DMA_PTE_WRITE);
1836 /* context entry init */
1837 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
1841 domain_exit(domain);
1846 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1847 struct pci_dev *pdev)
1849 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
1851 return iommu_prepare_identity_map(pdev, rmrr->base_address,
1852 rmrr->end_address + 1);
1855 #ifdef CONFIG_DMAR_GFX_WA
1856 struct iommu_prepare_data {
1857 struct pci_dev *pdev;
1861 static int __init iommu_prepare_work_fn(unsigned long start_pfn,
1862 unsigned long end_pfn, void *datax)
1864 struct iommu_prepare_data *data;
1866 data = (struct iommu_prepare_data *)datax;
1868 data->ret = iommu_prepare_identity_map(data->pdev,
1869 start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
1874 static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
1877 struct iommu_prepare_data data;
1882 for_each_online_node(nid) {
1883 work_with_active_regions(nid, iommu_prepare_work_fn, &data);
1890 static void __init iommu_prepare_gfx_mapping(void)
1892 struct pci_dev *pdev = NULL;
1895 for_each_pci_dev(pdev) {
1896 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
1897 !IS_GFX_DEVICE(pdev))
1899 printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
1901 ret = iommu_prepare_with_active_regions(pdev);
1903 printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
1906 #else /* !CONFIG_DMAR_GFX_WA */
1907 static inline void iommu_prepare_gfx_mapping(void)
1913 #ifdef CONFIG_DMAR_FLOPPY_WA
1914 static inline void iommu_prepare_isa(void)
1916 struct pci_dev *pdev;
1919 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1923 printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
1924 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1927 printk(KERN_ERR "IOMMU: Failed to create 0-16M identity map, "
1928 "floppy might not work\n");
1932 static inline void iommu_prepare_isa(void)
1936 #endif /* !CONFIG_DMAR_FLOPPY_WA */
1938 /* Initialize each context entry as pass through.*/
1939 static int __init init_context_pass_through(void)
1941 struct pci_dev *pdev = NULL;
1942 struct dmar_domain *domain;
1945 for_each_pci_dev(pdev) {
1946 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1947 ret = domain_context_mapping(domain, pdev,
1948 CONTEXT_TT_PASS_THROUGH);
1955 static int __init init_dmars(void)
1957 struct dmar_drhd_unit *drhd;
1958 struct dmar_rmrr_unit *rmrr;
1959 struct pci_dev *pdev;
1960 struct intel_iommu *iommu;
1962 int pass_through = 1;
1967 * initialize and program root entry to not present
1970 for_each_drhd_unit(drhd) {
1973 * lock not needed as this is only incremented in the single
1974 * threaded kernel __init code path; all other accesses are read-only
1979 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
1982 printk(KERN_ERR "Allocating global iommu array failed\n");
1987 deferred_flush = kzalloc(g_num_of_iommus *
1988 sizeof(struct deferred_flush_tables), GFP_KERNEL);
1989 if (!deferred_flush) {
1995 for_each_drhd_unit(drhd) {
1999 iommu = drhd->iommu;
2000 g_iommus[iommu->seq_id] = iommu;
2002 ret = iommu_init_domains(iommu);
2008 * we could share the same root & context tables
2009 * among all IOMMUs. Need to split it later.
2011 ret = iommu_alloc_root_entry(iommu);
2013 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2016 if (!ecap_pass_through(iommu->ecap))
2019 if (iommu_pass_through)
2020 if (!pass_through) {
2022 "Pass Through is not supported by hardware.\n");
2023 iommu_pass_through = 0;
2027 * Start from the sane iommu hardware state.
2029 for_each_drhd_unit(drhd) {
2033 iommu = drhd->iommu;
2036 * If the queued invalidation is already initialized by us
2037 * (for example, while enabling interrupt-remapping) then
2038 * we got the things already rolling from a sane state.
2044 * Clear any previous faults.
2046 dmar_fault(-1, iommu);
2048 * Disable queued invalidation if supported and already enabled
2049 * before OS handover.
2051 dmar_disable_qi(iommu);
2054 for_each_drhd_unit(drhd) {
2058 iommu = drhd->iommu;
2060 if (dmar_enable_qi(iommu)) {
2062 * Queued Invalidate not enabled, use Register Based
2065 iommu->flush.flush_context = __iommu_flush_context;
2066 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2067 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
2069 (unsigned long long)drhd->reg_base_addr);
2071 iommu->flush.flush_context = qi_flush_context;
2072 iommu->flush.flush_iotlb = qi_flush_iotlb;
2073 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
2075 (unsigned long long)drhd->reg_base_addr);
2080 * If pass through is set and enabled, context entries of all pci
2081 * devices are initialized with the pass through translation type.
2083 if (iommu_pass_through) {
2084 ret = init_context_pass_through();
2086 printk(KERN_ERR "IOMMU: Pass through init failed.\n");
2087 iommu_pass_through = 0;
2092 * If pass through is not set or not enabled, setup context entries for
2093 * identity mappings for rmrr, gfx, and isa.
2095 if (!iommu_pass_through) {
2098 * for each dev attached to rmrr
2100 * locate drhd for dev, alloc domain for dev
2101 * allocate free domain
2102 * allocate page table entries for rmrr
2103 * if context not allocated for bus
2104 * allocate and init context
2105 * set present in root table for this bus
2106 * init context with domain, translation etc
2110 for_each_rmrr_units(rmrr) {
2111 for (i = 0; i < rmrr->devices_cnt; i++) {
2112 pdev = rmrr->devices[i];
2114 * some BIOSes list non-existent devices in the DMAR table
2119 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2122 "IOMMU: mapping reserved region failed\n");
2126 iommu_prepare_gfx_mapping();
2128 iommu_prepare_isa();
2134 * global invalidate context cache
2135 * global invalidate iotlb
2136 * enable translation
2138 for_each_drhd_unit(drhd) {
2141 iommu = drhd->iommu;
2143 iommu_flush_write_buffer(iommu);
2145 ret = dmar_set_interrupt(iommu);
2149 iommu_set_root_entry(iommu);
2151 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2152 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2153 iommu_disable_protect_mem_regions(iommu);
2155 ret = iommu_enable_translation(iommu);
2162 for_each_drhd_unit(drhd) {
2165 iommu = drhd->iommu;
2172 static inline u64 aligned_size(u64 host_addr, size_t size)
2175 addr = (host_addr & (~PAGE_MASK)) + size;
2176 return PAGE_ALIGN(addr);
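/*
 * Example: aligned_size(0x1003, 0x2100) == PAGE_ALIGN(0x3 + 0x2100) ==
 * 0x3000, i.e. three pages, because the sub-page offset of host_addr is
 * added back before rounding up.
 */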
2180 iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
2184 /* Make sure it's in range */
2185 end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
2186 if (!size || (IOVA_START_ADDR + size > end))
2189 piova = alloc_iova(&domain->iovad,
2190 size >> PAGE_SHIFT, IOVA_PFN(end), 1);
2194 static struct iova *
2195 __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
2196 size_t size, u64 dma_mask)
2198 struct pci_dev *pdev = to_pci_dev(dev);
2199 struct iova *iova = NULL;
2201 if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
2202 iova = iommu_alloc_iova(domain, size, dma_mask);
2205 * First try to allocate an io virtual address in
2206 * DMA_BIT_MASK(32) and if that fails then try allocating
2209 iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
2211 iova = iommu_alloc_iova(domain, size, dma_mask);
2215 printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev));
2222 static struct dmar_domain *
2223 get_valid_domain_for_dev(struct pci_dev *pdev)
2225 struct dmar_domain *domain;
2228 domain = get_domain_for_dev(pdev,
2229 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2232 "Allocating domain for %s failed", pci_name(pdev));
2236 /* make sure context mapping is ok */
2237 if (unlikely(!domain_context_mapped(pdev))) {
2238 ret = domain_context_mapping(domain, pdev,
2239 CONTEXT_TT_MULTI_LEVEL);
2242 "Domain context map for %s failed",
2251 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2252 size_t size, int dir, u64 dma_mask)
2254 struct pci_dev *pdev = to_pci_dev(hwdev);
2255 struct dmar_domain *domain;
2256 phys_addr_t start_paddr;
2260 struct intel_iommu *iommu;
2262 BUG_ON(dir == DMA_NONE);
2263 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2266 domain = get_valid_domain_for_dev(pdev);
2270 iommu = domain_get_iommu(domain);
2271 size = aligned_size((u64)paddr, size);
2273 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
2277 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2280 * Check if DMAR supports zero-length reads on write only
2283 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2284 !cap_zlr(iommu->cap))
2285 prot |= DMA_PTE_READ;
2286 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2287 prot |= DMA_PTE_WRITE;
2289 * paddr - (paddr + size) might be a partial page; we should map the whole
2290 * page. Note: if two parts of one page are separately mapped, we
2291 * might have two guest_addr mappings to the same host paddr, but this
2292 * is not a big problem
2294 ret = domain_page_mapping(domain, start_paddr,
2295 ((u64)paddr) & PHYSICAL_PAGE_MASK,
2300 /* it's a non-present to present mapping. Only flush if caching mode */
2301 if (cap_caching_mode(iommu->cap))
2302 iommu_flush_iotlb_psi(iommu, 0, start_paddr,
2303 size >> VTD_PAGE_SHIFT);
2305 iommu_flush_write_buffer(iommu);
2307 return start_paddr + ((u64)paddr & (~PAGE_MASK));
2311 __free_iova(&domain->iovad, iova);
2312 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
2313 pci_name(pdev), size, (unsigned long long)paddr, dir);
2317 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2318 unsigned long offset, size_t size,
2319 enum dma_data_direction dir,
2320 struct dma_attrs *attrs)
2322 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2323 dir, to_pci_dev(dev)->dma_mask);
2326 static void flush_unmaps(void)
2332 /* just flush them all */
2333 for (i = 0; i < g_num_of_iommus; i++) {
2334 struct intel_iommu *iommu = g_iommus[i];
2338 if (!deferred_flush[i].next)
2341 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2342 DMA_TLB_GLOBAL_FLUSH);
2343 for (j = 0; j < deferred_flush[i].next; j++) {
2345 struct iova *iova = deferred_flush[i].iova[j];
2347 mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
2348 mask = ilog2(mask >> VTD_PAGE_SHIFT);
2349 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2350 iova->pfn_lo << PAGE_SHIFT, mask);
2351 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2353 deferred_flush[i].next = 0;
2359 static void flush_unmaps_timeout(unsigned long data)
2361 unsigned long flags;
2363 spin_lock_irqsave(&async_umap_flush_lock, flags);
2365 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2368 static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2370 unsigned long flags;
2372 struct intel_iommu *iommu;
2374 spin_lock_irqsave(&async_umap_flush_lock, flags);
2375 if (list_size == HIGH_WATER_MARK)
2378 iommu = domain_get_iommu(dom);
2379 iommu_id = iommu->seq_id;
2381 next = deferred_flush[iommu_id].next;
2382 deferred_flush[iommu_id].domain[next] = dom;
2383 deferred_flush[iommu_id].iova[next] = iova;
2384 deferred_flush[iommu_id].next++;
2387 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2391 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
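/*
 * Deferred-unmap batching: add_unmap() queues the pending (iova, domain)
 * pair in a per-iommu deferred_flush table and arms a 10ms timer; when
 * HIGH_WATER_MARK unmaps are pending or the timer fires, flush_unmaps()
 * issues one global IOTLB flush per iommu instead of one PSI flush per
 * unmap. Booting with intel_iommu=strict bypasses this and flushes
 * synchronously in intel_unmap_page().
 */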
2394 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2395 size_t size, enum dma_data_direction dir,
2396 struct dma_attrs *attrs)
2398 struct pci_dev *pdev = to_pci_dev(dev);
2399 struct dmar_domain *domain;
2400 unsigned long start_addr;
2402 struct intel_iommu *iommu;
2404 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2406 domain = find_domain(pdev);
2409 iommu = domain_get_iommu(domain);
2411 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2415 start_addr = iova->pfn_lo << PAGE_SHIFT;
2416 size = aligned_size((u64)dev_addr, size);
2418 pr_debug("Device %s unmapping: %zx@%llx\n",
2419 pci_name(pdev), size, (unsigned long long)start_addr);
2421 /* clear the whole page */
2422 dma_pte_clear_range(domain, start_addr, start_addr + size);
2423 /* free page tables */
2424 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2425 if (intel_iommu_strict) {
2426 iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
2427 size >> VTD_PAGE_SHIFT);
2429 __free_iova(&domain->iovad, iova);
2431 add_unmap(domain, iova);
2433 * queue up the release of the unmap to save the 1/6th of the
2434 * cpu used up by the iotlb flush operation...
2439 static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2442 intel_unmap_page(dev, dev_addr, size, dir, NULL);
2445 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2446 dma_addr_t *dma_handle, gfp_t flags)
2451 size = PAGE_ALIGN(size);
2452 order = get_order(size);
2453 flags &= ~(GFP_DMA | GFP_DMA32);
2455 vaddr = (void *)__get_free_pages(flags, order);
2458 memset(vaddr, 0, size);
2460 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2462 hwdev->coherent_dma_mask);
2465 free_pages((unsigned long)vaddr, order);
2469 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2470 dma_addr_t dma_handle)
2474 size = PAGE_ALIGN(size);
2475 order = get_order(size);
2477 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
2478 free_pages((unsigned long)vaddr, order);
2481 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2482 int nelems, enum dma_data_direction dir,
2483 struct dma_attrs *attrs)
2486 struct pci_dev *pdev = to_pci_dev(hwdev);
2487 struct dmar_domain *domain;
2488 unsigned long start_addr;
2492 struct scatterlist *sg;
2493 struct intel_iommu *iommu;
2495 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2498 domain = find_domain(pdev);
2501 iommu = domain_get_iommu(domain);
2503 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
2506 for_each_sg(sglist, sg, nelems, i) {
2507 addr = page_to_phys(sg_page(sg)) + sg->offset;
2508 size += aligned_size((u64)addr, sg->length);
2511 start_addr = iova->pfn_lo << PAGE_SHIFT;
2513 /* clear the whole page */
2514 dma_pte_clear_range(domain, start_addr, start_addr + size);
2515 /* free page tables */
2516 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2518 iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
2519 size >> VTD_PAGE_SHIFT);
2522 __free_iova(&domain->iovad, iova);
2525 static int intel_nontranslate_map_sg(struct device *hddev,
2526 struct scatterlist *sglist, int nelems, int dir)
2529 struct scatterlist *sg;
2531 for_each_sg(sglist, sg, nelems, i) {
2532 BUG_ON(!sg_page(sg));
2533 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
2534 sg->dma_length = sg->length;
2539 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2540 enum dma_data_direction dir, struct dma_attrs *attrs)
2544 struct pci_dev *pdev = to_pci_dev(hwdev);
2545 struct dmar_domain *domain;
2549 struct iova *iova = NULL;
2551 struct scatterlist *sg;
2552 unsigned long start_addr;
2553 struct intel_iommu *iommu;
2555 BUG_ON(dir == DMA_NONE);
2556 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2557 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
2559 domain = get_valid_domain_for_dev(pdev);
2563 iommu = domain_get_iommu(domain);
2565 for_each_sg(sglist, sg, nelems, i) {
2566 addr = page_to_phys(sg_page(sg)) + sg->offset;
2567 size += aligned_size((u64)addr, sg->length);
2570 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
2572 sglist->dma_length = 0;
2577 /* Check if DMAR supports zero-length reads on write-only mappings */
2580 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
2581 !cap_zlr(iommu->cap))
2582 prot |= DMA_PTE_READ;
2583 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2584 prot |= DMA_PTE_WRITE;
2586 start_addr = iova->pfn_lo << PAGE_SHIFT;
2588 for_each_sg(sglist, sg, nelems, i) {
2589 addr = page_to_phys(sg_page(sg)) + sg->offset;
2590 size = aligned_size((u64)addr, sg->length);
2591 ret = domain_page_mapping(domain, start_addr + offset,
2592 ((u64)addr) & PHYSICAL_PAGE_MASK,
2595 /* clear the page */
2596 dma_pte_clear_range(domain, start_addr,
2597 start_addr + offset);
2598 /* free page tables */
2599 dma_pte_free_pagetable(domain, start_addr,
2600 start_addr + offset);
2602 __free_iova(&domain->iovad, iova);
2605 sg->dma_address = start_addr + offset +
2606 ((u64)addr & (~PAGE_MASK));
2607 sg->dma_length = sg->length;
2611 /* it's a non-present to present mapping. Only flush if caching mode */
2612 if (cap_caching_mode(iommu->cap))
2613 iommu_flush_iotlb_psi(iommu, 0, start_addr,
2614 offset >> VTD_PAGE_SHIFT);
2616 iommu_flush_write_buffer(iommu);
2621 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2626 struct dma_map_ops intel_dma_ops = {
2627 .alloc_coherent = intel_alloc_coherent,
2628 .free_coherent = intel_free_coherent,
2629 .map_sg = intel_map_sg,
2630 .unmap_sg = intel_unmap_sg,
2631 .map_page = intel_map_page,
2632 .unmap_page = intel_unmap_page,
2633 .mapping_error = intel_mapping_error,
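/*
 * Drivers never call these entry points directly; once dma_ops points
 * at intel_dma_ops the generic DMA API routes into them, roughly:
 *
 *	dma_addr_t dma = dma_map_single(&pdev->dev, buf, len,
 *					DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, dma))
 *		goto err;
 *	...
 *	dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
 *
 * (illustrative sketch only, not code from this driver)
 */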
2636 static inline int iommu_domain_cache_init(void)
2640 iommu_domain_cache = kmem_cache_create("iommu_domain",
2641 sizeof(struct dmar_domain),
2646 if (!iommu_domain_cache) {
2647 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2654 static inline int iommu_devinfo_cache_init(void)
2658 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2659 sizeof(struct device_domain_info),
2663 if (!iommu_devinfo_cache) {
2664 printk(KERN_ERR "Couldn't create devinfo cache\n");
2671 static inline int iommu_iova_cache_init(void)
2675 iommu_iova_cache = kmem_cache_create("iommu_iova",
2676 sizeof(struct iova),
2680 if (!iommu_iova_cache) {
2681 printk(KERN_ERR "Couldn't create iova cache\n");
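/*
 * iommu_init_mempool() creates the three slab caches above (iova,
 * domain and devinfo objects) and tears them down again in reverse
 * order if any of the later allocations fails.
 */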
2688 static int __init iommu_init_mempool(void)
2691 ret = iommu_iova_cache_init();
2695 ret = iommu_domain_cache_init();
2699 ret = iommu_devinfo_cache_init();
2703 kmem_cache_destroy(iommu_domain_cache);
2705 kmem_cache_destroy(iommu_iova_cache);
2710 static void __init iommu_exit_mempool(void)
2712 kmem_cache_destroy(iommu_devinfo_cache);
2713 kmem_cache_destroy(iommu_domain_cache);
2714 kmem_cache_destroy(iommu_iova_cache);
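/*
 * DRHD units that describe no live PCI devices are marked ignored, and
 * units that cover nothing but graphics devices have those devices
 * tagged with DUMMY_DEVICE_DOMAIN_INFO so the rest of the driver
 * bypasses translation for them.
 */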
2718 static void __init init_no_remapping_devices(void)
2720 struct dmar_drhd_unit *drhd;
2722 for_each_drhd_unit(drhd) {
2723 if (!drhd->include_all) {
2725 for (i = 0; i < drhd->devices_cnt; i++)
2726 if (drhd->devices[i] != NULL)
2728 /* ignore DMAR unit if no pci devices exist */
2729 if (i == drhd->devices_cnt)
2737 for_each_drhd_unit(drhd) {
2739 if (drhd->ignored || drhd->include_all)
2742 for (i = 0; i < drhd->devices_cnt; i++)
2743 if (drhd->devices[i] &&
2744 !IS_GFX_DEVICE(drhd->devices[i]))
2747 if (i < drhd->devices_cnt)
2750 /* bypass IOMMU if it is just for gfx devices */
2752 for (i = 0; i < drhd->devices_cnt; i++) {
2753 if (!drhd->devices[i])
2755 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
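/*
 * Suspend/resume support: translation is disabled and the fault-event
 * control, data and address registers of every active IOMMU are saved
 * on suspend, then restored after init_iommu_hw() has re-enabled
 * queued invalidation, reprogrammed the root entry and turned
 * translation back on during resume.
 */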
2760 #ifdef CONFIG_SUSPEND
2761 static int init_iommu_hw(void)
2763 struct dmar_drhd_unit *drhd;
2764 struct intel_iommu *iommu = NULL;
2766 for_each_active_iommu(iommu, drhd)
2768 dmar_reenable_qi(iommu);
2770 for_each_active_iommu(iommu, drhd) {
2771 iommu_flush_write_buffer(iommu);
2773 iommu_set_root_entry(iommu);
2775 iommu->flush.flush_context(iommu, 0, 0, 0,
2776 DMA_CCMD_GLOBAL_INVL);
2777 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2778 DMA_TLB_GLOBAL_FLUSH);
2779 iommu_disable_protect_mem_regions(iommu);
2780 iommu_enable_translation(iommu);
2786 static void iommu_flush_all(void)
2788 struct dmar_drhd_unit *drhd;
2789 struct intel_iommu *iommu;
2791 for_each_active_iommu(iommu, drhd) {
2792 iommu->flush.flush_context(iommu, 0, 0, 0,
2793 DMA_CCMD_GLOBAL_INVL);
2794 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2795 DMA_TLB_GLOBAL_FLUSH);
2799 static int iommu_suspend(struct sys_device *dev, pm_message_t state)
2801 struct dmar_drhd_unit *drhd;
2802 struct intel_iommu *iommu = NULL;
2805 for_each_active_iommu(iommu, drhd) {
2806 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
2808 if (!iommu->iommu_state)
2814 for_each_active_iommu(iommu, drhd) {
2815 iommu_disable_translation(iommu);
2817 spin_lock_irqsave(&iommu->register_lock, flag);
2819 iommu->iommu_state[SR_DMAR_FECTL_REG] =
2820 readl(iommu->reg + DMAR_FECTL_REG);
2821 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
2822 readl(iommu->reg + DMAR_FEDATA_REG);
2823 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
2824 readl(iommu->reg + DMAR_FEADDR_REG);
2825 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
2826 readl(iommu->reg + DMAR_FEUADDR_REG);
2828 spin_unlock_irqrestore(&iommu->register_lock, flag);
2833 for_each_active_iommu(iommu, drhd)
2834 kfree(iommu->iommu_state);
2839 static int iommu_resume(struct sys_device *dev)
2841 struct dmar_drhd_unit *drhd;
2842 struct intel_iommu *iommu = NULL;
2845 if (init_iommu_hw()) {
2846 WARN(1, "IOMMU setup failed, DMAR cannot resume!\n");
2850 for_each_active_iommu(iommu, drhd) {
2852 spin_lock_irqsave(&iommu->register_lock, flag);
2854 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
2855 iommu->reg + DMAR_FECTL_REG);
2856 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
2857 iommu->reg + DMAR_FEDATA_REG);
2858 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
2859 iommu->reg + DMAR_FEADDR_REG);
2860 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
2861 iommu->reg + DMAR_FEUADDR_REG);
2863 spin_unlock_irqrestore(&iommu->register_lock, flag);
2866 for_each_active_iommu(iommu, drhd)
2867 kfree(iommu->iommu_state);
2872 static struct sysdev_class iommu_sysclass = {
2874 .resume = iommu_resume,
2875 .suspend = iommu_suspend,
2878 static struct sys_device device_iommu = {
2879 .cls = &iommu_sysclass,
2882 static int __init init_iommu_sysfs(void)
2886 error = sysdev_class_register(&iommu_sysclass);
2890 error = sysdev_register(&device_iommu);
2892 sysdev_class_unregister(&iommu_sysclass);
2898 static int __init init_iommu_sysfs(void)
2902 #endif /* CONFIG_SUSPEND */
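/*
 * Boot-time entry point: parse the DMAR table and device scopes, bail
 * out when the IOMMU is disabled or swiotlb is handling DMA, then set
 * up the mempools, reserved IOVA ranges and per-device bypass list,
 * initialise the hardware, arm the deferred-unmap timer and finally
 * install intel_dma_ops (unless running in pass-through mode) and
 * register the generic IOMMU ops.
 */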
2904 int __init intel_iommu_init(void)
2908 if (dmar_table_init())
2911 if (dmar_dev_scope_init())
2915 /* Check the need for DMA-remapping initialization now.
2916 * Above initialization will also be used by Interrupt-remapping. */
2918 if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
2921 iommu_init_mempool();
2922 dmar_init_reserved_ranges();
2924 init_no_remapping_devices();
2928 printk(KERN_ERR "IOMMU: dmar init failed\n");
2929 put_iova_domain(&reserved_iova_list);
2930 iommu_exit_mempool();
2934 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
2936 init_timer(&unmap_timer);
2939 if (!iommu_pass_through) {
2941 "Multi-level page-table translation for DMAR.\n");
2942 dma_ops = &intel_dma_ops;
2945 "DMAR: Pass through translation for DMAR.\n");
2949 register_iommu(&intel_iommu_ops);
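/*
 * Everything below implements "virtual machine" domains: domains owned
 * by the generic IOMMU API (typically KVM device assignment) rather
 * than by the DMA-mapping path above.
 */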
2954 static int vm_domain_add_dev_info(struct dmar_domain *domain,
2955 struct pci_dev *pdev)
2957 struct device_domain_info *info;
2958 unsigned long flags;
2960 info = alloc_devinfo_mem();
2964 info->segment = pci_domain_nr(pdev->bus);
2965 info->bus = pdev->bus->number;
2966 info->devfn = pdev->devfn;
2968 info->domain = domain;
2970 spin_lock_irqsave(&device_domain_lock, flags);
2971 list_add(&info->link, &domain->devices);
2972 list_add(&info->global, &device_domain_list);
2973 pdev->dev.archdata.iommu = info;
2974 spin_unlock_irqrestore(&device_domain_lock, flags);
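/*
 * When a device sits behind (PCIe-to-)PCI bridges, its DMA is tagged
 * with the bridge's source-id, so the context entries of every bridge
 * on the path up to the topmost PCIe bridge have to be torn down along
 * with the device's own entry.
 */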
2979 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
2980 struct pci_dev *pdev)
2982 struct pci_dev *tmp, *parent;
2984 if (!iommu || !pdev)
2987 /* dependent device detach */
2988 tmp = pci_find_upstream_pcie_bridge(pdev);
2989 /* Secondary interface's bus number and devfn 0 */
2991 parent = pdev->bus->self;
2992 while (parent != tmp) {
2993 iommu_detach_dev(iommu, parent->bus->number,
2995 parent = parent->bus->self;
2997 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
2998 iommu_detach_dev(iommu,
2999 tmp->subordinate->number, 0);
3000 else /* this is a legacy PCI bridge */
3001 iommu_detach_dev(iommu, tmp->bus->number,
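/*
 * Detach a single device from a VM domain: drop its device_domain_info,
 * clear its context entry (and those of any bridges it depends on) and,
 * if this was the last device of that domain behind this IOMMU, clear
 * the IOMMU from the domain's bitmap and refresh the cached iommu
 * count and capabilities.
 */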
3006 static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
3007 struct pci_dev *pdev)
3009 struct device_domain_info *info;
3010 struct intel_iommu *iommu;
3011 unsigned long flags;
3013 struct list_head *entry, *tmp;
3015 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3020 spin_lock_irqsave(&device_domain_lock, flags);
3021 list_for_each_safe(entry, tmp, &domain->devices) {
3022 info = list_entry(entry, struct device_domain_info, link);
3023 /* No need to compare PCI domain; it has to be the same */
3024 if (info->bus == pdev->bus->number &&
3025 info->devfn == pdev->devfn) {
3026 list_del(&info->link);
3027 list_del(&info->global);
3029 info->dev->dev.archdata.iommu = NULL;
3030 spin_unlock_irqrestore(&device_domain_lock, flags);
3032 iommu_disable_dev_iotlb(info);
3033 iommu_detach_dev(iommu, info->bus, info->devfn);
3034 iommu_detach_dependent_devices(iommu, pdev);
3035 free_devinfo_mem(info);
3037 spin_lock_irqsave(&device_domain_lock, flags);
3045 /* if there are no other devices under the same iommu
3046 * owned by this domain, clear this iommu in iommu_bmp,
3047 * update iommu count and coherency */
3049 if (iommu == device_to_iommu(info->segment, info->bus,
3055 unsigned long tmp_flags;
3056 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3057 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3058 domain->iommu_count--;
3059 domain_update_iommu_cap(domain);
3060 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3063 spin_unlock_irqrestore(&device_domain_lock, flags);
3066 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3068 struct device_domain_info *info;
3069 struct intel_iommu *iommu;
3070 unsigned long flags1, flags2;
3072 spin_lock_irqsave(&device_domain_lock, flags1);
3073 while (!list_empty(&domain->devices)) {
3074 info = list_entry(domain->devices.next,
3075 struct device_domain_info, link);
3076 list_del(&info->link);
3077 list_del(&info->global);
3079 info->dev->dev.archdata.iommu = NULL;
3081 spin_unlock_irqrestore(&device_domain_lock, flags1);
3083 iommu_disable_dev_iotlb(info);
3084 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
3085 iommu_detach_dev(iommu, info->bus, info->devfn);
3086 iommu_detach_dependent_devices(iommu, info->dev);
3088 /* clear this iommu in iommu_bmp, update iommu count and coherency */
3091 spin_lock_irqsave(&domain->iommu_lock, flags2);
3092 if (test_and_clear_bit(iommu->seq_id,
3093 &domain->iommu_bmp)) {
3094 domain->iommu_count--;
3095 domain_update_iommu_cap(domain);
3097 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3099 free_devinfo_mem(info);
3100 spin_lock_irqsave(&device_domain_lock, flags1);
3102 spin_unlock_irqrestore(&device_domain_lock, flags1);
3105 /* domain id for virtual machine; it won't be set in a context entry */
3106 static unsigned long vm_domid;
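/*
 * A VM domain may span several IOMMUs; the usable address width is
 * limited by the smallest AGAW among them.
 */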
3108 static int vm_domain_min_agaw(struct dmar_domain *domain)
3111 int min_agaw = domain->agaw;
3113 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
3114 for (; i < g_num_of_iommus; ) {
3115 if (min_agaw > g_iommus[i]->agaw)
3116 min_agaw = g_iommus[i]->agaw;
3118 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
3124 static struct dmar_domain *iommu_alloc_vm_domain(void)
3126 struct dmar_domain *domain;
3128 domain = alloc_domain_mem();
3132 domain->id = vm_domid++;
3133 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3134 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3139 static int vm_domain_init(struct dmar_domain *domain, int guest_width)
3143 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3144 spin_lock_init(&domain->mapping_lock);
3145 spin_lock_init(&domain->iommu_lock);
3147 domain_reserve_special_ranges(domain);
3149 /* calculate AGAW */
3150 domain->gaw = guest_width;
3151 adjust_width = guestwidth_to_adjustwidth(guest_width);
3152 domain->agaw = width_to_agaw(adjust_width);
3154 INIT_LIST_HEAD(&domain->devices);
3156 domain->iommu_count = 0;
3157 domain->iommu_coherency = 0;
3158 domain->max_addr = 0;
3160 /* always allocate the top pgd */
3161 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
3164 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3168 static void iommu_free_vm_domain(struct dmar_domain *domain)
3170 unsigned long flags;
3171 struct dmar_drhd_unit *drhd;
3172 struct intel_iommu *iommu;
3174 unsigned long ndomains;
3176 for_each_drhd_unit(drhd) {
3179 iommu = drhd->iommu;
3181 ndomains = cap_ndoms(iommu->cap);
3182 i = find_first_bit(iommu->domain_ids, ndomains);
3183 for (; i < ndomains; ) {
3184 if (iommu->domains[i] == domain) {
3185 spin_lock_irqsave(&iommu->lock, flags);
3186 clear_bit(i, iommu->domain_ids);
3187 iommu->domains[i] = NULL;
3188 spin_unlock_irqrestore(&iommu->lock, flags);
3191 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3196 static void vm_domain_exit(struct dmar_domain *domain)
3200 /* Domain 0 is reserved, so don't process it */
3204 vm_domain_remove_all_dev_info(domain);
3206 put_iova_domain(&domain->iovad);
3207 end = DOMAIN_MAX_ADDR(domain->gaw);
3208 end = end & (~VTD_PAGE_MASK);
3211 dma_pte_clear_range(domain, 0, end);
3213 /* free page tables */
3214 dma_pte_free_pagetable(domain, 0, end);
3216 iommu_free_vm_domain(domain);
3217 free_domain_mem(domain);
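/*
 * Callbacks backing the generic IOMMU API (linux/iommu.h).  Each
 * iommu_domain wraps a VM dmar_domain created above.
 */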
3220 static int intel_iommu_domain_init(struct iommu_domain *domain)
3222 struct dmar_domain *dmar_domain;
3224 dmar_domain = iommu_alloc_vm_domain();
3227 "intel_iommu_domain_init: dmar_domain == NULL\n");
3230 if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3232 "intel_iommu_domain_init() failed\n");
3233 vm_domain_exit(dmar_domain);
3236 domain->priv = dmar_domain;
3241 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3243 struct dmar_domain *dmar_domain = domain->priv;
3245 domain->priv = NULL;
3246 vm_domain_exit(dmar_domain);
3249 static int intel_iommu_attach_device(struct iommu_domain *domain,
3252 struct dmar_domain *dmar_domain = domain->priv;
3253 struct pci_dev *pdev = to_pci_dev(dev);
3254 struct intel_iommu *iommu;
3259 /* normally pdev is not mapped */
3260 if (unlikely(domain_context_mapped(pdev))) {
3261 struct dmar_domain *old_domain;
3263 old_domain = find_domain(pdev);
3265 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
3266 vm_domain_remove_one_dev_info(old_domain, pdev);
3268 domain_remove_dev_info(old_domain);
3272 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3277 /* check if this iommu agaw is sufficient for max mapped address */
3278 addr_width = agaw_to_width(iommu->agaw);
3279 end = DOMAIN_MAX_ADDR(addr_width);
3280 end = end & VTD_PAGE_MASK;
3281 if (end < dmar_domain->max_addr) {
3282 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3283 "sufficient for the mapped address (%llx)\n",
3284 __func__, iommu->agaw, dmar_domain->max_addr);
3288 ret = vm_domain_add_dev_info(dmar_domain, pdev);
3292 ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
3296 static void intel_iommu_detach_device(struct iommu_domain *domain,
3299 struct dmar_domain *dmar_domain = domain->priv;
3300 struct pci_dev *pdev = to_pci_dev(dev);
3302 vm_domain_remove_one_dev_info(dmar_domain, pdev);
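/*
 * Map a caller-chosen IOVA range: translate IOMMU_READ/WRITE/CACHE into
 * PTE bits (snooping only when every IOMMU in the domain supports it),
 * make sure the smallest AGAW in the domain can still reach the new
 * end address, then populate the page tables.
 */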
3305 static int intel_iommu_map_range(struct iommu_domain *domain,
3306 unsigned long iova, phys_addr_t hpa,
3307 size_t size, int iommu_prot)
3309 struct dmar_domain *dmar_domain = domain->priv;
3315 if (iommu_prot & IOMMU_READ)
3316 prot |= DMA_PTE_READ;
3317 if (iommu_prot & IOMMU_WRITE)
3318 prot |= DMA_PTE_WRITE;
3319 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3320 prot |= DMA_PTE_SNP;
3322 max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
3323 if (dmar_domain->max_addr < max_addr) {
3327 /* check if minimum agaw is sufficient for mapped address */
3328 min_agaw = vm_domain_min_agaw(dmar_domain);
3329 addr_width = agaw_to_width(min_agaw);
3330 end = DOMAIN_MAX_ADDR(addr_width);
3331 end = end & VTD_PAGE_MASK;
3332 if (end < max_addr) {
3333 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3334 "sufficient for the mapped address (%llx)\n",
3335 __func__, min_agaw, max_addr);
3338 dmar_domain->max_addr = max_addr;
3341 ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
3345 static void intel_iommu_unmap_range(struct iommu_domain *domain,
3346 unsigned long iova, size_t size)
3348 struct dmar_domain *dmar_domain = domain->priv;
3351 /* The address might not be aligned */
3352 base = iova & VTD_PAGE_MASK;
3353 size = VTD_PAGE_ALIGN(size);
3354 dma_pte_clear_range(dmar_domain, base, base + size);
3356 if (dmar_domain->max_addr == base + size)
3357 dmar_domain->max_addr = base;
3360 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3363 struct dmar_domain *dmar_domain = domain->priv;
3364 struct dma_pte *pte;
3367 pte = addr_to_dma_pte(dmar_domain, iova);
3369 phys = dma_pte_addr(pte);
3374 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3377 struct dmar_domain *dmar_domain = domain->priv;
3379 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3380 return dmar_domain->iommu_snooping;
3385 static struct iommu_ops intel_iommu_ops = {
3386 .domain_init = intel_iommu_domain_init,
3387 .domain_destroy = intel_iommu_domain_destroy,
3388 .attach_dev = intel_iommu_attach_device,
3389 .detach_dev = intel_iommu_detach_device,
3390 .map = intel_iommu_map_range,
3391 .unmap = intel_iommu_unmap_range,
3392 .iova_to_phys = intel_iommu_iova_to_phys,
3393 .domain_has_cap = intel_iommu_domain_has_cap,
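/*
 * A consumer of the generic IOMMU API (KVM device assignment being the
 * usual one) would drive these ops roughly like this -- illustrative
 * sketch only, not code from this driver:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map_range(dom, gpa, hpa, size, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap_range(dom, gpa, size);
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 */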
3396 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3399 /* Mobile 4 Series Chipset neglects to set RWBF capability, but needs it */
3402 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3406 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);