AMD IOMMU: add stats counter for map_single requests
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 0922d5f..ef37786 100644
 #include <linux/pci.h>
 #include <linux/gfp.h>
 #include <linux/bitops.h>
+#include <linux/debugfs.h>
 #include <linux/scatterlist.h>
 #include <linux/iommu-helper.h>
+#ifdef CONFIG_IOMMU_API
+#include <linux/iommu.h>
+#endif
 #include <asm/proto.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
@@ -38,6 +42,10 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 static LIST_HEAD(iommu_pd_list);
 static DEFINE_SPINLOCK(iommu_pd_list_lock);
 
+#ifdef CONFIG_IOMMU_API
+static struct iommu_ops amd_iommu_ops;
+#endif
+
 /*
  * general struct to manage commands sent to an IOMMU
  */
@@ -47,6 +55,48 @@ struct iommu_cmd {
 
 static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
                             struct unity_map_entry *e);
+static struct dma_ops_domain *find_protection_domain(u16 devid);
+
+#ifdef CONFIG_AMD_IOMMU_STATS
+
+/*
+ * Initialization code for statistics collection
+ */
+
+DECLARE_STATS_COUNTER(compl_wait);
+DECLARE_STATS_COUNTER(cnt_map_single);
+
+static struct dentry *stats_dir;
+static struct dentry *de_isolate;
+static struct dentry *de_fflush;
+
+static void amd_iommu_stats_add(struct __iommu_counter *cnt)
+{
+       if (stats_dir == NULL)
+               return;
+
+       cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
+                                      &cnt->value);
+}
+
+static void amd_iommu_stats_init(void)
+{
+       stats_dir = debugfs_create_dir("amd-iommu", NULL);
+       if (stats_dir == NULL)
+               return;
+
+       de_isolate = debugfs_create_bool("isolation", 0444, stats_dir,
+                                        (u32 *)&amd_iommu_isolate);
+
+       de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
+                                        (u32 *)&amd_iommu_unmap_flush);
+
+       amd_iommu_stats_add(&compl_wait);
+       amd_iommu_stats_add(&cnt_map_single);
+}
+
+#endif
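
The DECLARE_STATS_COUNTER()/INC_STATS_COUNTER() macros and struct __iommu_counter
are not part of this file; they come from amd_iommu_types.h. A minimal sketch of
what that header presumably provides, consistent with how amd_iommu_stats_add()
uses the struct (the no-op variants matter because INC_STATS_COUNTER() is also
called outside any #ifdef CONFIG_AMD_IOMMU_STATS block):

    struct __iommu_counter {
            char *name;
            u64 value;
            struct dentry *dent;
    };

    #ifdef CONFIG_AMD_IOMMU_STATS
    #define DECLARE_STATS_COUNTER(nm) \
            static struct __iommu_counter nm = { .name = #nm }
    #define INC_STATS_COUNTER(name)         name.value += 1
    #else
    #define DECLARE_STATS_COUNTER(name)
    #define INC_STATS_COUNTER(name)
    #endif

With CONFIG_AMD_IOMMU_STATS enabled, each counter appears as a read-only u64
file under /sys/kernel/debug/amd-iommu/ once amd_iommu_stats_init() has run,
e.g. cnt_map_single for the counter this patch adds.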
 
 /* returns !0 if the IOMMU is caching non-present entries in its TLB */
 static int iommu_has_npcache(struct amd_iommu *iommu)
@@ -189,12 +239,54 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
        spin_lock_irqsave(&iommu->lock, flags);
        ret = __iommu_queue_command(iommu, cmd);
        if (!ret)
-               iommu->need_sync = 1;
+               iommu->need_sync = true;
        spin_unlock_irqrestore(&iommu->lock, flags);
 
        return ret;
 }
 
+/*
+ * This function waits until an IOMMU has completed a completion
+ * wait command
+ */
+static void __iommu_wait_for_completion(struct amd_iommu *iommu)
+{
+       int ready = 0;
+       unsigned status = 0;
+       unsigned long i = 0;
+
+       INC_STATS_COUNTER(compl_wait);
+
+       while (!ready && (i < EXIT_LOOP_COUNT)) {
+               ++i;
+               /* wait for the bit to become one */
+               status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+               ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
+       }
+
+       /* set bit back to zero */
+       status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
+       writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+       if (unlikely(i == EXIT_LOOP_COUNT))
+               panic("AMD IOMMU: Completion wait loop failed\n");
+}
+
+/*
+ * This function queues a completion wait command into the command
+ * buffer of an IOMMU
+ */
+static int __iommu_completion_wait(struct amd_iommu *iommu)
+{
+       struct iommu_cmd cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
+       CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
+
+       return __iommu_queue_command(iommu, &cmd);
+}
+
 /*
  * This function is called whenever we need to ensure that the IOMMU has
  * completed execution of all commands we sent. It sends a
@@ -204,40 +296,22 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
  */
 static int iommu_completion_wait(struct amd_iommu *iommu)
 {
-       int ret = 0, ready = 0;
-       unsigned status = 0;
-       struct iommu_cmd cmd;
-       unsigned long flags, i = 0;
-
-       memset(&cmd, 0, sizeof(cmd));
-       cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
-       CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
+       int ret = 0;
+       unsigned long flags;
 
        spin_lock_irqsave(&iommu->lock, flags);
 
        if (!iommu->need_sync)
                goto out;
 
-       iommu->need_sync = 0;
+       ret = __iommu_completion_wait(iommu);
 
-       ret = __iommu_queue_command(iommu, &cmd);
+       iommu->need_sync = false;
 
        if (ret)
                goto out;
 
-       while (!ready && (i < EXIT_LOOP_COUNT)) {
-               ++i;
-               /* wait for the bit to become one */
-               status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
-               ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
-       }
-
-       /* set bit back to zero */
-       status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
-       writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
-       if (unlikely(i == EXIT_LOOP_COUNT))
-               panic("AMD IOMMU: Completion wait loop failed\n");
+       __iommu_wait_for_completion(iommu);
 
 out:
        spin_unlock_irqrestore(&iommu->lock, flags);
@@ -264,6 +338,21 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
        return ret;
 }
 
+static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
+                                         u16 domid, int pde, int s)
+{
+       memset(cmd, 0, sizeof(*cmd));
+       address &= PAGE_MASK;
+       CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
+       cmd->data[1] |= domid;
+       cmd->data[2] = lower_32_bits(address);
+       cmd->data[3] = upper_32_bits(address);
+       if (s) /* size bit - we flush more than one 4kb page */
+               cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+       if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
+               cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+}
+
 /*
  * Generic command send function for invalidating TLB entries
  */
@@ -273,16 +362,7 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
        struct iommu_cmd cmd;
        int ret;
 
-       memset(&cmd, 0, sizeof(cmd));
-       address &= PAGE_MASK;
-       CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
-       cmd.data[1] |= domid;
-       cmd.data[2] = lower_32_bits(address);
-       cmd.data[3] = upper_32_bits(address);
-       if (s) /* size bit - we flush more than one 4kb page */
-               cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
-       if (pde) /* PDE bit - we wan't flush everything not only the PTEs */
-               cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+       __iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s);
 
        ret = iommu_queue_command(iommu, &cmd);
 
@@ -324,6 +404,30 @@ static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
        iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
 }
 
+#ifdef CONFIG_IOMMU_API
+/*
+ * This function is used to flush the IO/TLB for a given protection domain
+ * on every IOMMU in the system
+ */
+static void iommu_flush_domain(u16 domid)
+{
+       unsigned long flags;
+       struct amd_iommu *iommu;
+       struct iommu_cmd cmd;
+
+       __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+                                     domid, 1, 1);
+
+       list_for_each_entry(iommu, &amd_iommu_list, list) {
+               spin_lock_irqsave(&iommu->lock, flags);
+               __iommu_queue_command(iommu, &cmd);
+               __iommu_completion_wait(iommu);
+               __iommu_wait_for_completion(iommu);
+               spin_unlock_irqrestore(&iommu->lock, flags);
+       }
+}
+#endif
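
CMD_INV_IOMMU_ALL_PAGES_ADDRESS is the magic invalidation address that asks the
hardware to flush every page of the domain instead of a single range; in
amd_iommu_types.h it is presumably the all-ones page address:

    #define CMD_INV_IOMMU_ALL_PAGES_ADDRESS   0x7fffffffffffffffULL

Together with the size and PDE bits set by __iommu_build_inv_iommu_pages(),
this turns INV_IOMMU_PAGES into a full flush of the domain's IO/TLB entries.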
+
 /****************************************************************************
  *
  * The functions below are used to create the page table mappings for
@@ -388,6 +492,30 @@ static int iommu_map_page(struct protection_domain *dom,
        return 0;
 }
 
+#ifdef CONFIG_IOMMU_API
+static void iommu_unmap_page(struct protection_domain *dom,
+                            unsigned long bus_addr)
+{
+       u64 *pte;
+
+       pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];
+
+       if (!IOMMU_PTE_PRESENT(*pte))
+               return;
+
+       pte = IOMMU_PTE_PAGE(*pte);
+       pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];
+
+       if (!IOMMU_PTE_PRESENT(*pte))
+               return;
+
+       pte = IOMMU_PTE_PAGE(*pte);
+       pte = &pte[IOMMU_PTE_L0_INDEX(bus_addr)];
+
+       *pte = 0;
+}
+#endif
+
 /*
  * This function checks if a specific unity mapping entry is needed for
  * this specific IOMMU.
@@ -677,6 +805,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
                goto free_dma_dom;
        dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
        dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+       dma_dom->domain.flags = PD_DMA_OPS_MASK;
        dma_dom->domain.priv = dma_dom;
        if (!dma_dom->domain.pt_root)
                goto free_dma_dom;
@@ -738,6 +867,15 @@ free_dma_dom:
        return NULL;
 }
 
+/*
+ * little helper function to check whether a given protection domain is a
+ * dma_ops domain
+ */
+static bool dma_ops_domain(struct protection_domain *domain)
+{
+       return domain->flags & PD_DMA_OPS_MASK;
+}
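
dma_ops_domain() relies on the new flags field of struct protection_domain.
PD_DMA_OPS_MASK, and the PD_DEFAULT_MASK that amd_iommu_init_dma_ops() sets on
default domains further down, are expected to be plain bit masks defined in
amd_iommu_types.h along these lines:

    #define PD_DMA_OPS_MASK   (1UL << 0)  /* domain used for dma_ops */
    #define PD_DEFAULT_MASK   (1UL << 1)  /* default dma_ops domain of an IOMMU */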
+
 /*
  * Find out the protection domain structure for a given PCI device. This
  * will give us the pointer to the page table root for example.
@@ -758,14 +896,15 @@ static struct protection_domain *domain_for_device(u16 devid)
  * If a device is not yet associated with a domain, this function
  * assigns it to the domain and makes it visible to the hardware
  */
-static void set_device_domain(struct amd_iommu *iommu,
-                             struct protection_domain *domain,
-                             u16 devid)
+static void attach_device(struct amd_iommu *iommu,
+                         struct protection_domain *domain,
+                         u16 devid)
 {
        unsigned long flags;
-
        u64 pte_root = virt_to_phys(domain->pt_root);
 
+       domain->dev_cnt += 1;
+
        pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
                    << DEV_ENTRY_MODE_SHIFT;
        pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
@@ -781,6 +920,116 @@ static void set_device_domain(struct amd_iommu *iommu,
        iommu_queue_inv_dev_entry(iommu, devid);
 }
 
+/*
+ * Removes a device from a protection domain (unlocked)
+ */
+static void __detach_device(struct protection_domain *domain, u16 devid)
+{
+       /* lock domain */
+       spin_lock(&domain->lock);
+
+       /* remove domain from the lookup table */
+       amd_iommu_pd_table[devid] = NULL;
+
+       /* remove entry from the device table seen by the hardware */
+       amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
+       amd_iommu_dev_table[devid].data[1] = 0;
+       amd_iommu_dev_table[devid].data[2] = 0;
+
+       /* decrease reference counter */
+       domain->dev_cnt -= 1;
+
+       /* ready */
+       spin_unlock(&domain->lock);
+}
+
+/*
+ * Removes a device from a protection domain (with devtable_lock held)
+ */
+static void detach_device(struct protection_domain *domain, u16 devid)
+{
+       unsigned long flags;
+
+       /* lock device table */
+       write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+       __detach_device(domain, devid);
+       write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+}
+
+static int device_change_notifier(struct notifier_block *nb,
+                                 unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       u16 devid = calc_devid(pdev->bus->number, pdev->devfn);
+       struct protection_domain *domain;
+       struct dma_ops_domain *dma_domain;
+       struct amd_iommu *iommu;
+       int order = amd_iommu_aperture_order;
+       unsigned long flags;
+
+       if (devid > amd_iommu_last_bdf)
+               goto out;
+
+       devid = amd_iommu_alias_table[devid];
+
+       iommu = amd_iommu_rlookup_table[devid];
+       if (iommu == NULL)
+               goto out;
+
+       domain = domain_for_device(devid);
+
+       if (domain && !dma_ops_domain(domain))
+               WARN_ONCE(1, "AMD IOMMU WARNING: device %s already bound "
+                         "to a non-dma-ops domain\n", dev_name(dev));
+
+       switch (action) {
+       case BUS_NOTIFY_BOUND_DRIVER:
+               if (domain)
+                       goto out;
+               dma_domain = find_protection_domain(devid);
+               if (!dma_domain)
+                       dma_domain = iommu->default_dom;
+               attach_device(iommu, &dma_domain->domain, devid);
+               printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
+                      "device %s\n", dma_domain->domain.id, dev_name(dev));
+               break;
+       case BUS_NOTIFY_UNBIND_DRIVER:
+               if (!domain)
+                       goto out;
+               detach_device(domain, devid);
+               break;
+       case BUS_NOTIFY_ADD_DEVICE:
+               /* allocate a protection domain if a device is added */
+               dma_domain = find_protection_domain(devid);
+               if (dma_domain)
+                       goto out;
+               dma_domain = dma_ops_domain_alloc(iommu, order);
+               if (!dma_domain)
+                       goto out;
+               dma_domain->target_dev = devid;
+
+               spin_lock_irqsave(&iommu_pd_list_lock, flags);
+               list_add_tail(&dma_domain->list, &iommu_pd_list);
+               spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+
+               break;
+       default:
+               goto out;
+       }
+
+       iommu_queue_inv_dev_entry(iommu, devid);
+       iommu_completion_wait(iommu);
+
+out:
+       return 0;
+}
+
+static struct notifier_block device_nb = {
+       .notifier_call = device_change_notifier,
+};
+
 /*****************************************************************************
  *
  * The next functions belong to the dma_ops mapping/unmapping code.
@@ -816,7 +1065,6 @@ static struct dma_ops_domain *find_protection_domain(u16 devid)
        list_for_each_entry(entry, &iommu_pd_list, list) {
                if (entry->target_dev == devid) {
                        ret = entry;
-                       list_del(&ret->list);
                        break;
                }
        }
@@ -867,14 +1115,13 @@ static int get_device_resources(struct device *dev,
                if (!dma_dom)
                        dma_dom = (*iommu)->default_dom;
                *domain = &dma_dom->domain;
-               set_device_domain(*iommu, *domain, *bdf);
+               attach_device(*iommu, *domain, *bdf);
                printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
-                               "device ", (*domain)->id);
-               print_devid(_bdf, 1);
+                               "device %s\n", (*domain)->id, dev_name(dev));
        }
 
        if (domain_for_device(_bdf) == NULL)
-               set_device_domain(*iommu, *domain, _bdf);
+               attach_device(*iommu, *domain, _bdf);
 
        return 1;
 }
@@ -1033,6 +1280,8 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
        dma_addr_t addr;
        u64 dma_mask;
 
+       INC_STATS_COUNTER(cnt_map_single);
+
        if (!check_device(dev))
                return bad_dma_address;
 
@@ -1044,6 +1293,9 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
                /* device not handled by any AMD IOMMU */
                return (dma_addr_t)paddr;
 
+       if (!dma_ops_domain(domain))
+               return bad_dma_address;
+
        spin_lock_irqsave(&domain->lock, flags);
        addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
                            dma_mask);
@@ -1074,6 +1326,9 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
                /* device not handled by any AMD IOMMU */
                return;
 
+       if (!dma_ops_domain(domain))
+               return;
+
        spin_lock_irqsave(&domain->lock, flags);
 
        __unmap_single(iommu, domain->priv, dma_addr, size, dir);
@@ -1128,6 +1383,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
        if (!iommu || !domain)
                return map_sg_no_iommu(dev, sglist, nelems, dir);
 
+       if (!dma_ops_domain(domain))
+               return 0;
+
        spin_lock_irqsave(&domain->lock, flags);
 
        for_each_sg(sglist, s, nelems, i) {
@@ -1181,6 +1439,9 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
            !get_device_resources(dev, &iommu, &domain, &devid))
                return;
 
+       if (!dma_ops_domain(domain))
+               return;
+
        spin_lock_irqsave(&domain->lock, flags);
 
        for_each_sg(sglist, s, nelems, i) {
@@ -1226,6 +1487,9 @@ static void *alloc_coherent(struct device *dev, size_t size,
                return virt_addr;
        }
 
+       if (!dma_ops_domain(domain))
+               goto out_free;
+
        if (!dma_mask)
                dma_mask = *dev->dma_mask;
 
@@ -1234,18 +1498,20 @@ static void *alloc_coherent(struct device *dev, size_t size,
        *dma_addr = __map_single(dev, iommu, domain->priv, paddr,
                                 size, DMA_BIDIRECTIONAL, true, dma_mask);
 
-       if (*dma_addr == bad_dma_address) {
-               free_pages((unsigned long)virt_addr, get_order(size));
-               virt_addr = NULL;
-               goto out;
-       }
+       if (*dma_addr == bad_dma_address) {
+               spin_unlock_irqrestore(&domain->lock, flags);
+               goto out_free;
+       }
 
        iommu_completion_wait(iommu);
 
-out:
        spin_unlock_irqrestore(&domain->lock, flags);
 
        return virt_addr;
+
+out_free:
+       free_pages((unsigned long)virt_addr, get_order(size));
+
+       return NULL;
 }
 
 /*
@@ -1267,6 +1533,9 @@ static void free_coherent(struct device *dev, size_t size,
        if (!iommu || !domain)
                goto free_mem;
 
+       if (!dma_ops_domain(domain))
+               goto free_mem;
+
        spin_lock_irqsave(&domain->lock, flags);
 
        __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
@@ -1319,7 +1588,7 @@ void prealloc_protection_domains(void)
        u16 devid;
 
        while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-               devid = (dev->bus->number << 8) | dev->devfn;
+               devid = calc_devid(dev->bus->number, dev->devfn);
                if (devid > amd_iommu_last_bdf)
                        continue;
                devid = amd_iommu_alias_table[devid];
@@ -1366,6 +1635,7 @@ int __init amd_iommu_init_dma_ops(void)
                iommu->default_dom = dma_ops_domain_alloc(iommu, order);
                if (iommu->default_dom == NULL)
                        return -ENOMEM;
+               iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
                ret = iommu_init_unity_mappings(iommu);
                if (ret)
                        goto free_domains;
@@ -1389,6 +1659,14 @@ int __init amd_iommu_init_dma_ops(void)
        /* Make the driver finally visible to the drivers */
        dma_ops = &amd_iommu_dma_ops;
 
+#ifdef CONFIG_IOMMU_API
+       register_iommu(&amd_iommu_ops);
+#endif
+
+       bus_register_notifier(&pci_bus_type, &device_nb);
+
+       amd_iommu_stats_init();
+
        return 0;
 
 free_domains:
@@ -1400,3 +1678,227 @@ free_domains:
 
        return ret;
 }
+
+/*****************************************************************************
+ *
+ * The following functions belong to the exported interface of AMD IOMMU
+ *
+ * This interface allows access to lower level functions of the IOMMU
+ * like protection domain handling and assignment of devices to domains,
+ * which is not possible with the dma_ops interface.
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IOMMU_API
+
+static void cleanup_domain(struct protection_domain *domain)
+{
+       unsigned long flags;
+       u16 devid;
+
+       write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+
+       for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
+               if (amd_iommu_pd_table[devid] == domain)
+                       __detach_device(domain, devid);
+
+       write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+}
+
+static int amd_iommu_domain_init(struct iommu_domain *dom)
+{
+       struct protection_domain *domain;
+
+       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+       if (!domain)
+               return -ENOMEM;
+
+       spin_lock_init(&domain->lock);
+       domain->mode = PAGE_MODE_3_LEVEL;
+       domain->id = domain_id_alloc();
+       if (!domain->id)
+               goto out_free;
+       domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+       if (!domain->pt_root) {
+               domain_id_free(domain->id);
+               goto out_free;
+       }
+
+       dom->priv = domain;
+
+       return 0;
+
+out_free:
+       kfree(domain);
+
+       return -ENOMEM;
+}
+
+static void amd_iommu_domain_destroy(struct iommu_domain *dom)
+{
+       struct protection_domain *domain = dom->priv;
+
+       if (!domain)
+               return;
+
+       if (domain->dev_cnt > 0)
+               cleanup_domain(domain);
+
+       BUG_ON(domain->dev_cnt != 0);
+
+       free_pagetable(domain);
+
+       domain_id_free(domain->id);
+
+       kfree(domain);
+
+       dom->priv = NULL;
+}
+
+static void amd_iommu_detach_device(struct iommu_domain *dom,
+                                   struct device *dev)
+{
+       struct protection_domain *domain = dom->priv;
+       struct amd_iommu *iommu;
+       struct pci_dev *pdev;
+       u16 devid;
+
+       if (dev->bus != &pci_bus_type)
+               return;
+
+       pdev = to_pci_dev(dev);
+
+       devid = calc_devid(pdev->bus->number, pdev->devfn);
+
+       if (devid > 0)
+               detach_device(domain, devid);
+
+       iommu = amd_iommu_rlookup_table[devid];
+       if (!iommu)
+               return;
+
+       iommu_queue_inv_dev_entry(iommu, devid);
+       iommu_completion_wait(iommu);
+}
+
+static int amd_iommu_attach_device(struct iommu_domain *dom,
+                                  struct device *dev)
+{
+       struct protection_domain *domain = dom->priv;
+       struct protection_domain *old_domain;
+       struct amd_iommu *iommu;
+       struct pci_dev *pdev;
+       u16 devid;
+
+       if (dev->bus != &pci_bus_type)
+               return -EINVAL;
+
+       pdev = to_pci_dev(dev);
+
+       devid = calc_devid(pdev->bus->number, pdev->devfn);
+
+       if (devid >= amd_iommu_last_bdf ||
+                       devid != amd_iommu_alias_table[devid])
+               return -EINVAL;
+
+       iommu = amd_iommu_rlookup_table[devid];
+       if (!iommu)
+               return -EINVAL;
+
+       old_domain = domain_for_device(devid);
+       if (old_domain)
+               return -EBUSY;
+
+       attach_device(iommu, domain, devid);
+
+       iommu_completion_wait(iommu);
+
+       return 0;
+}
+
+static int amd_iommu_map_range(struct iommu_domain *dom,
+                              unsigned long iova, phys_addr_t paddr,
+                              size_t size, int iommu_prot)
+{
+       struct protection_domain *domain = dom->priv;
+       unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE);
+       int prot = 0;
+       int ret;
+
+       if (iommu_prot & IOMMU_READ)
+               prot |= IOMMU_PROT_IR;
+       if (iommu_prot & IOMMU_WRITE)
+               prot |= IOMMU_PROT_IW;
+
+       iova  &= PAGE_MASK;
+       paddr &= PAGE_MASK;
+
+       for (i = 0; i < npages; ++i) {
+               ret = iommu_map_page(domain, iova, paddr, prot);
+               if (ret)
+                       return ret;
+
+               iova  += PAGE_SIZE;
+               paddr += PAGE_SIZE;
+       }
+
+       return 0;
+}
+
+static void amd_iommu_unmap_range(struct iommu_domain *dom,
+                                 unsigned long iova, size_t size)
+{
+       struct protection_domain *domain = dom->priv;
+       unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE);
+
+       iova  &= PAGE_MASK;
+
+       for (i = 0; i < npages; ++i) {
+               iommu_unmap_page(domain, iova);
+               iova  += PAGE_SIZE;
+       }
+
+       iommu_flush_domain(domain->id);
+}
+
+static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
+                                         unsigned long iova)
+{
+       struct protection_domain *domain = dom->priv;
+       unsigned long offset = iova & ~PAGE_MASK;
+       phys_addr_t paddr;
+       u64 *pte;
+
+       pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(iova)];
+
+       if (!IOMMU_PTE_PRESENT(*pte))
+               return 0;
+
+       pte = IOMMU_PTE_PAGE(*pte);
+       pte = &pte[IOMMU_PTE_L1_INDEX(iova)];
+
+       if (!IOMMU_PTE_PRESENT(*pte))
+               return 0;
+
+       pte = IOMMU_PTE_PAGE(*pte);
+       pte = &pte[IOMMU_PTE_L0_INDEX(iova)];
+
+       if (!IOMMU_PTE_PRESENT(*pte))
+               return 0;
+
+       paddr  = *pte & IOMMU_PAGE_MASK;
+       paddr |= offset;
+
+       return paddr;
+}
+
+static struct iommu_ops amd_iommu_ops = {
+       .domain_init = amd_iommu_domain_init,
+       .domain_destroy = amd_iommu_domain_destroy,
+       .attach_dev = amd_iommu_attach_device,
+       .detach_dev = amd_iommu_detach_device,
+       .map = amd_iommu_map_range,
+       .unmap = amd_iommu_unmap_range,
+       .iova_to_phys = amd_iommu_iova_to_phys,
+};
+
+#endif
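
For context on how this ops table gets exercised: register_iommu() plugs
amd_iommu_ops into the generic IOMMU API of linux/iommu.h, and callers such as
KVM device assignment drive it through the iommu_* wrappers of that era
(iommu_domain_alloc(), iommu_map_range() and friends). A hedged sketch of such
a consumer; example_assign_device() and its parameters are illustrative, not
part of this patch:

    #include <linux/iommu.h>
    #include <linux/pci.h>

    /* Map [paddr, paddr + size) at IOVA 0 for a device via the IOMMU API. */
    static int example_assign_device(struct pci_dev *pdev,
                                     phys_addr_t paddr, size_t size)
    {
            struct iommu_domain *dom;
            int ret;

            dom = iommu_domain_alloc();         /* -> amd_iommu_domain_init() */
            if (!dom)
                    return -ENOMEM;

            /* -> amd_iommu_attach_device(), writes the device table entry */
            ret = iommu_attach_device(dom, &pdev->dev);
            if (ret)
                    goto out_free;

            /* -> amd_iommu_map_range(), fills the 3-level page table */
            ret = iommu_map_range(dom, 0, paddr, size,
                                  IOMMU_READ | IOMMU_WRITE);
            if (ret)
                    goto out_detach;

            return 0;

    out_detach:
            iommu_detach_device(dom, &pdev->dev);
    out_free:
            iommu_domain_free(dom);             /* -> amd_iommu_domain_destroy() */
            return ret;
    }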