x86/amd-iommu: Don't detach device from pt domain on driver unbind
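
In passthrough mode every device is attached to the global pt_domain, so a driver unbind must not detach it from that domain. Skip the detach in the BUS_NOTIFY_UNBOUND_DRIVER notifier when iommu_pass_through is set, and reattach a device to pt_domain whenever it is detached from any other domain. To support this, split attach_device() into an __attach_device() helper that runs with the device table lock already held, factor out protection_domain_alloc()/protection_domain_free(), and add amd_iommu_init_passthrough(), which allocates pt_domain and attaches every PCI device (and its alias) to it at boot.
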
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 6c99f50..12a541d 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -41,6 +41,12 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 static LIST_HEAD(iommu_pd_list);
 static DEFINE_SPINLOCK(iommu_pd_list_lock);
 
+/*
+ * Domain for untranslated devices - only allocated
+ * if iommu=pt passed on kernel cmd line.
+ */
+static struct protection_domain *pt_domain;
+
 #ifdef CONFIG_IOMMU_API
 static struct iommu_ops amd_iommu_ops;
 #endif
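
The iommu=pt option mentioned in the comment above is expected to set the global iommu_pass_through flag that this patch tests. A minimal sketch of that wiring, assuming the generic x86 iommu= early-param parser (the parser is not part of this patch and the details here are an assumption):

/*
 * Sketch (assumption): how "iommu=pt" on the kernel command line could
 * set the flag consulted by the code below.  The real parser lives in
 * the generic x86 DMA setup code, not in this file.
 */
int iommu_pass_through __read_mostly;

static int __init iommu_setup(char *p)
{
	while (p && *p) {
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;
		/* ... other iommu= sub-options are handled here ... */
		p += strcspn(p, ",");
		if (*p == ',')
			p++;
	}
	return 0;
}
early_param("iommu", iommu_setup);
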
@@ -1067,32 +1073,48 @@ static struct protection_domain *domain_for_device(u16 devid)
  * If a device is not yet associated with a domain, this function
  * assigns it to a domain and makes it visible to the hardware
  */
-static void attach_device(struct amd_iommu *iommu,
-                         struct protection_domain *domain,
-                         u16 devid)
+static void __attach_device(struct amd_iommu *iommu,
+                           struct protection_domain *domain,
+                           u16 devid)
 {
-       unsigned long flags;
-       u64 pte_root = virt_to_phys(domain->pt_root);
+       u64 pte_root;
 
-       domain->dev_cnt += 1;
+       /* lock domain */
+       spin_lock(&domain->lock);
+
+       pte_root = virt_to_phys(domain->pt_root);
 
        pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
                    << DEV_ENTRY_MODE_SHIFT;
        pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
 
-       write_lock_irqsave(&amd_iommu_devtable_lock, flags);
-       amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
-       amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
        amd_iommu_dev_table[devid].data[2] = domain->id;
+       amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
+       amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
 
        amd_iommu_pd_table[devid] = domain;
+
+       domain->dev_cnt += 1;
+
+       /* ready */
+       spin_unlock(&domain->lock);
+}
+
+static void attach_device(struct amd_iommu *iommu,
+                         struct protection_domain *domain,
+                         u16 devid)
+{
+       unsigned long flags;
+
+       write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+       __attach_device(iommu, domain, devid);
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
-       /*
-        * We might boot into a crash-kernel here. The crashed kernel
-        * left the caches in the IOMMU dirty. So we have to flush
-        * here to evict all dirty stuff.
-        */
+       /*
+        * We might boot into a crash-kernel here. The crashed kernel
+        * left the caches in the IOMMU dirty. So we have to flush
+        * here to evict all dirty stuff.
+        */
        iommu_queue_inv_dev_entry(iommu, devid);
        iommu_flush_tlb_pde(iommu, domain->id);
 }
@@ -1119,6 +1141,15 @@ static void __detach_device(struct protection_domain *domain, u16 devid)
 
        /* ready */
        spin_unlock(&domain->lock);
+
+       /*
+        * If we run in passthrough mode, the device must be assigned to the
+        * passthrough domain when it is detached from any other domain
+        */
+       if (iommu_pass_through) {
+               struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+               __attach_device(iommu, pt_domain, devid);
+       }
 }
 
 /*
@@ -1164,6 +1195,8 @@ static int device_change_notifier(struct notifier_block *nb,
        case BUS_NOTIFY_UNBOUND_DRIVER:
                if (!domain)
                        goto out;
+               if (iommu_pass_through)
+                       break;
                detach_device(domain, devid);
                break;
        case BUS_NOTIFY_ADD_DEVICE:
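
For context, the BUS_NOTIFY_UNBOUND_DRIVER case handled above reaches device_change_notifier() through a PCI bus notifier. A minimal sketch of that registration, assuming the usual notifier_block/bus_register_notifier() pattern (the registration site is not shown in this diff, and register_device_notifier() is a hypothetical wrapper for illustration):

/*
 * Sketch (assumption): how device_change_notifier() is delivered
 * BUS_NOTIFY_* events such as BUS_NOTIFY_UNBOUND_DRIVER.
 */
static struct notifier_block device_nb = {
	.notifier_call = device_change_notifier,
};

static void __init register_device_notifier(void)	/* hypothetical wrapper */
{
	bus_register_notifier(&pci_bus_type, &device_nb);
}
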
@@ -1988,19 +2021,47 @@ static void cleanup_domain(struct protection_domain *domain)
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 }
 
-static int amd_iommu_domain_init(struct iommu_domain *dom)
+static void protection_domain_free(struct protection_domain *domain)
+{
+       if (!domain)
+               return;
+
+       if (domain->id)
+               domain_id_free(domain->id);
+
+       kfree(domain);
+}
+
+static struct protection_domain *protection_domain_alloc(void)
 {
        struct protection_domain *domain;
 
        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
-               return -ENOMEM;
+               return NULL;
 
        spin_lock_init(&domain->lock);
-       domain->mode = PAGE_MODE_3_LEVEL;
        domain->id = domain_id_alloc();
        if (!domain->id)
+               goto out_err;
+
+       return domain;
+
+out_err:
+       kfree(domain);
+
+       return NULL;
+}
+
+static int amd_iommu_domain_init(struct iommu_domain *dom)
+{
+       struct protection_domain *domain;
+
+       domain = protection_domain_alloc();
+       if (!domain)
                goto out_free;
+
+       domain->mode    = PAGE_MODE_3_LEVEL;
        domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
        if (!domain->pt_root)
                goto out_free;
@@ -2010,7 +2071,7 @@ static int amd_iommu_domain_init(struct iommu_domain *dom)
        return 0;
 
 out_free:
-       kfree(domain);
+       protection_domain_free(domain);
 
        return -ENOMEM;
 }
@@ -2191,3 +2252,46 @@ static struct iommu_ops amd_iommu_ops = {
        .domain_has_cap = amd_iommu_domain_has_cap,
 };
 
+/*****************************************************************************
+ *
+ * The next functions do a basic initialization of the IOMMU for passthrough
+ * mode.
+ *
+ * In passthrough mode the IOMMU is initialized and enabled but not used for
+ * DMA-API translation.
+ *
+ *****************************************************************************/
+
+int __init amd_iommu_init_passthrough(void)
+{
+       struct pci_dev *dev = NULL;
+       u16 devid, devid2;
+
+       /* allocate passthrough domain */
+       pt_domain = protection_domain_alloc();
+       if (!pt_domain)
+               return -ENOMEM;
+
+       pt_domain->mode |= PAGE_MODE_NONE;
+
+       while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+               struct amd_iommu *iommu;
+
+               devid = calc_devid(dev->bus->number, dev->devfn);
+               if (devid > amd_iommu_last_bdf)
+                       continue;
+
+               devid2 = amd_iommu_alias_table[devid];
+
+               iommu = amd_iommu_rlookup_table[devid2];
+               if (!iommu)
+                       continue;
+
+               __attach_device(iommu, pt_domain, devid);
+               __attach_device(iommu, pt_domain, devid2);
+       }
+
+       pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
+
+       return 0;
+}
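
For context, a minimal sketch of how the new entry point would be selected instead of the normal DMA-API setup during AMD IOMMU initialization; the surrounding init code is an assumption here and not part of this diff:

	/*
	 * Sketch (assumption): mode selection in the AMD IOMMU init path.
	 * With iommu=pt the passthrough setup above is used instead of the
	 * DMA-API initialization.
	 */
	if (iommu_pass_through)
		ret = amd_iommu_init_passthrough();
	else
		ret = amd_iommu_init_dma_ops();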