diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 4ee277a..94e8fc4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -59,6 +59,8 @@ static struct protection_domain *pt_domain;
 
 static struct iommu_ops amd_iommu_ops;
 
+static struct dma_map_ops amd_iommu_dma_ops;
+
 /*
 * general struct to manage commands sent to an IOMMU
  */
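
Forward declaration: device_change_notifier() further down now installs &amd_iommu_dma_ops into dev->archdata.dma_ops, while the structure itself is only defined later in the file.
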
@@ -381,12 +383,27 @@ static void dump_command(unsigned long phys_addr)
 
 static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 {
-       u32 *event = __evt;
-       int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
-       int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
-       int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
-       int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
-       u64 address = (u64)(((u64)event[3]) << 32) | event[2];
+       int type, devid, domid, flags;
+       volatile u32 *event = __evt;
+       int count = 0;
+       u64 address;
+
+retry:
+       type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
+       devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+       domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
+       flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+       address = (u64)(((u64)event[3]) << 32) | event[2];
+
+       if (type == 0) {
+               /* Did we hit the erratum? */
+               if (++count == LOOP_TIMEOUT) {
+                       pr_err("AMD-Vi: No event written to event log\n");
+                       return;
+               }
+               udelay(1);
+               goto retry;
+       }
 
        printk(KERN_ERR "AMD-Vi: Event logged [");
 
@@ -439,6 +456,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
        default:
                printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
        }
+
+       memset(__evt, 0, 4 * sizeof(u32));
 }
 
 static void iommu_poll_events(struct amd_iommu *iommu)
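
These two hunks work around a hardware erratum where the event-log interrupt can fire before the event entry has actually been written: a type field of 0 means the entry is not valid yet, so the code re-reads it (the volatile pointer forces a fresh load on each pass) for up to LOOP_TIMEOUT iterations, a microsecond apart, before giving up. Zeroing the entry once it has been printed ensures a stale entry is never mistaken for a fresh one on a later pass. A minimal userspace sketch of the same bounded-poll idiom; poll_entry_valid(), ENTRY_TIMEOUT and the field parameters are illustrative, not kernel API:

    #include <stdint.h>
    #include <unistd.h>    /* usleep() stands in for the kernel's udelay() */

    #define ENTRY_TIMEOUT 100000    /* illustrative bound, like LOOP_TIMEOUT */

    /*
     * Spin until a hardware-written log entry becomes valid (type != 0),
     * bounding the wait so a wedged device cannot hang the CPU forever.
     * 'volatile' makes the compiler reload the entry on every iteration.
     */
    static int poll_entry_valid(volatile uint32_t *entry,
                                unsigned int type_shift, uint32_t type_mask)
    {
        int count = 0;

        while (((entry[1] >> type_shift) & type_mask) == 0) {
            if (++count == ENTRY_TIMEOUT)
                return -1;          /* gave up: nothing was ever written */
            usleep(1);
        }
        return 0;                   /* entry is now safe to parse */
    }
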
@@ -622,7 +641,7 @@ again:
        next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
        left      = (head - next_tail) % iommu->cmd_buf_size;
 
-       if (left <= 2) {
+       if (left <= 0x20) {
                struct iommu_cmd sync_cmd;
                volatile u64 sem = 0;
                int ret;
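
head and tail are byte offsets into a circular command buffer, so (head - next_tail) % iommu->cmd_buf_size is the free space left in the ring; unsigned wraparound makes the subtraction safe even when head sits behind tail. Raising the threshold from 2 to 0x20 appears to reserve at least two 16-byte command slots (struct iommu_cmd is four u32s in this tree), leaving room to queue the COMPLETION_WAIT that drains the ring. A self-contained sketch of the arithmetic; ring_space_left() is an illustrative name:

    #include <stdint.h>

    /*
     * Free bytes in a circular buffer of 'size' bytes, with 'size' a
     * power of two: the modulo then agrees with plain 32-bit wraparound,
     * so the subtraction stays correct after 'head' wraps past 'tail'.
     */
    static uint32_t ring_space_left(uint32_t head, uint32_t tail,
                                    uint32_t entry_size, uint32_t size)
    {
        uint32_t next_tail = (tail + entry_size) % size;

        return (head - next_tail) % size;
    }

With the 8 KiB command buffer this tree allocates, left <= 0x20 triggers the drain once fewer than two slots remain free.
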
@@ -1057,6 +1076,10 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
 
                        /* Large PTE found which maps this address */
                        unmap_size = PTE_PAGE_SIZE(*pte);
+
+                       /* Only unmap from the first pte in the page */
+                       if ((unmap_size - 1) & bus_addr)
+                               break;
                        count      = PAGE_SIZE_PTE_COUNT(unmap_size);
                        for (i = 0; i < count; i++)
                                pte[i] = 0ULL;
@@ -1066,7 +1089,7 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
                unmapped += unmap_size;
        }
 
-       BUG_ON(!is_power_of_2(unmapped));
+       BUG_ON(unmapped && !is_power_of_2(unmapped));
 
        return unmapped;
 }
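
A large PTE maps a naturally aligned power-of-two region, so (unmap_size - 1) & bus_addr is non-zero exactly when bus_addr points into the middle of that region; the new break refuses to tear down a huge mapping unless the request starts at its first page. Because the loop can now exit having unmapped nothing, the BUG_ON must tolerate unmapped == 0 (zero is not a power of two). A one-liner capturing the alignment test:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * True iff addr is aligned to size, size a power of two: e.g. with a
     * 2 MiB large page (size = 0x200000), addr 0x40200000 passes while
     * addr 0x40201000 lands mid-mapping and fails, as in the hunk above.
     */
    static bool addr_aligned(uint64_t addr, uint64_t size)
    {
        return ((size - 1) & addr) == 0;
    }
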
@@ -1518,6 +1541,9 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
                kfree(dom->aperture[i]);
        }
 
+       if (dom->domain.id)
+               domain_id_free(dom->domain.id);
+
        kfree(dom);
 }
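
Previously dma_ops_domain_free() released the page tables and apertures but leaked the protection-domain ID; the IDs come from a fixed-size allocator, so repeated hotplug alloc/free cycles would eventually exhaust them. The guard on dom->domain.id skips partially constructed domains that never obtained an ID.
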
 
@@ -1846,20 +1872,27 @@ static int device_change_notifier(struct notifier_block *nb,
 
                iommu_init_device(dev);
 
+               if (iommu_pass_through) {
+                       attach_device(dev, pt_domain);
+                       break;
+               }
+
                domain = domain_for_device(dev);
 
                /* allocate a protection domain if a device is added */
                dma_domain = find_protection_domain(devid);
-               if (dma_domain)
-                       goto out;
-               dma_domain = dma_ops_domain_alloc();
-               if (!dma_domain)
-                       goto out;
-               dma_domain->target_dev = devid;
+               if (!dma_domain) {
+                       dma_domain = dma_ops_domain_alloc();
+                       if (!dma_domain)
+                               goto out;
+                       dma_domain->target_dev = devid;
+
+                       spin_lock_irqsave(&iommu_pd_list_lock, flags);
+                       list_add_tail(&dma_domain->list, &iommu_pd_list);
+                       spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+               }
 
-               spin_lock_irqsave(&iommu_pd_list_lock, flags);
-               list_add_tail(&dma_domain->list, &iommu_pd_list);
-               spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+               dev->archdata.dma_ops = &amd_iommu_dma_ops;
 
                break;
        case BUS_NOTIFY_DEL_DEVICE:
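
Two behavioural fixes for hotplugged devices land in this hunk: in pass-through mode the device is attached straight to pt_domain and the per-device DMA-domain machinery is skipped entirely, and in the normal case a protection domain is allocated only when none exists yet. The assignment to dev->archdata.dma_ops is the important part: before it, a device added after boot kept whatever global DMA ops were installed and so bypassed its IOMMU mappings on the DMA-API path.
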
@@ -2432,7 +2465,7 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
  * we don't need to preallocate the protection domains anymore.
  * For now we have to.
  */
-static void prealloc_protection_domains(void)
+static void __init prealloc_protection_domains(void)
 {
        struct pci_dev *dev = NULL;
        struct dma_ops_domain *dma_dom;
@@ -2479,6 +2512,9 @@ static unsigned device_dma_ops_init(void)
 
        for_each_pci_dev(pdev) {
                if (!check_device(&pdev->dev)) {
+
+                       iommu_ignore_device(&pdev->dev);
+
                        unhandled += 1;
                        continue;
                }
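
Devices that check_device() rejects are no longer merely counted: iommu_ignore_device() also clears their device-table and rlookup entries, so the driver no longer claims a device it will never manage and that device's DMA stays untranslated. The unhandled count still tells the caller whether swiotlb must be kept around for such devices.
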
@@ -2559,14 +2595,16 @@ free_domains:
 
 static void cleanup_domain(struct protection_domain *domain)
 {
-       struct iommu_dev_data *dev_data, *next;
+       struct iommu_dev_data *entry;
        unsigned long flags;
 
        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 
-       list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
-               __detach_device(dev_data);
-               atomic_set(&dev_data->bind, 0);
+       while (!list_empty(&domain->dev_list)) {
+               entry = list_first_entry(&domain->dev_list,
+                                        struct iommu_dev_data, list);
+               __detach_device(entry);
+               atomic_set(&entry->bind, 0);
        }
 
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
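
list_for_each_entry_safe() only survives deletion of the *current* entry, but __detach_device() can remove other list members as well (a device and its alias share dev_data), which can leave the prefetched next pointer dangling. Restarting from list_first_entry() after every detach is correct no matter how many entries each call unlinks. A sketch of the drain idiom on a plain singly linked list; struct node and drain() are illustrative:

    #include <stddef.h>

    struct node {
        struct node *next;
    };

    /*
     * Drain a list whose per-entry handler may unlink *several* nodes,
     * not just the one it was handed: never cache a next pointer across
     * the call; always restart from the head, as cleanup_domain() now
     * does with list_first_entry() under the devtable lock.
     */
    static void drain(struct node **head,
                      void (*detach)(struct node **head, struct node *n))
    {
        while (*head != NULL)
            detach(head, *head);    /* may remove more than one node */
    }
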
@@ -2734,6 +2772,7 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
        mutex_unlock(&domain->api_lock);
 
        domain_flush_tlb_pde(domain);
+       domain_flush_complete(domain);
 
        return get_order(unmap_size);
 }
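
domain_flush_tlb_pde() only queues INVALIDATE_IOMMU_PAGES commands; without waiting, amd_iommu_unmap() could return while the IOTLB still holds the old translation, leaving a window in which in-flight DMA reaches the just-unmapped pages. domain_flush_complete() issues a COMPLETION_WAIT and spins until the hardware acknowledges it, so the unmap is actually finished when the function returns.
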