intel-iommu: Defer the iotlb flush and iova free for intel_unmap_sg() too.
author    David Woodhouse <David.Woodhouse@intel.com>
          Tue, 14 Jul 2009 00:55:11 +0000 (01:55 +0100)
committer David Woodhouse <David.Woodhouse@intel.com>
          Wed, 15 Jul 2009 07:17:23 +0000 (08:17 +0100)
I see no reason why we did this _only_ in intel_unmap_page().

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
drivers/pci/intel-iommu.c

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index d6a8573..ee48fd0 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2815,11 +2815,18 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
        /* free page tables */
        dma_pte_free_pagetable(domain, start_pfn, last_pfn);
 
-       iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-                             (last_pfn - start_pfn + 1));
-
-       /* free iova */
-       __free_iova(&domain->iovad, iova);
+       if (intel_iommu_strict) {
+               iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+                                     last_pfn - start_pfn + 1);
+               /* free iova */
+               __free_iova(&domain->iovad, iova);
+       } else {
+               add_unmap(domain, iova);
+               /*
+                * queue up the release of the unmap to save the 1/6th of the
+                * cpu used up by the iotlb flush operation...
+                */
+       }
 }
 
 static int intel_nontranslate_map_sg(struct device *hddev,
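
For context, the deferred path that intel_unmap_page() already uses (add_unmap() followed by a later batched flush) trades a per-unmap IOTLB flush for one global flush covering many queued ranges, at the cost of stale IOTLB entries surviving until that batch runs; intel_iommu_strict keeps the synchronous behaviour available. The standalone C sketch below only illustrates that batching idea under assumed, simplified names (struct iova fields, unmap_range(), flush_deferred(), strict_mode, a fixed-size queue); it is not the driver's implementation, which at the time of this commit queues entries per IOMMU and flushes from a timer or when a high-water mark is reached.

#include <stdio.h>

/* Hypothetical stand-ins for the driver's IOVA and flush machinery. */
struct iova { unsigned long pfn_lo, pfn_hi; };

#define DEFERRED_MAX 8

static struct iova *deferred[DEFERRED_MAX];
static int deferred_count;
static int strict_mode;                 /* loosely analogous to intel_iommu_strict */

/* Strict path: flush and release one range immediately. */
static void tlb_flush_range(struct iova *iova)
{
        printf("flush + free [%lx, %lx]\n", iova->pfn_lo, iova->pfn_hi);
}

/* Batch path: one flush covers every queued range, then the
 * ranges are released together. */
static void flush_deferred(void)
{
        int i;

        printf("single batched flush for %d queued ranges\n", deferred_count);
        for (i = 0; i < deferred_count; i++)
                printf("free [%lx, %lx]\n",
                       deferred[i]->pfn_lo, deferred[i]->pfn_hi);
        deferred_count = 0;
}

/* Unmap path mirroring the patch: flush per range in strict mode,
 * otherwise queue the range and let a later batch flush reclaim it. */
static void unmap_range(struct iova *iova)
{
        if (strict_mode) {
                tlb_flush_range(iova);
        } else {
                deferred[deferred_count++] = iova;
                if (deferred_count == DEFERRED_MAX)
                        flush_deferred();
        }
}

int main(void)
{
        struct iova a = { 0x100, 0x10f }, b = { 0x200, 0x23f };

        unmap_range(&a);
        unmap_range(&b);
        flush_deferred();       /* in the driver a timer serves this role */
        return 0;
}

The saving the in-code comment refers to comes from replacing one IOTLB flush per unmapped range with a single batched flush; the patch simply lets intel_unmap_sg() benefit from the same queue that intel_unmap_page() already feeds.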