nouveau/ttm/PCIe: Use dma_addr if TTM has set it.
Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Thu, 2 Dec 2010 16:36:24 +0000 (11:36 -0500)
Committer: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Fri, 18 Feb 2011 22:27:49 +0000 (17:27 -0500)
If the TTM layer has used the DMA API to setup pages that are
TTM_PAGE_FLAG_DMA32 (look at patch titled: "ttm: Utilize the
DMA API for pages that have TTM_PAGE_FLAG_DMA32 set"), lets
use it when programming the GART in the PCIe type cards.

This patch skips doing the pci_map_page (and pci_unmap_page) if
there is a DMA address passed in for that page. If the dma_address
is zero (or DMA_ERROR_CODE), then we continue on with our old
behaviour.

[v2: Added a Reviewed-by tag]

Reviewed-by: Thomas Hellstrom <thomas@shipmail.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Ian Campbell <ian.campbell@citrix.com>
drivers/gpu/drm/nouveau/nouveau_sgdma.c

index edc140a..bbdd982 100644 (file)
@@ -12,6 +12,7 @@ struct nouveau_sgdma_be {
        struct drm_device *dev;
 
        dma_addr_t *pages;
+       bool *ttm_alloced;
        unsigned nr_pages;
 
        unsigned pte_start;
@@ -35,15 +36,25 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
        if (!nvbe->pages)
                return -ENOMEM;
 
+       nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
+       if (!nvbe->ttm_alloced)
+               return -ENOMEM;
+
        nvbe->nr_pages = 0;
        while (num_pages--) {
-               nvbe->pages[nvbe->nr_pages] =
-                       pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
+               if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
+                       nvbe->pages[nvbe->nr_pages] =
+                                       dma_addrs[nvbe->nr_pages];
+                       nvbe->ttm_alloced[nvbe->nr_pages] = true;
+               } else {
+                       nvbe->pages[nvbe->nr_pages] =
+                               pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
                                     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-               if (pci_dma_mapping_error(dev->pdev,
-                                         nvbe->pages[nvbe->nr_pages])) {
-                       be->func->clear(be);
-                       return -EFAULT;
+                       if (pci_dma_mapping_error(dev->pdev,
+                                                 nvbe->pages[nvbe->nr_pages])) {
+                               be->func->clear(be);
+                               return -EFAULT;
+                       }
                }
 
                nvbe->nr_pages++;
@@ -66,11 +77,14 @@ nouveau_sgdma_clear(struct ttm_backend *be)
                        be->func->unbind(be);
 
                while (nvbe->nr_pages--) {
-                       pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+                       if (!nvbe->ttm_alloced[nvbe->nr_pages])
+                               pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
                kfree(nvbe->pages);
+               kfree(nvbe->ttm_alloced);
                nvbe->pages = NULL;
+               nvbe->ttm_alloced = NULL;
                nvbe->nr_pages = 0;
        }
 }