Merge branch 'drm-ttm-unmappable' into drm-core-next
[pandora-kernel.git] arch/microblaze/kernel/dma.c
index 300fea4..ce72dd4 100644
@@ -8,8 +8,10 @@
 
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/gfp.h>
 #include <linux/dma-debug.h>
 #include <asm/bug.h>
+#include <asm/cacheflush.h>
 
 /*
  * Generic direct DMA implementation
@@ -19,18 +21,37 @@
  * can set archdata.dma_data to an unsigned long holding the offset. By
  * default the offset is PCI_DRAM_OFFSET.
  */
+static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
+                               size_t size, enum dma_data_direction direction)
+{
+       switch (direction) {
+       case DMA_TO_DEVICE:
+               flush_dcache_range(paddr + offset, paddr + offset + size);
+               break;
+       case DMA_FROM_DEVICE:
+               invalidate_dcache_range(paddr + offset, paddr + offset + size);
+               break;
+       default:
+               BUG();
+       }
+}
 
 static unsigned long get_dma_direct_offset(struct device *dev)
 {
-       if (dev)
+       if (likely(dev))
                return (unsigned long)dev->archdata.dma_data;
 
        return PCI_DRAM_OFFSET; /* FIXME: not sure if this is correct */
 }
 
-void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+#define NOT_COHERENT_CACHE
+
+static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag)
 {
+#ifdef NOT_COHERENT_CACHE
+       return consistent_alloc(flag, size, dma_handle);
+#else
        void *ret;
        struct page *page;
        int node = dev_to_node(dev);
@@ -46,12 +67,17 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
        *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);
 
        return ret;
+#endif
 }
 
-void dma_direct_free_coherent(struct device *dev, size_t size,
+static void dma_direct_free_coherent(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle)
 {
+#ifdef NOT_COHERENT_CACHE
+       consistent_free(vaddr);
+#else
        free_pages((unsigned long)vaddr, get_order(size));
+#endif
 }
 
 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
@@ -61,10 +87,12 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
        struct scatterlist *sg;
        int i;
 
+       /* FIXME: this part of the code is untested */
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
                sg->dma_length = sg->length;
-               __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+               __dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
+                                                       sg->length, direction);
        }
 
        return nents;
@@ -85,11 +113,10 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
-                                            enum dma_data_direction dir,
+                                            enum dma_data_direction direction,
                                             struct dma_attrs *attrs)
 {
-       BUG_ON(dir == DMA_NONE);
-       __dma_sync_page(page, offset, size, dir);
+       __dma_sync_page(page_to_phys(page), offset, size, direction);
        return page_to_phys(page) + offset + get_dma_direct_offset(dev);
 }
 
@@ -99,6 +126,12 @@ static inline void dma_direct_unmap_page(struct device *dev,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
 {
+       /*
+        * No extra cache cleanup is needed beyond this sync: dma_address
+        * is already a physical address, which is what __dma_sync_page()
+        * expects, so no phys_to_virt() conversion is required here.
+        */
+       __dma_sync_page(dma_address, 0, size, direction);
 }
 
 struct dma_map_ops dma_direct_ops = {
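
For reference, a minimal driver-side sketch (not part of this patch) of how the
streaming DMA paths touched above are typically exercised on top of
dma_direct_ops. The function example_tx and its arguments are illustrative
names only, and error handling is kept to a minimum:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative only: example_tx is not part of the kernel or of this patch. */
static int example_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        /*
         * dma_map_single() ends up in dma_direct_map_page(), which on
         * microblaze flushes the D-cache range for DMA_TO_DEVICE via
         * __dma_sync_page() before handing back the bus address.
         */
        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -EIO;

        /* ... program the device with "handle" and run the transfer ... */

        /* With this patch, the unmap path syncs the range as well. */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}

Coherent allocations follow the other path changed here: dma_alloc_coherent()
lands in dma_direct_alloc_coherent(), which with NOT_COHERENT_CACHE defined
returns memory from consistent_alloc() instead of taking the page-allocator
path kept under #else.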