Merge branch 'stable-3.2' into pandora-3.2
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 595ecd2..aa07f59 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -173,7 +173,8 @@ find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_
        read_lock_irqsave(&device_info->lock, flags);
 
        list_for_each_entry(b, &device_info->safe_buffers, node)
-               if (b->safe_dma_addr == safe_dma_addr) {
+               if (b->safe_dma_addr <= safe_dma_addr &&
+                   b->safe_dma_addr + b->size > safe_dma_addr) {
                        rb = b;
                        break;
                }
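The lookup above no longer requires an exact match on a safe buffer's base address: any DMA handle inside [safe_dma_addr, safe_dma_addr + size) now resolves to that buffer, which is what lets the sync paths further down recover a partial-sync offset from the handle itself. A minimal sketch of the containment test (the helper name is illustrative, not kernel code):

static inline int addr_in_safe_buffer(dma_addr_t addr,
		dma_addr_t base, size_t size)
{
	/* match anywhere inside the bounce buffer, not only at its start */
	return base <= addr && addr < base + size;
}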
@@ -254,7 +255,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
        if (buf == NULL) {
                dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
                       __func__, ptr);
-               return ~0;
+               return DMA_ERROR_CODE;
        }
 
        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
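Returning DMA_ERROR_CODE instead of an open-coded ~0 reports a failed bounce through the same value the rest of the ARM DMA layer uses, so drivers can detect it with dma_mapping_error(). A caller-side sketch; the function and variable names are placeholders:

static int example_map_buffer(struct device *dev, void *buf, size_t len,
		dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;		/* mapping (or bouncing) failed */
	return 0;
}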
@@ -307,8 +308,9 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, enum dma_data_direction dir)
+static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
 {
        dma_addr_t dma_addr;
        int ret;
@@ -320,21 +322,20 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 
        ret = needs_bounce(dev, dma_addr, size);
        if (ret < 0)
-               return ~0;
+               return DMA_ERROR_CODE;
 
        if (ret == 0) {
-               __dma_page_cpu_to_dev(page, offset, size, dir);
+               arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
                return dma_addr;
        }
 
        if (PageHighMem(page)) {
                dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
-               return ~0;
+               return DMA_ERROR_CODE;
        }
 
        return map_single(dev, page_address(page) + offset, size, dir);
 }
-EXPORT_SYMBOL(__dma_map_page);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
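With the export gone, nothing calls this function by name any more; it is reached through the per-device dma_map_ops table installed at registration time (dmabounce_ops below). A simplified dispatch sketch, assuming the generic get_dma_ops() accessor and omitting the debug hooks and attrs handling:

static inline dma_addr_t example_dma_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* a dmabounce device ends up in dmabounce_map_page() here */
	return ops->map_page(dev, page, offset, size, dir, NULL);
}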
@@ -342,8 +343,8 @@ EXPORT_SYMBOL(__dma_map_page);
  * the safe buffer.  (basically return things back to the way they
  * should be)
  */
-void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
-               enum dma_data_direction dir)
+static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+               enum dma_data_direction dir, struct dma_attrs *attrs)
 {
        struct safe_buffer *buf;
 
@@ -352,31 +353,32 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 
        buf = find_safe_buffer_dev(dev, dma_addr, __func__);
        if (!buf) {
-               __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
-                       dma_addr & ~PAGE_MASK, size, dir);
+               arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
                return;
        }
 
        unmap_single(dev, buf, size, dir);
 }
-EXPORT_SYMBOL(__dma_unmap_page);
 
-int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
-               unsigned long off, size_t sz, enum dma_data_direction dir)
+static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+               size_t sz, enum dma_data_direction dir)
 {
        struct safe_buffer *buf;
+       unsigned long off;
 
-       dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-               __func__, addr, off, sz, dir);
+       dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+               __func__, addr, sz, dir);
 
        buf = find_safe_buffer_dev(dev, addr, __func__);
        if (!buf)
                return 1;
 
+       off = addr - buf->safe_dma_addr;
+
        BUG_ON(buf->direction != dir);
 
-       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-               __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+               __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
                buf->safe, buf->safe_dma_addr);
 
        DO_STATS(dev->archdata.dmabounce->bounce_count++);
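Because find_safe_buffer() now matches by range, the offset of a partial sync is recovered from the handle itself (off = addr - buf->safe_dma_addr) rather than passed in by the caller, which is why the old off argument disappears from these helpers. A simplified sketch of the copy-back using the recovered offset, mirroring the code elided between these hunks:

static void example_copy_back(struct safe_buffer *buf, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	unsigned long off = addr - buf->safe_dma_addr;

	/* data written by the device lands in the safe buffer; copy it back */
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		memcpy(buf->ptr + off, buf->safe + off, sz);
}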
@@ -388,24 +390,35 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
        }
        return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
-int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
-               unsigned long off, size_t sz, enum dma_data_direction dir)
+static void dmabounce_sync_for_cpu(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
+               return;
+
+       arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
+}
+
+static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+               size_t sz, enum dma_data_direction dir)
 {
        struct safe_buffer *buf;
+       unsigned long off;
 
-       dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-               __func__, addr, off, sz, dir);
+       dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+               __func__, addr, sz, dir);
 
        buf = find_safe_buffer_dev(dev, addr, __func__);
        if (!buf)
                return 1;
 
+       off = addr - buf->safe_dma_addr;
+
        BUG_ON(buf->direction != dir);
 
-       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-               __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+               __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
                buf->safe, buf->safe_dma_addr);
 
        DO_STATS(dev->archdata.dmabounce->bounce_count++);
@@ -417,7 +430,38 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
        }
        return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_device);
+
+static void dmabounce_sync_for_device(struct device *dev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       if (!__dmabounce_sync_for_device(dev, handle, size, dir))
+               return;
+
+       arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
+}
+
+static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+{
+       if (dev->archdata.dmabounce)
+               return 0;
+
+       return arm_dma_ops.set_dma_mask(dev, dma_mask);
+}
+
+static struct dma_map_ops dmabounce_ops = {
+       .alloc                  = arm_dma_alloc,
+       .free                   = arm_dma_free,
+       .mmap                   = arm_dma_mmap,
+       .map_page               = dmabounce_map_page,
+       .unmap_page             = dmabounce_unmap_page,
+       .sync_single_for_cpu    = dmabounce_sync_for_cpu,
+       .sync_single_for_device = dmabounce_sync_for_device,
+       .map_sg                 = arm_dma_map_sg,
+       .unmap_sg               = arm_dma_unmap_sg,
+       .sync_sg_for_cpu        = arm_dma_sync_sg_for_cpu,
+       .sync_sg_for_device     = arm_dma_sync_sg_for_device,
+       .set_dma_mask           = dmabounce_set_mask,
+};
 
 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
                const char *name, unsigned long size)
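The ops table only overrides the single-buffer paths plus set_dma_mask; coherent alloc/free/mmap and the scatterlist operations reuse the generic ARM implementations. Reusing arm_dma_map_sg works because it walks the list and maps each segment through the per-device .map_page callback, which for a dmabounce device is dmabounce_map_page(). dmabounce_set_mask accepts mask changes on an already-registered device without touching the mask and defers to arm_dma_ops.set_dma_mask otherwise. A simplified sketch of the sg reuse (error handling omitted, based on the shape of the generic implementation):

static int example_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
					       s->length, dir, NULL);
	return nents;
}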
@@ -479,6 +523,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 #endif
 
        dev->archdata.dmabounce = device_info;
+       set_dma_ops(dev, &dmabounce_ops);
 
        dev_info(dev, "dmabounce: registered device\n");
 
@@ -497,6 +542,7 @@ void dmabounce_unregister_dev(struct device *dev)
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 
        dev->archdata.dmabounce = NULL;
+       set_dma_ops(dev, NULL);
 
        if (!device_info) {
                dev_warn(dev,