Merge branch 'upstream-greg' of gregkh@master.kernel.org:/pub/scm/linux/kernel/git...
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index cbf2165..028bdc9 100644
@@ -5,7 +5,7 @@
  *  limited DMA windows. These functions utilize bounce buffers to
  *  copy data to/from buffers located outside the DMA region. This
  *  only works for systems in which DMA memory is at the bottom of
- *  RAM and the remainder of memory is at the top an the DMA memory
+ *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that, such as discontiguous
 *  DMA windows, will require custom implementations that reserve memory
  *  areas at early bootup.
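
For context, a platform opts a device into these bounce buffers by
registering it together with the two pool sizes. A minimal sketch of a
hypothetical caller (the 512/4096 thresholds are illustrative, not part
of this patch):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical probe path for a DMA-window-limited device. */
	static int example_probe(struct device *dev)
	{
		/*
		 * Mappings up to 512 bytes come from the small pool,
		 * up to 4096 bytes from the large pool; anything bigger
		 * falls back to dma_alloc_coherent() inside dmabounce.
		 */
		return dmabounce_register_dev(dev, 512, 4096);
	}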
@@ -33,8 +33,8 @@
 #include <asm/cacheflush.h>
 
 #undef DEBUG
-
 #undef STATS
+
 #ifdef STATS
 #define DO_STATS(X) do { X ; } while (0)
 #else
@@ -52,26 +52,33 @@ struct safe_buffer {
        int             direction;
 
        /* safe buffer info */
-       struct dma_pool *pool;
+       struct dmabounce_pool *pool;
        void            *safe;
        dma_addr_t      safe_dma_addr;
 };
 
+struct dmabounce_pool {
+       unsigned long   size;
+       struct dma_pool *pool;
+#ifdef STATS
+       unsigned long   allocs;
+#endif
+};
+
 struct dmabounce_device_info {
        struct list_head node;
 
        struct device *dev;
-       struct dma_pool *small_buffer_pool;
-       struct dma_pool *large_buffer_pool;
        struct list_head safe_buffers;
-       unsigned long small_buffer_size, large_buffer_size;
 #ifdef STATS
-       unsigned long sbp_allocs;
-       unsigned long lbp_allocs;
        unsigned long total_allocs;
        unsigned long map_op_count;
        unsigned long bounce_count;
 #endif
+       struct dmabounce_pool   small;
+       struct dmabounce_pool   large;
+
+       rwlock_t lock;
 };
 
 static LIST_HEAD(dmabounce_devs);
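
The old per-device fields (small_buffer_pool, small_buffer_size,
sbp_allocs, and their large-pool twins) are folded into two instances of
struct dmabounce_pool, so pool-specific code can take a single pointer.
Keeping the counter under #ifdef STATS means non-stats builds pay
nothing; with the DO_STATS() wrapper defined above, call sites stay
unconditional. A sketch of a hypothetical accounting helper:

	/* Compiles to nothing unless STATS is defined. */
	static inline void pool_account_alloc(struct dmabounce_pool *pool)
	{
		DO_STATS(pool->allocs++);
	}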
@@ -82,9 +89,9 @@ static void print_alloc_stats(struct dmabounce_device_info *device_info)
        printk(KERN_INFO
                "%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
                device_info->dev->bus_id,
-               device_info->sbp_allocs, device_info->lbp_allocs,
-               device_info->total_allocs - device_info->sbp_allocs -
-                       device_info->lbp_allocs,
+               device_info->small.allocs, device_info->large.allocs,
+               device_info->total_allocs - device_info->small.allocs -
+                       device_info->large.allocs,
                device_info->total_allocs);
 }
 #endif
@@ -106,18 +113,23 @@ find_dmabounce_dev(struct device *dev)
 /* allocate a 'safe' buffer and keep track of it */
 static inline struct safe_buffer *
 alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
-                       size_t size, enum dma_data_direction dir)
+                 size_t size, enum dma_data_direction dir)
 {
        struct safe_buffer *buf;
-       struct dma_pool *pool;
+       struct dmabounce_pool *pool;
        struct device *dev = device_info->dev;
-       void *safe;
-       dma_addr_t safe_dma_addr;
+       unsigned long flags;
 
        dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
                __func__, ptr, size, dir);
 
-       DO_STATS ( device_info->total_allocs++ );
+       if (size <= device_info->small.size) {
+               pool = &device_info->small;
+       } else if (size <= device_info->large.size) {
+               pool = &device_info->large;
+       } else {
+               pool = NULL;
+       }
 
        buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
        if (buf == NULL) {
@@ -125,43 +137,41 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
                return NULL;
        }
 
-       if (size <= device_info->small_buffer_size) {
-               pool = device_info->small_buffer_pool;
-               safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
-
-               DO_STATS ( device_info->sbp_allocs++ );
-       } else if (size <= device_info->large_buffer_size) {
-               pool = device_info->large_buffer_pool;
-               safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
+       buf->ptr = ptr;
+       buf->size = size;
+       buf->direction = dir;
+       buf->pool = pool;
 
-               DO_STATS ( device_info->lbp_allocs++ );
+       if (pool) {
+               buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
+                                          &buf->safe_dma_addr);
        } else {
-               pool = NULL;
-               safe = dma_alloc_coherent(dev, size, &safe_dma_addr, GFP_ATOMIC);
+               buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
+                                              GFP_ATOMIC);
        }
 
-       if (safe == NULL) {
-               dev_warn(device_info->dev,
-                       "%s: could not alloc dma memory (size=%d)\n",
-                      __func__, size);
+       if (buf->safe == NULL) {
+               dev_warn(dev,
+                        "%s: could not alloc dma memory (size=%d)\n",
+                        __func__, size);
                kfree(buf);
                return NULL;
        }
 
 #ifdef STATS
+       if (pool)
+               pool->allocs++;
+       device_info->total_allocs++;
        if (device_info->total_allocs % 1000 == 0)
                print_alloc_stats(device_info);
 #endif
 
-       buf->ptr = ptr;
-       buf->size = size;
-       buf->direction = dir;
-       buf->pool = pool;
-       buf->safe = safe;
-       buf->safe_dma_addr = safe_dma_addr;
+       write_lock_irqsave(&device_info->lock, flags);
 
        list_add(&buf->node, &device_info->safe_buffers);
 
+       write_unlock_irqrestore(&device_info->lock, flags);
+
        return buf;
 }
 
@@ -169,24 +179,36 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
 static inline struct safe_buffer *
 find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
 {
-       struct safe_buffer *b;
+       struct safe_buffer *b, *rb = NULL;
+       unsigned long flags;
+
+       read_lock_irqsave(&device_info->lock, flags);
 
        list_for_each_entry(b, &device_info->safe_buffers, node)
-               if (b->safe_dma_addr == safe_dma_addr)
-                       return b;
+               if (b->safe_dma_addr == safe_dma_addr) {
+                       rb = b;
+                       break;
+               }
 
-       return NULL;
+       read_unlock_irqrestore(&device_info->lock, flags);
+       return rb;
 }
 
 static inline void
 free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
 {
+       unsigned long flags;
+
        dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);
 
+       write_lock_irqsave(&device_info->lock, flags);
+
        list_del(&buf->node);
 
+       write_unlock_irqrestore(&device_info->lock, flags);
+
        if (buf->pool)
-               dma_pool_free(buf->pool, buf->safe, buf->safe_dma_addr);
+               dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
        else
                dma_free_coherent(device_info->dev, buf->size, buf->safe,
                                    buf->safe_dma_addr);
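
This rwlock is what allows the later hunks to drop local_irq_save()
from the dma_* entry points: lookups walk safe_buffers under the shared
lock, while insertion and removal take it exclusively. The pattern in
isolation (device_info standing in for the per-device state above):

	unsigned long flags;

	/* Readers may run concurrently with one another... */
	read_lock_irqsave(&device_info->lock, flags);
	/* ... walk device_info->safe_buffers here ... */
	read_unlock_irqrestore(&device_info->lock, flags);

	/* ... while list_add()/list_del() need exclusive access. */
	write_lock_irqsave(&device_info->lock, flags);
	/* ... modify device_info->safe_buffers here ... */
	write_unlock_irqrestore(&device_info->lock, flags);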
@@ -197,12 +219,10 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
 /* ************************************************** */
 
 #ifdef STATS
-
 static void print_map_stats(struct dmabounce_device_info *device_info)
 {
-       printk(KERN_INFO
-               "%s: dmabounce: map_op_count=%lu, bounce_count=%lu\n",
-               device_info->dev->bus_id,
+       dev_info(device_info->dev,
+               "dmabounce: map_op_count=%lu, bounce_count=%lu\n",
                device_info->map_op_count, device_info->bounce_count);
 }
 #endif
@@ -258,13 +278,13 @@ map_single(struct device *dev, void *ptr, size_t size,
                                __func__, ptr, buf->safe, size);
                        memcpy(buf->safe, ptr, size);
                }
-               consistent_sync(buf->safe, size, dir);
+               ptr = buf->safe;
 
                dma_addr = buf->safe_dma_addr;
-       } else {
-               consistent_sync(ptr, size, dir);
        }
 
+       consistent_sync(ptr, size, dir);
+
        return dma_addr;
 }
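
After this hunk, map_single() no longer issues the cache maintenance
call on two separate paths: when the buffer is bounced, ptr is simply
redirected at the safe copy, and one consistent_sync() covers both
cases. A rough sketch of the resulting shape (not the full function):

	if (buf) {
		/* Bounced: copy out, then operate on the safe copy. */
		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
			memcpy(buf->safe, ptr, size);
		ptr = buf->safe;
		dma_addr = buf->safe_dma_addr;
	}
	consistent_sync(ptr, size, dir);	/* one call for either path */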
 
@@ -278,7 +298,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        /*
         * Trying to unmap an invalid mapping
         */
-       if (dma_addr == ~0) {
+       if (dma_mapping_error(dma_addr)) {
                dev_err(dev, "Trying to unmap invalid mapping\n");
                return;
        }
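
Replacing the open-coded ~0 comparison with dma_mapping_error() keeps
the "invalid cookie" convention in one place. On ARM of this vintage
the helper reduces to the same test, roughly:

	/* Paraphrased from include/asm-arm/dma-mapping.h of this era. */
	static inline int dma_mapping_error(dma_addr_t dma_addr)
	{
		return dma_addr == ~0;
	}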
@@ -395,7 +415,6 @@ dma_addr_t
 dma_map_single(struct device *dev, void *ptr, size_t size,
                enum dma_data_direction dir)
 {
-       unsigned long flags;
        dma_addr_t dma_addr;
 
        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -403,12 +422,8 @@ dma_map_single(struct device *dev, void *ptr, size_t size,
 
        BUG_ON(dir == DMA_NONE);
 
-       local_irq_save(flags);
-
        dma_addr = map_single(dev, ptr, size, dir);
 
-       local_irq_restore(flags);
-
        return dma_addr;
 }
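
The same deletion repeats in every wrapper below: masking interrupts
used to be the only protection for the per-device safe_buffers list,
and now that alloc_safe_buffer(), find_safe_buffer() and
free_safe_buffer() take device_info->lock themselves, the outer
critical sections are redundant. Schematically:

	/* Before: the caller serialized list access by masking IRQs. */
	local_irq_save(flags);
	dma_addr = map_single(dev, ptr, size, dir);
	local_irq_restore(flags);

	/* After: map_single() locks internally, so this suffices. */
	dma_addr = map_single(dev, ptr, size, dir);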
 
@@ -423,25 +438,18 @@ void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                        enum dma_data_direction dir)
 {
-       unsigned long flags;
-
        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);
 
        BUG_ON(dir == DMA_NONE);
 
-       local_irq_save(flags);
-
        unmap_single(dev, dma_addr, size, dir);
-
-       local_irq_restore(flags);
 }
 
 int
 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
 {
-       unsigned long flags;
        int i;
 
        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -449,8 +457,6 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
        BUG_ON(dir == DMA_NONE);
 
-       local_irq_save(flags);
-
        for (i = 0; i < nents; i++, sg++) {
                struct page *page = sg->page;
                unsigned int offset = sg->offset;
@@ -461,8 +467,6 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                        map_single(dev, ptr, length, dir);
        }
 
-       local_irq_restore(flags);
-
        return nents;
 }
 
@@ -470,7 +474,6 @@ void
 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
 {
-       unsigned long flags;
        int i;
 
        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -478,55 +481,38 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 
        BUG_ON(dir == DMA_NONE);
 
-       local_irq_save(flags);
-
        for (i = 0; i < nents; i++, sg++) {
                dma_addr_t dma_addr = sg->dma_address;
                unsigned int length = sg->length;
 
                unmap_single(dev, dma_addr, length, dir);
        }
-
-       local_irq_restore(flags);
 }
 
 void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
                                enum dma_data_direction dir)
 {
-       unsigned long flags;
-
        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);
 
-       local_irq_save(flags);
-
        sync_single(dev, dma_addr, size, dir);
-
-       local_irq_restore(flags);
 }
 
 void
 dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
                                enum dma_data_direction dir)
 {
-       unsigned long flags;
-
        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);
 
-       local_irq_save(flags);
-
        sync_single(dev, dma_addr, size, dir);
-
-       local_irq_restore(flags);
 }
 
 void
 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
                        enum dma_data_direction dir)
 {
-       unsigned long flags;
        int i;
 
        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -534,23 +520,18 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 
        BUG_ON(dir == DMA_NONE);
 
-       local_irq_save(flags);
-
        for (i = 0; i < nents; i++, sg++) {
                dma_addr_t dma_addr = sg->dma_address;
                unsigned int length = sg->length;
 
                sync_single(dev, dma_addr, length, dir);
        }
-
-       local_irq_restore(flags);
 }
 
 void
 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
                        enum dma_data_direction dir)
 {
-       unsigned long flags;
        int i;
 
        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -558,16 +539,25 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 
        BUG_ON(dir == DMA_NONE);
 
-       local_irq_save(flags);
-
        for (i = 0; i < nents; i++, sg++) {
                dma_addr_t dma_addr = sg->dma_address;
                unsigned int length = sg->length;
 
                sync_single(dev, dma_addr, length, dir);
        }
+}
+
+static int
+dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
+                   unsigned long size)
+{
+       pool->size = size;
+       DO_STATS(pool->allocs = 0);
+       pool->pool = dma_pool_create(name, dev, size,
+                                    0 /* byte alignment */,
+                                    0 /* no page-crossing issues */);
 
-       local_irq_restore(flags);
+       return pool->pool ? 0 : -ENOMEM;
 }
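
Returning 0 or -ENOMEM lets dmabounce_register_dev() below drive both
pools through the same helper and unwind with gotos on failure. A
hypothetical standalone caller wanting a single 1024-byte pool would
look like:

	struct dmabounce_pool pool;

	if (dmabounce_init_pool(&pool, dev, "example_pool", 1024))
		return -ENOMEM;	/* dma_pool_create() failed */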
 
 int
@@ -575,6 +565,7 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
                        unsigned long large_buffer_size)
 {
        struct dmabounce_device_info *device_info;
+       int ret;
 
        device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
        if (!device_info) {
@@ -584,45 +575,32 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
                return -ENOMEM;
        }
 
-       device_info->small_buffer_pool =
-               dma_pool_create("small_dmabounce_pool",
-                               dev,
-                               small_buffer_size,
-                               0 /* byte alignment */,
-                               0 /* no page-crossing issues */);
-       if (!device_info->small_buffer_pool) {
-               printk(KERN_ERR
-                       "dmabounce: could not allocate small DMA pool for %s\n",
-                       dev->bus_id);
-               kfree(device_info);
-               return -ENOMEM;
+       ret = dmabounce_init_pool(&device_info->small, dev,
+                                 "small_dmabounce_pool", small_buffer_size);
+       if (ret) {
+               dev_err(dev,
+                       "dmabounce: could not allocate DMA pool for %ld byte objects\n",
+                       small_buffer_size);
+               goto err_free;
        }
 
        if (large_buffer_size) {
-               device_info->large_buffer_pool =
-                       dma_pool_create("large_dmabounce_pool",
-                                       dev,
-                                       large_buffer_size,
-                                       0 /* byte alignment */,
-                                       0 /* no page-crossing issues */);
-               if (!device_info->large_buffer_pool) {
-               printk(KERN_ERR
-                       "dmabounce: could not allocate large DMA pool for %s\n",
-                       dev->bus_id);
-                       dma_pool_destroy(device_info->small_buffer_pool);
-
-                       return -ENOMEM;
+               ret = dmabounce_init_pool(&device_info->large, dev,
+                                         "large_dmabounce_pool",
+                                         large_buffer_size);
+               if (ret) {
+                       dev_err(dev,
+                               "dmabounce: could not allocate DMA pool for %ld byte objects\n",
+                               large_buffer_size);
+                       goto err_destroy;
                }
        }
 
        device_info->dev = dev;
-       device_info->small_buffer_size = small_buffer_size;
-       device_info->large_buffer_size = large_buffer_size;
        INIT_LIST_HEAD(&device_info->safe_buffers);
+       rwlock_init(&device_info->lock);
 
 #ifdef STATS
-       device_info->sbp_allocs = 0;
-       device_info->lbp_allocs = 0;
        device_info->total_allocs = 0;
        device_info->map_op_count = 0;
        device_info->bounce_count = 0;
@@ -634,6 +612,12 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
                dev->bus_id, dev->bus->name);
 
        return 0;
+
+ err_destroy:
+       dma_pool_destroy(device_info->small.pool);
+ err_free:
+       kfree(device_info);
+       return ret;
 }
 
 void
@@ -655,10 +639,10 @@ dmabounce_unregister_dev(struct device *dev)
                BUG();
        }
 
-       if (device_info->small_buffer_pool)
-               dma_pool_destroy(device_info->small_buffer_pool);
-       if (device_info->large_buffer_pool)
-               dma_pool_destroy(device_info->large_buffer_pool);
+       if (device_info->small.pool)
+               dma_pool_destroy(device_info->small.pool);
+       if (device_info->large.pool)
+               dma_pool_destroy(device_info->large.pool);
 
 #ifdef STATS
        print_alloc_stats(device_info);
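
For completeness, teardown mirrors registration: a driver that called
dmabounce_register_dev() at probe time undoes it at remove time, after
making sure no bounce buffers are still mapped (the BUG() in the hunk
above suggests lingering mappings are treated as fatal). A hypothetical
remove path:

	static void example_remove(struct device *dev)
	{
		/* Destroys both pools for this device. */
		dmabounce_unregister_dev(dev);
	}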