X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=arch%2Farm%2Fcommon%2Fdmabounce.c;h=7971d0dc6892c1a3e603961de830091c12577704;hb=42eaf0d8f2e7b8201afc00b0ebe1bd89ea51d42d;hp=ad6c89a555bbffad78a3a6d1250c1e8158365528;hpb=849a8924a6740ecbf9711e015beca69425f0c429;p=pandora-kernel.git

diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index ad6c89a555bb..6fbe7722aa44 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -5,7 +5,7 @@
  * limited DMA windows. These functions utilize bounce buffers to
  * copy data to/from buffers located outside the DMA region. This
  * only works for systems in which DMA memory is at the bottom of
- * RAM and the remainder of memory is at the top an the DMA memory
+ * RAM, the remainder of memory is at the top and the DMA memory
  * can be marked as ZONE_DMA. Anything beyond that such as discontigous
  * DMA windows will require custom implementations that reserve memory
  * areas at early bootup.
@@ -32,7 +32,6 @@

 #include <asm/cacheflush.h>

-#undef DEBUG
 #undef STATS

 #ifdef STATS
@@ -66,46 +65,37 @@ struct dmabounce_pool {
 };

 struct dmabounce_device_info {
-	struct list_head node;
-
 	struct device *dev;
 	struct list_head safe_buffers;
 #ifdef STATS
 	unsigned long total_allocs;
 	unsigned long map_op_count;
 	unsigned long bounce_count;
+	int attr_res;
 #endif
 	struct dmabounce_pool	small;
 	struct dmabounce_pool	large;
-};

-static LIST_HEAD(dmabounce_devs);
+	rwlock_t lock;
+};

 #ifdef STATS
-static void print_alloc_stats(struct dmabounce_device_info *device_info)
+static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
 {
-	printk(KERN_INFO
-		"%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
-		device_info->dev->bus_id,
-		device_info->small.allocs, device_info->large.allocs,
+	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
+		device_info->small.allocs,
+		device_info->large.allocs,
 		device_info->total_allocs - device_info->small.allocs -
 			device_info->large.allocs,
-		device_info->total_allocs);
+		device_info->total_allocs,
+		device_info->map_op_count,
+		device_info->bounce_count);
 }
-#endif
-
-/* find the given device in the dmabounce device list */
-static inline struct dmabounce_device_info *
-find_dmabounce_dev(struct device *dev)
-{
-	struct dmabounce_device_info *d;
-
-	list_for_each_entry(d, &dmabounce_devs, node)
-		if (d->dev == dev)
-			return d;

-	return NULL;
-}
+static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
+#endif


 /* allocate a 'safe' buffer and keep track of it */
@@ -116,6 +106,7 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
 	struct safe_buffer *buf;
 	struct dmabounce_pool *pool;
 	struct device *dev = device_info->dev;
+	unsigned long flags;

 	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
 		__func__, ptr, size, dir);
@@ -159,12 +150,14 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
 	if (pool)
 		pool->allocs++;
 	device_info->total_allocs++;
-	if (device_info->total_allocs % 1000 == 0)
-		print_alloc_stats(device_info);
 #endif

+	write_lock_irqsave(&device_info->lock, flags);
+
 	list_add(&buf->node, &device_info->safe_buffers);

+	write_unlock_irqrestore(&device_info->lock, flags);
+
 	return buf;
 }

@@ -172,22 +165,34 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
 static inline struct safe_buffer *
 find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
 {
-	struct safe_buffer *b;
+	struct safe_buffer *b, *rb = NULL;
+	unsigned long flags;
+
+	read_lock_irqsave(&device_info->lock, flags);

 	list_for_each_entry(b, &device_info->safe_buffers, node)
-		if (b->safe_dma_addr == safe_dma_addr)
-			return b;
+		if (b->safe_dma_addr == safe_dma_addr) {
+			rb = b;
+			break;
+		}

-	return NULL;
+	read_unlock_irqrestore(&device_info->lock, flags);
+	return rb;
 }

 static inline void
 free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
 {
+	unsigned long flags;
+
 	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

+	write_lock_irqsave(&device_info->lock, flags);
+
 	list_del(&buf->node);

+	write_unlock_irqrestore(&device_info->lock, flags);
+
 	if (buf->pool)
 		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
 	else
@@ -199,20 +204,11 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *

 /* ************************************************** */

-#ifdef STATS
-static void print_map_stats(struct dmabounce_device_info *device_info)
-{
-	dev_info(device_info->dev,
-		"dmabounce: map_op_count=%lu, bounce_count=%lu\n",
-		device_info->map_op_count, device_info->bounce_count);
-}
-#endif
-
 static inline dma_addr_t
 map_single(struct device *dev, void *ptr, size_t size,
 		enum dma_data_direction dir)
 {
-	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 	dma_addr_t dma_addr;
 	int needs_bounce = 0;

@@ -262,10 +258,14 @@ map_single(struct device *dev, void *ptr, size_t size,
 		ptr = buf->safe;

 		dma_addr = buf->safe_dma_addr;
+	} else {
+		/*
+		 * We don't need to sync the DMA buffer since
+		 * it was allocated via the coherent allocators.
+		 */
+		consistent_sync(ptr, size, dir);
 	}

-	consistent_sync(ptr, size, dir);
-
 	return dma_addr;
 }

@@ -273,7 +273,7 @@ static inline void
 unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
-	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 	struct safe_buffer *buf = NULL;

 	/*
@@ -298,12 +298,12 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		DO_STATS ( device_info->bounce_count++ );

 		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
-			unsigned long ptr;
+			void *ptr = buf->ptr;

 			dev_dbg(dev,
 				"%s: copy back safe %p to unsafe %p size %d\n",
-				__func__, buf->safe, buf->ptr, size);
-			memcpy(buf->ptr, buf->safe, size);
+				__func__, buf->safe, ptr, size);
+			memcpy(ptr, buf->safe, size);

 			/*
 			 * DMA buffers must have the same cache properties
@@ -313,8 +313,8 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 			 * bidirectional case because we know the cache
 			 * lines will be coherent with the data written.
 			 */
-			ptr = (unsigned long)buf->ptr;
 			dmac_clean_range(ptr, ptr + size);
+			outer_clean_range(__pa(ptr), __pa(ptr) + size);
 		}
 		free_safe_buffer(device_info, buf);
 	}
@@ -324,7 +324,7 @@ static inline void
 sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
-	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 	struct safe_buffer *buf = NULL;

 	if (device_info)
@@ -378,7 +378,10 @@ sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		default:
 			BUG();
 		}
-		consistent_sync(buf->safe, size, dir);
+		/*
+		 * No need to sync the safe buffer - it was allocated
+		 * via the coherent allocators.
+		 */
 	} else {
 		consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
 	}
@@ -396,7 +399,6 @@ dma_addr_t
 dma_map_single(struct device *dev, void *ptr, size_t size,
 		enum dma_data_direction dir)
 {
-	unsigned long flags;
 	dma_addr_t dma_addr;

 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -404,12 +406,8 @@ dma_map_single(struct device *dev, void *ptr, size_t size,

 	BUG_ON(dir == DMA_NONE);

-	local_irq_save(flags);
-
 	dma_addr = map_single(dev, ptr, size, dir);

-	local_irq_restore(flags);
-
 	return dma_addr;
 }

@@ -424,25 +422,18 @@ void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
-	unsigned long flags;
-
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
 		__func__, (void *) dma_addr, size, dir);

 	BUG_ON(dir == DMA_NONE);

-	local_irq_save(flags);
-
 	unmap_single(dev, dma_addr, size, dir);
-
-	local_irq_restore(flags);
 }

 int
 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir)
 {
-	unsigned long flags;
 	int i;

 	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -450,8 +441,6 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,

 	BUG_ON(dir == DMA_NONE);

-	local_irq_save(flags);
-
 	for (i = 0; i < nents; i++, sg++) {
 		struct page *page = sg->page;
 		unsigned int offset = sg->offset;
@@ -462,8 +451,6 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 			map_single(dev, ptr, length, dir);
 	}

-	local_irq_restore(flags);
-
 	return nents;
 }

@@ -471,7 +458,6 @@ void
 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir)
 {
-	unsigned long flags;
 	int i;

 	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -479,55 +465,38 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,

 	BUG_ON(dir == DMA_NONE);

-	local_irq_save(flags);
-
 	for (i = 0; i < nents; i++, sg++) {
 		dma_addr_t dma_addr = sg->dma_address;
 		unsigned int length = sg->length;

 		unmap_single(dev, dma_addr, length, dir);
 	}
-
-	local_irq_restore(flags);
 }

 void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
 				enum dma_data_direction dir)
 {
-	unsigned long flags;
-
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
 		__func__, (void *) dma_addr, size, dir);

-	local_irq_save(flags);
-
 	sync_single(dev, dma_addr, size, dir);
-
-	local_irq_restore(flags);
 }

 void
 dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
 				enum dma_data_direction dir)
 {
-	unsigned long flags;
-
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
 		__func__, (void *) dma_addr, size, dir);

-	local_irq_save(flags);
-
 	sync_single(dev, dma_addr, size, dir);
-
-	local_irq_restore(flags);
 }

 void
 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir)
 {
-	unsigned long flags;
 	int i;

 	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -535,23 +504,18 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,

 	BUG_ON(dir == DMA_NONE);

-	local_irq_save(flags);
-
 	for (i = 0; i < nents; i++, sg++) {
 		dma_addr_t dma_addr = sg->dma_address;
 		unsigned int length = sg->length;

 		sync_single(dev, dma_addr, length, dir);
 	}
-
-	local_irq_restore(flags);
 }

 void
 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir)
 {
-	unsigned long flags;
 	int i;

 	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -559,16 +523,12 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,

 	BUG_ON(dir == DMA_NONE);

-	local_irq_save(flags);
-
 	for (i = 0; i < nents; i++, sg++) {
 		dma_addr_t dma_addr = sg->dma_address;
 		unsigned int length = sg->length;

 		sync_single(dev, dma_addr, length, dir);
 	}
-
-	local_irq_restore(flags);
 }

 static int
@@ -622,14 +582,16 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,

 	device_info->dev = dev;
 	INIT_LIST_HEAD(&device_info->safe_buffers);
+	rwlock_init(&device_info->lock);

 #ifdef STATS
 	device_info->total_allocs = 0;
 	device_info->map_op_count = 0;
 	device_info->bounce_count = 0;
+	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
 #endif

-	list_add(&device_info->node, &dmabounce_devs);
+	dev->archdata.dmabounce = device_info;

 	printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
 		dev->bus_id, dev->bus->name);
@@ -646,7 +608,9 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 void
 dmabounce_unregister_dev(struct device *dev)
 {
-	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+
+	dev->archdata.dmabounce = NULL;

 	if (!device_info) {
 		printk(KERN_WARNING
@@ -668,12 +632,10 @@ dmabounce_unregister_dev(struct device *dev)
 		dma_pool_destroy(device_info->large.pool);

 #ifdef STATS
-	print_alloc_stats(device_info);
-	print_map_stats(device_info);
+	if (device_info->attr_res == 0)
+		device_remove_file(dev, &dev_attr_dmabounce_stats);
 #endif

-	list_del(&device_info->node);
-
 	kfree(device_info);

 	printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",
@@ -685,7 +647,8 @@ EXPORT_SYMBOL(dma_map_single);
 EXPORT_SYMBOL(dma_unmap_single);
 EXPORT_SYMBOL(dma_map_sg);
 EXPORT_SYMBOL(dma_unmap_sg);
-EXPORT_SYMBOL(dma_sync_single);
+EXPORT_SYMBOL(dma_sync_single_for_cpu);
+EXPORT_SYMBOL(dma_sync_single_for_device);
 EXPORT_SYMBOL(dma_sync_sg);
 EXPORT_SYMBOL(dmabounce_register_dev);
 EXPORT_SYMBOL(dmabounce_unregister_dev);
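The registration API above is the whole surface a platform sees: dmabounce_register_dev() sizes the two bounce pools and, after this patch, hangs the bookkeeping off dev->archdata.dmabounce, while the platform decides per-mapping whether to bounce via its dma_needs_bounce() hook. Below is a minimal sketch of that glue, loosely modeled on the existing sa1111/ixp4xx users of this API; the 64MB window, the pool sizes, and the my_* names are illustrative assumptions, not part of this patch.

/*
 * Sketch: hypothetical platform glue for a device that can only
 * DMA into the first 64MB of RAM. The window size and the my_*
 * names are made up for illustration.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/sizes.h>

#define MY_DMA_WINDOW	SZ_64M

/*
 * Platform-supplied predicate: map_single() calls this for every
 * mapping and bounces whenever it returns non-zero.
 */
int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size) >= MY_DMA_WINDOW;
}

static int my_dev_setup(struct device *dev)
{
	/*
	 * 512-byte and 4K pools back the "small" and "large" bounce
	 * allocations; bigger requests fall back to dma_alloc_coherent()
	 * inside alloc_safe_buffer().
	 */
	return dmabounce_register_dev(dev, 512, 4096);
}

static void my_dev_teardown(struct device *dev)
{
	/* Clears dev->archdata.dmabounce and destroys both pools. */
	dmabounce_unregister_dev(dev);
}

With the per-device state in dev->archdata.dmabounce, the lookup on every map/unmap is a pointer dereference instead of a walk of the old global dmabounce_devs list, and the new rwlock guards only the safe-buffer list rather than disabling interrupts around entire map, unmap, and sync operations.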