/*
 * arch/arm/common/dmabounce.c
 *
 * Special dma_{map/unmap/dma_sync}_* routines for systems that have
 * limited DMA windows. These functions utilize bounce buffers to
 * copy data to/from buffers located outside the DMA region. This
 * only works for systems in which DMA memory is at the bottom of
 * RAM, the remainder of memory is at the top, and the DMA memory
 * can be marked as ZONE_DMA. Anything beyond that, such as discontiguous
 * DMA windows, will require custom implementations that reserve memory
 * areas at early bootup.
 *
 * Original version by Brad Parker (brad@heeltoe.com)
 * Re-written by Christopher Hoover <ch@murgatroid.com>
 * Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright (C) 2002 Hewlett Packard Company.
 * Copyright (C) 2004 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
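/*
 * Editorial usage sketch (not part of the original file): a platform or
 * bus driver with a restricted DMA window registers each affected device
 * once, typically when the device is added, and unregisters it on
 * removal.  The pool sizes below are hypothetical.
 *
 *	if (dmabounce_register_dev(dev, 512, 4096))
 *		dev_err(dev, "failed to register with dmabounce\n");
 *
 * After registration, ordinary streaming DMA calls such as
 * dma_map_single(dev, buf, len, DMA_TO_DEVICE) transparently substitute
 * a bounce buffer whenever 'buf' lies outside the device's DMA window.
 *
 *	dmabounce_unregister_dev(dev);
 */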
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif
/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
	unsigned long	allocs;
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;

	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;

	struct dmabounce_pool	small;
	struct dmabounce_pool	large;

	rwlock_t lock;
};
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
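/*
 * Reading the attribute above (editorial note; the sysfs path and numbers
 * are illustrative only):
 *
 *	$ cat /sys/devices/.../dmabounce_stats
 *	12 3 0 15 1024 15
 *
 * The six fields are, in order: allocations served from the small pool,
 * from the large pool, from the dma_alloc_coherent() fallback, total
 * allocations, total map operations, and the number of operations that
 * actually bounced.  Here 12 + 3 + 0 = 15 total allocations.
 */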
/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool)
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	else
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

	device_info->total_allocs++;

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}
/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr == safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}
static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);
	list_del(&buf->node);
	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */
static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
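		/*
		 * Worked example (editorial note, values are hypothetical):
		 * for a 26-bit mask of 0x03ffffff, limit = 0x04000000, so a
		 * single mapping larger than 64MB is rejected above.  For a
		 * 4KB buffer at bus address 0x0c800000, both the first and
		 * last byte (0x0c800fff) have bits set above the mask
		 * (0x0c800000 & ~0x03ffffff == 0x0c000000), so needs_bounce
		 * is non-zero and the buffer will be bounced below.
		 */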
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == NULL) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
				__func__, ptr);
			return ~0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}

		dma_addr = buf->safe_dma_addr;
	} else {
		consistent_sync(ptr, size, dir);
	}

	return dma_addr;
}
static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf = NULL;

	/*
	 * Trying to unmap an invalid mapping
	 */
	if (dma_mapping_error(dma_addr)) {
		dev_err(dev, "Trying to unmap invalid mapping\n");
		return;
	}

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		BUG_ON(buf->size != size);

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			unsigned long ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM.  Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			ptr = (unsigned long)buf->ptr;
			dmac_clean_range(ptr, ptr + size);
		}
		free_safe_buffer(device_info, buf);
	}
}
static inline void
sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf = NULL;

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		/*
		 * Both of these checks from the original code need to be
		 * commented out because some drivers rely on the following:
		 *
		 * 1) Drivers may map a large chunk of memory into DMA space
		 *    but only sync a small portion of it.  A good example is
		 *    allocating a large buffer, mapping it, and then breaking
		 *    it up into small descriptors.  There is no point in
		 *    syncing the whole buffer if you only have to touch one
		 *    descriptor (see the illustrative sketch after this
		 *    comment).
		 *
		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
		 *    usually only synced in one direction at a time.
		 *
		 * See drivers/net/eepro100.c for examples of both cases.
		 *
		 * BUG_ON(buf->size != size);
		 * BUG_ON(buf->direction != dir);
		 */
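		/*
		 * Illustrative sketch of the cases above (editorial; names and
		 * sizes are hypothetical).  A driver maps a whole descriptor
		 * ring once, then syncs only the first descriptor, and in one
		 * direction only:
		 *
		 *	ring_dma = dma_map_single(dev, ring, RING_BYTES,
		 *				  DMA_BIDIRECTIONAL);
		 *	...
		 *	dma_sync_single_for_cpu(dev, ring_dma,
		 *				sizeof(struct my_desc),
		 *				DMA_FROM_DEVICE);
		 *
		 * Here both the size and the direction passed to this function
		 * legitimately differ from those used at map time, which is
		 * why the checks above stay commented out.
		 */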
353 "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
354 __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
355 buf->safe, (void *) buf->safe_dma_addr);
357 DO_STATS ( device_info->bounce_count++ );
360 case DMA_FROM_DEVICE:
362 "%s: copy back safe %p to unsafe %p size %d\n",
363 __func__, buf->safe, buf->ptr, size);
364 memcpy(buf->ptr, buf->safe, size);
368 "%s: copy out unsafe %p to safe %p, size %d\n",
369 __func__,buf->ptr, buf->safe, size);
370 memcpy(buf->safe, buf->ptr, size);
372 case DMA_BIDIRECTIONAL:
373 BUG(); /* is this allowed? what does it mean? */
377 consistent_sync(buf->safe, size, dir);
379 consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
383 /* ************************************************** */
/*
 * See if a buffer address is in an 'unsafe' range.  If it is, allocate a
 * 'safe' buffer and copy the unsafe buffer into it, then substitute the
 * safe buffer for the unsafe one (basically move the buffer from an
 * unsafe area to a safe one).
 */
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(dir == DMA_NONE);

	dma_addr = map_single(dev, ptr, size, dir);

	return dma_addr;
}
/*
 * See if a mapped address was really a "safe" buffer and if so, copy the
 * data from the safe buffer back to the unsafe buffer and free up the
 * safe buffer (basically return things back to the way they should be).
 */
void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	BUG_ON(dir == DMA_NONE);

	unmap_single(dev, dma_addr, size, dir);
}
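/*
 * Editorial usage sketch for the pair above (buffer names are
 * hypothetical): a driver maps a receive buffer, checks for failure, lets
 * the device DMA into it, then unmaps; if the buffer was bounced, the
 * unmap copies the received data back into the original buffer and frees
 * the safe copy.
 *
 *	dma_addr_t handle = dma_map_single(dev, rx_buf, rx_len,
 *					   DMA_FROM_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;
 *	... program the device with 'handle', wait for the transfer ...
 *	dma_unmap_single(dev, handle, rx_len, DMA_FROM_DEVICE);
 */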
int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		struct page *page = sg->page;
		unsigned int offset = sg->offset;
		unsigned int length = sg->length;
		void *ptr = page_address(page) + offset;

		sg->dma_address =
			map_single(dev, ptr, length, dir);
	}

	return nents;
}

void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		unmap_single(dev, dma_addr, length, dir);
	}
}
void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	sync_single(dev, dma_addr, size, dir);
}

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	sync_single(dev, dma_addr, size, dir);
}

void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}
}

void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}
}
static int
dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
		    unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}
int
dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
			unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		printk(KERN_ERR
			"Could not allocate dmabounce_device_info for %s\n",
			dev->bus_id);
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);

	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);

	dev->archdata.dmabounce = device_info;

	printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
		dev->bus_id, dev->bus->name);

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
void
dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;

	if (!device_info) {
		printk(KERN_WARNING
			"%s: Never registered with dmabounce but attempting "
			"to unregister!\n", dev->bus_id);
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		printk(KERN_ERR
			"%s: Removing from dmabounce with pending buffers!\n",
			dev->bus_id);
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);

	kfree(device_info);

	printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",
		dev->bus_id, dev->bus->name);
}
EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(dma_map_sg);
EXPORT_SYMBOL(dma_unmap_sg);
EXPORT_SYMBOL(dma_sync_single_for_cpu);
EXPORT_SYMBOL(dma_sync_single_for_device);
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
EXPORT_SYMBOL(dma_sync_sg_for_device);
EXPORT_SYMBOL(dmabounce_register_dev);
EXPORT_SYMBOL(dmabounce_unregister_dev);
MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");