1 /*
2  *  arch/arm/common/dmabounce.c
3  *
4  *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
5  *  limited DMA windows. These functions utilize bounce buffers to
6  *  copy data to/from buffers located outside the DMA region. This
7  *  only works for systems in which DMA memory is at the bottom of
8  *  RAM, the remainder of memory is at the top and the DMA memory
9  *  can be marked as ZONE_DMA. Anything beyond that, such as discontiguous
10  *  DMA windows, will require custom implementations that reserve memory
11  *  areas at early bootup.
12  *
13  *  Original version by Brad Parker (brad@heeltoe.com)
14  *  Re-written by Christopher Hoover <ch@murgatroid.com>
15  *  Made generic by Deepak Saxena <dsaxena@plexity.net>
16  *
17  *  Copyright (C) 2002 Hewlett Packard Company.
18  *  Copyright (C) 2004 MontaVista Software, Inc.
19  *
20  *  This program is free software; you can redistribute it and/or
21  *  modify it under the terms of the GNU General Public License
22  *  version 2 as published by the Free Software Foundation.
23  */
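/*
 * Editorial note (not part of the original file): a minimal picture of the
 * memory layout the header above assumes, with a hypothetical 64MB device
 * DMA window:
 *
 *        +--------------------+  top of RAM
 *        |     ZONE_NORMAL    |  buffers here lie above the device's DMA
 *        |                    |  mask and must be bounced
 *        +--------------------+  64MB (example window size)
 *        |     ZONE_DMA       |  bounce pools / dma_alloc_coherent() memory
 *        +--------------------+  bottom of RAM (device-reachable)
 */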
24
25 #include <linux/module.h>
26 #include <linux/init.h>
27 #include <linux/slab.h>
28 #include <linux/device.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/dmapool.h>
31 #include <linux/list.h>
32
33 #include <asm/cacheflush.h>
34
35 #undef STATS
36
37 #ifdef STATS
38 #define DO_STATS(X) do { X ; } while (0)
39 #else
40 #define DO_STATS(X) do { } while (0)
41 #endif
42
43 /* ************************************************** */
44
45 struct safe_buffer {
46         struct list_head node;
47
48         /* original request */
49         void            *ptr;
50         size_t          size;
51         int             direction;
52
53         /* safe buffer info */
54         struct dmabounce_pool *pool;
55         void            *safe;
56         dma_addr_t      safe_dma_addr;
57 };
58
59 struct dmabounce_pool {
60         unsigned long   size;
61         struct dma_pool *pool;
62 #ifdef STATS
63         unsigned long   allocs;
64 #endif
65 };
66
67 struct dmabounce_device_info {
68         struct device *dev;
69         struct list_head safe_buffers;
70 #ifdef STATS
71         unsigned long total_allocs;
72         unsigned long map_op_count;
73         unsigned long bounce_count;
74         int attr_res;
75 #endif
76         struct dmabounce_pool   small;
77         struct dmabounce_pool   large;
78
79         rwlock_t lock;
80 };
81
82 #ifdef STATS
83 static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
84                               char *buf)
85 {
86         struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
87         return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
88                 device_info->small.allocs,
89                 device_info->large.allocs,
90                 device_info->total_allocs - device_info->small.allocs -
91                         device_info->large.allocs,
92                 device_info->total_allocs,
93                 device_info->map_op_count,
94                 device_info->bounce_count);
95 }
96
97 static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
98 #endif
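/*
 * Editorial note: when STATS is defined, the dmabounce_stats attribute above
 * emits six space-separated counters, in the order written by
 * dmabounce_show():
 *
 *   small-pool allocs, large-pool allocs, coherent (fallback) allocs,
 *   total allocs, map operations, bounce operations
 *
 * For example (the sysfs path and the figures are illustrative only):
 *
 *   # cat /sys/devices/platform/example.0/dmabounce_stats
 *   120 4 0 124 5000 124
 */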
99
100
101 /* allocate a 'safe' buffer and keep track of it */
102 static inline struct safe_buffer *
103 alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
104                   size_t size, enum dma_data_direction dir)
105 {
106         struct safe_buffer *buf;
107         struct dmabounce_pool *pool;
108         struct device *dev = device_info->dev;
109         unsigned long flags;
110
111         dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
112                 __func__, ptr, size, dir);
113
114         if (size <= device_info->small.size) {
115                 pool = &device_info->small;
116         } else if (size <= device_info->large.size) {
117                 pool = &device_info->large;
118         } else {
119                 pool = NULL;
120         }
121
122         buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
123         if (buf == NULL) {
124                 dev_warn(dev, "%s: kmalloc failed\n", __func__);
125                 return NULL;
126         }
127
128         buf->ptr = ptr;
129         buf->size = size;
130         buf->direction = dir;
131         buf->pool = pool;
132
133         if (pool) {
134                 buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
135                                            &buf->safe_dma_addr);
136         } else {
137                 buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
138                                                GFP_ATOMIC);
139         }
140
141         if (buf->safe == NULL) {
142                 dev_warn(dev,
143                          "%s: could not alloc dma memory (size=%d)\n",
144                          __func__, size);
145                 kfree(buf);
146                 return NULL;
147         }
148
149 #ifdef STATS
150         if (pool)
151                 pool->allocs++;
152         device_info->total_allocs++;
153 #endif
154
155         write_lock_irqsave(&device_info->lock, flags);
156
157         list_add(&buf->node, &device_info->safe_buffers);
158
159         write_unlock_irqrestore(&device_info->lock, flags);
160
161         return buf;
162 }
163
164 /* determine if a buffer is from our "safe" pool */
165 static inline struct safe_buffer *
166 find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
167 {
168         struct safe_buffer *b, *rb = NULL;
169         unsigned long flags;
170
171         read_lock_irqsave(&device_info->lock, flags);
172
173         list_for_each_entry(b, &device_info->safe_buffers, node)
174                 if (b->safe_dma_addr == safe_dma_addr) {
175                         rb = b;
176                         break;
177                 }
178
179         read_unlock_irqrestore(&device_info->lock, flags);
180         return rb;
181 }
182
183 static inline void
184 free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
185 {
186         unsigned long flags;
187
188         dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);
189
190         write_lock_irqsave(&device_info->lock, flags);
191
192         list_del(&buf->node);
193
194         write_unlock_irqrestore(&device_info->lock, flags);
195
196         if (buf->pool)
197                 dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
198         else
199                 dma_free_coherent(device_info->dev, buf->size, buf->safe,
200                                     buf->safe_dma_addr);
201
202         kfree(buf);
203 }
204
205 /* ************************************************** */
206
207 static inline dma_addr_t
208 map_single(struct device *dev, void *ptr, size_t size,
209                 enum dma_data_direction dir)
210 {
211         struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
212         dma_addr_t dma_addr;
213         int needs_bounce = 0;
214
215         if (device_info)
216                 DO_STATS ( device_info->map_op_count++ );
217
218         dma_addr = virt_to_dma(dev, ptr);
219
220         if (dev->dma_mask) {
221                 unsigned long mask = *dev->dma_mask;
222                 unsigned long limit;
223
224                 limit = (mask + 1) & ~mask;
225                 if (limit && size > limit) {
226                         dev_err(dev, "DMA mapping too big (requested %#x "
227                                 "mask %#Lx)\n", size, *dev->dma_mask);
228                         return ~0;
229                 }
230
231                 /*
232                  * Figure out if we need to bounce from the DMA mask.
233                  */
234                 needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
235         }
236
237         if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
238                 struct safe_buffer *buf;
239
240                 buf = alloc_safe_buffer(device_info, ptr, size, dir);
241                 if (buf == NULL) {
242                         dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
243                                __func__, ptr);
244                         return ~0;
245                 }
246
247                 dev_dbg(dev,
248                         "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
249                         __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
250                         buf->safe, (void *) buf->safe_dma_addr);
251
252                 if ((dir == DMA_TO_DEVICE) ||
253                     (dir == DMA_BIDIRECTIONAL)) {
254                         dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
255                                 __func__, ptr, buf->safe, size);
256                         memcpy(buf->safe, ptr, size);
257                 }
258                 ptr = buf->safe;
259
260                 dma_addr = buf->safe_dma_addr;
261         }
262
263         consistent_sync(ptr, size, dir);
264
265         return dma_addr;
266 }
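/*
 * Editorial note: a worked example of the mask handling in map_single()
 * above, assuming a hypothetical contiguous 24-bit DMA mask:
 *
 *   mask  = 0x00ffffff
 *   limit = (mask + 1) & ~mask = 0x01000000   (a 16MB window; for a full
 *           32-bit mask, mask + 1 overflows to 0 and the size check is
 *           skipped)
 *
 *   dma_addr = 0x00fff000, size = 0x2000:
 *     (dma_addr | (dma_addr + size - 1)) & ~mask
 *       = (0x00fff000 | 0x01000fff) & 0xff000000 = 0x01000000  -> bounce
 *
 *   dma_addr = 0x00100000, size = 0x1000:
 *       = (0x00100000 | 0x00100fff) & 0xff000000 = 0           -> no bounce
 */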
267
268 static inline void
269 unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
270                 enum dma_data_direction dir)
271 {
272         struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
273         struct safe_buffer *buf = NULL;
274
275         /*
276          * Trying to unmap an invalid mapping
277          */
278         if (dma_mapping_error(dma_addr)) {
279                 dev_err(dev, "Trying to unmap invalid mapping\n");
280                 return;
281         }
282
283         if (device_info)
284                 buf = find_safe_buffer(device_info, dma_addr);
285
286         if (buf) {
287                 BUG_ON(buf->size != size);
288
289                 dev_dbg(dev,
290                         "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
291                         __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
292                         buf->safe, (void *) buf->safe_dma_addr);
293
294                 DO_STATS ( device_info->bounce_count++ );
295
296                 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
297                         unsigned long ptr;
298
299                         dev_dbg(dev,
300                                 "%s: copy back safe %p to unsafe %p size %d\n",
301                                 __func__, buf->safe, buf->ptr, size);
302                         memcpy(buf->ptr, buf->safe, size);
303
304                         /*
305                          * DMA buffers must have the same cache properties
306                          * as if they were really used for DMA - which means
307                          * data must be written back to RAM.  Note that
308                          * we don't use dmac_flush_range() here for the
309                          * bidirectional case because we know the cache
310                          * lines will be coherent with the data written.
311                          */
312                         ptr = (unsigned long)buf->ptr;
313                         dmac_clean_range(ptr, ptr + size);
314                 }
315                 free_safe_buffer(device_info, buf);
316         }
317 }
318
319 static inline void
320 sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
321                 enum dma_data_direction dir)
322 {
323         struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
324         struct safe_buffer *buf = NULL;
325
326         if (device_info)
327                 buf = find_safe_buffer(device_info, dma_addr);
328
329         if (buf) {
330                 /*
331                  * Both of these checks from original code need to be
332                  * commented out b/c some drivers rely on the following:
333                  *
334                  * 1) Drivers may map a large chunk of memory into DMA space
335                  *    but only sync a small portion of it. Good example is
336                  *    allocating a large buffer, mapping it, and then
337                  *    breaking it up into small descriptors. No point
338                  *    in syncing the whole buffer if you only have to
339                  *    touch one descriptor.
340                  *
341                  * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
342                  *    usually only synced in one dir at a time.
343                  *
344                  * See drivers/net/eepro100.c for examples of both cases.
345                  *
346                  * -ds
347                  *
348                  * BUG_ON(buf->size != size);
349                  * BUG_ON(buf->direction != dir);
350                  */
351
352                 dev_dbg(dev,
353                         "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
354                         __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
355                         buf->safe, (void *) buf->safe_dma_addr);
356
357                 DO_STATS ( device_info->bounce_count++ );
358
359                 switch (dir) {
360                 case DMA_FROM_DEVICE:
361                         dev_dbg(dev,
362                                 "%s: copy back safe %p to unsafe %p size %d\n",
363                                 __func__, buf->safe, buf->ptr, size);
364                         memcpy(buf->ptr, buf->safe, size);
365                         break;
366                 case DMA_TO_DEVICE:
367                         dev_dbg(dev,
368                                 "%s: copy out unsafe %p to safe %p, size %d\n",
369                                 __func__, buf->ptr, buf->safe, size);
370                         memcpy(buf->safe, buf->ptr, size);
371                         break;
372                 case DMA_BIDIRECTIONAL:
373                         BUG();  /* is this allowed?  what does it mean? */
374                 default:
375                         BUG();
376                 }
377                 consistent_sync(buf->safe, size, dir);
378         } else {
379                 consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
380         }
381 }
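/*
 * Editorial note: an illustrative sketch (not from this file) of the driver
 * pattern described in the comment inside sync_single() - map one large
 * buffer, then sync only the piece the CPU actually needs.  All names,
 * types and sizes below are hypothetical.
 */
#if 0
static void example_rx_poll(struct device *dev, void *ring, size_t ring_bytes)
{
	struct example_desc {		/* hypothetical descriptor layout */
		u32 status;
		u32 addr;
	} *desc = ring;
	dma_addr_t ring_dma;

	/* map the whole descriptor ring once */
	ring_dma = dma_map_single(dev, ring, ring_bytes, DMA_BIDIRECTIONAL);

	/*
	 * Later, pull just the first descriptor back to the CPU.  The size
	 * (and direction) differ from those used at map time, which is why
	 * sync_single() above cannot BUG_ON() a mismatch.
	 */
	dma_sync_single_for_cpu(dev, ring_dma, sizeof(*desc), DMA_FROM_DEVICE);

	if (desc->status & 1) {
		/* ... process the completed descriptor ... */
	}

	dma_unmap_single(dev, ring_dma, ring_bytes, DMA_BIDIRECTIONAL);
}
#endif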
382
383 /* ************************************************** */
384
385 /*
386  * see if a buffer address is in an 'unsafe' range.  if it is
387  * allocate a 'safe' buffer and copy the unsafe buffer into it.
388  * substitute the safe buffer for the unsafe one.
389  * (basically move the buffer from an unsafe area to a safe one)
390  */
391 dma_addr_t
392 dma_map_single(struct device *dev, void *ptr, size_t size,
393                 enum dma_data_direction dir)
394 {
395         dma_addr_t dma_addr;
396
397         dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
398                 __func__, ptr, size, dir);
399
400         BUG_ON(dir == DMA_NONE);
401
402         dma_addr = map_single(dev, ptr, size, dir);
403
404         return dma_addr;
405 }
406
407 /*
408  * see if a mapped address was really a "safe" buffer and if so, copy
409  * the data from the safe buffer back to the unsafe buffer and free up
410  * the safe buffer.  (basically return things back to the way they
411  * should be)
412  */
413
414 void
415 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
416                         enum dma_data_direction dir)
417 {
418         dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
419                 __func__, (void *) dma_addr, size, dir);
420
421         BUG_ON(dir == DMA_NONE);
422
423         unmap_single(dev, dma_addr, size, dir);
424 }
425
426 int
427 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
428                 enum dma_data_direction dir)
429 {
430         int i;
431
432         dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
433                 __func__, sg, nents, dir);
434
435         BUG_ON(dir == DMA_NONE);
436
437         for (i = 0; i < nents; i++, sg++) {
438                 struct page *page = sg->page;
439                 unsigned int offset = sg->offset;
440                 unsigned int length = sg->length;
441                 void *ptr = page_address(page) + offset;
442
443                 sg->dma_address =
444                         map_single(dev, ptr, length, dir);
445         }
446
447         return nents;
448 }
449
450 void
451 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
452                 enum dma_data_direction dir)
453 {
454         int i;
455
456         dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
457                 __func__, sg, nents, dir);
458
459         BUG_ON(dir == DMA_NONE);
460
461         for (i = 0; i < nents; i++, sg++) {
462                 dma_addr_t dma_addr = sg->dma_address;
463                 unsigned int length = sg->length;
464
465                 unmap_single(dev, dma_addr, length, dir);
466         }
467 }
468
469 void
470 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
471                                 enum dma_data_direction dir)
472 {
473         dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
474                 __func__, (void *) dma_addr, size, dir);
475
476         sync_single(dev, dma_addr, size, dir);
477 }
478
479 void
480 dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
481                                 enum dma_data_direction dir)
482 {
483         dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
484                 __func__, (void *) dma_addr, size, dir);
485
486         sync_single(dev, dma_addr, size, dir);
487 }
488
489 void
490 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
491                         enum dma_data_direction dir)
492 {
493         int i;
494
495         dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
496                 __func__, sg, nents, dir);
497
498         BUG_ON(dir == DMA_NONE);
499
500         for (i = 0; i < nents; i++, sg++) {
501                 dma_addr_t dma_addr = sg->dma_address;
502                 unsigned int length = sg->length;
503
504                 sync_single(dev, dma_addr, length, dir);
505         }
506 }
507
508 void
509 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
510                         enum dma_data_direction dir)
511 {
512         int i;
513
514         dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
515                 __func__, sg, nents, dir);
516
517         BUG_ON(dir == DMA_NONE);
518
519         for (i = 0; i < nents; i++, sg++) {
520                 dma_addr_t dma_addr = sg->dma_address;
521                 unsigned int length = sg->length;
522
523                 sync_single(dev, dma_addr, length, dir);
524         }
525 }
526
527 static int
528 dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
529                     unsigned long size)
530 {
531         pool->size = size;
532         DO_STATS(pool->allocs = 0);
533         pool->pool = dma_pool_create(name, dev, size,
534                                      0 /* byte alignment */,
535                                      0 /* no page-crossing issues */);
536
537         return pool->pool ? 0 : -ENOMEM;
538 }
539
540 int
541 dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
542                         unsigned long large_buffer_size)
543 {
544         struct dmabounce_device_info *device_info;
545         int ret;
546
547         device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
548         if (!device_info) {
549                 printk(KERN_ERR
550                         "Could not allocate dmabounce_device_info for %s\n",
551                         dev->bus_id);
552                 return -ENOMEM;
553         }
554
555         ret = dmabounce_init_pool(&device_info->small, dev,
556                                   "small_dmabounce_pool", small_buffer_size);
557         if (ret) {
558                 dev_err(dev,
559                         "dmabounce: could not allocate DMA pool for %ld byte objects\n",
560                         small_buffer_size);
561                 goto err_free;
562         }
563
564         if (large_buffer_size) {
565                 ret = dmabounce_init_pool(&device_info->large, dev,
566                                           "large_dmabounce_pool",
567                                           large_buffer_size);
568                 if (ret) {
569                         dev_err(dev,
570                                 "dmabounce: could not allocate DMA pool for %ld byte objects\n",
571                                 large_buffer_size);
572                         goto err_destroy;
573                 }
574         }
575
576         device_info->dev = dev;
577         INIT_LIST_HEAD(&device_info->safe_buffers);
578         rwlock_init(&device_info->lock);
579
580 #ifdef STATS
581         device_info->total_allocs = 0;
582         device_info->map_op_count = 0;
583         device_info->bounce_count = 0;
584         device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
585 #endif
586
587         dev->archdata.dmabounce = device_info;
588
589         printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
590                 dev->bus_id, dev->bus->name);
591
592         return 0;
593
594  err_destroy:
595         dma_pool_destroy(device_info->small.pool);
596  err_free:
597         kfree(device_info);
598         return ret;
599 }
600
601 void
602 dmabounce_unregister_dev(struct device *dev)
603 {
604         struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
605
606         dev->archdata.dmabounce = NULL;
607
608         if (!device_info) {
609                 printk(KERN_WARNING
610                         "%s: Never registered with dmabounce but attempting"
611                         " to unregister!\n", dev->bus_id);
612                 return;
613         }
614
615         if (!list_empty(&device_info->safe_buffers)) {
616                 printk(KERN_ERR
617                         "%s: Removing from dmabounce with pending buffers!\n",
618                         dev->bus_id);
619                 BUG();
620         }
621
622         if (device_info->small.pool)
623                 dma_pool_destroy(device_info->small.pool);
624         if (device_info->large.pool)
625                 dma_pool_destroy(device_info->large.pool);
626
627 #ifdef STATS
628         if (device_info->attr_res == 0)
629                 device_remove_file(dev, &dev_attr_dmabounce_stats);
630 #endif
631
632         kfree(device_info);
633
634         printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",
635                 dev->bus_id, dev->bus->name);
636 }
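/*
 * Editorial note: a minimal sketch (not part of this file) of how platform
 * code might hook a device up to dmabounce.  The window size, pool sizes and
 * function names below are hypothetical; real users of this API include the
 * SA1111 and IXP4xx support code.
 */
#if 0
/*
 * Platform-provided predicate used by map_single() above: return non-zero
 * if [dma_addr, dma_addr + size) is not reachable by this device.
 */
int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	return (dma_addr + size) > 0x04000000;	/* hypothetical 64MB window */
}

static int example_probe(struct device *dev)
{
	/*
	 * 512-byte pool for small transfers, 4K pool for larger ones;
	 * anything bigger falls back to dma_alloc_coherent().
	 */
	return dmabounce_register_dev(dev, 512, 4096);
}

static void example_remove(struct device *dev)
{
	dmabounce_unregister_dev(dev);
}
#endif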
637
638
639 EXPORT_SYMBOL(dma_map_single);
640 EXPORT_SYMBOL(dma_unmap_single);
641 EXPORT_SYMBOL(dma_map_sg);
642 EXPORT_SYMBOL(dma_unmap_sg);
643 EXPORT_SYMBOL(dma_sync_single_for_cpu);
644 EXPORT_SYMBOL(dma_sync_single_for_device);
645 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
646 EXPORT_SYMBOL(dma_sync_sg_for_device);
647 EXPORT_SYMBOL(dmabounce_register_dev);
648 EXPORT_SYMBOL(dmabounce_unregister_dev);
649
650 MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
651 MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
652 MODULE_LICENSE("GPL");