/*****************************************************************************
* Copyright 2004 - 2008 Broadcom Corporation.  All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/

/****************************************************************************/
/**
*   @file   dma.c
*
*   @brief  Implements the DMA interface.
*/
/****************************************************************************/

/* ---- Include Files ---------------------------------------------------- */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/irqreturn.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>

#include <mach/timer.h>

#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/atomic.h>
#include <mach/dma.h>

/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */
/* especially since dc4 doesn't use kmalloc'd memory. */

#define ALLOW_MAP_OF_KMALLOC_MEMORY 0

/* ---- Public Variables ------------------------------------------------- */

/* ---- Private Constants and Types -------------------------------------- */

#define MAKE_HANDLE(controllerIdx, channelIdx)    (((controllerIdx) << 4) | (channelIdx))

#define CONTROLLER_FROM_HANDLE(handle)    (((handle) >> 4) & 0x0f)
#define CHANNEL_FROM_HANDLE(handle)       ((handle) & 0x0f)
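
/*
 * Illustrative note (not in the original source): a DMA handle packs the
 * controller index into the upper nibble and the channel index into the
 * lower nibble, so the macros round-trip:
 *
 *      DMA_Handle_t h = MAKE_HANDLE(1, 3);     gives 0x13
 *      CONTROLLER_FROM_HANDLE(h)               gives 1
 *      CHANNEL_FROM_HANDLE(h)                  gives 3
 */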

#define DMA_MAP_DEBUG   0

#if DMA_MAP_DEBUG
#   define  DMA_MAP_PRINT(fmt, args...)   printk("%s: " fmt, __func__,  ## args)
#else
#   define  DMA_MAP_PRINT(fmt, args...)
#endif

/* ---- Private Variables ------------------------------------------------ */

static DMA_Global_t gDMA;
static struct proc_dir_entry *gDmaDir;

static atomic_t gDmaStatMemTypeKmalloc = ATOMIC_INIT(0);
static atomic_t gDmaStatMemTypeVmalloc = ATOMIC_INIT(0);
static atomic_t gDmaStatMemTypeUser = ATOMIC_INIT(0);
static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0);

#include "dma_device.c"

/* ---- Private Function Prototypes -------------------------------------- */

/* ---- Functions  ------------------------------------------------------- */

/****************************************************************************/
/**
*   Displays information for /proc/dma/mem-type
*/
/****************************************************************************/

static int dma_proc_read_mem_type(char *buf, char **start, off_t offset,
                                  int count, int *eof, void *data)
{
        int len = 0;

        len += sprintf(buf + len, "dma_map_mem statistics\n");
        len +=
            sprintf(buf + len, "coherent: %d\n",
                    atomic_read(&gDmaStatMemTypeCoherent));
        len +=
            sprintf(buf + len, "kmalloc:  %d\n",
                    atomic_read(&gDmaStatMemTypeKmalloc));
        len +=
            sprintf(buf + len, "vmalloc:  %d\n",
                    atomic_read(&gDmaStatMemTypeVmalloc));
        len +=
            sprintf(buf + len, "user:     %d\n",
                    atomic_read(&gDmaStatMemTypeUser));

        return len;
}

/****************************************************************************/
/**
*   Displays information for /proc/dma/channels
*/
/****************************************************************************/

static int dma_proc_read_channels(char *buf, char **start, off_t offset,
                                  int count, int *eof, void *data)
{
        int controllerIdx;
        int channelIdx;
        int limit = count - 200;
        int len = 0;
        DMA_Channel_t *channel;

        if (down_interruptible(&gDMA.lock) < 0) {
                return -ERESTARTSYS;
        }

        for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS;
             controllerIdx++) {
                for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS;
                     channelIdx++) {
                        if (len >= limit) {
                                break;
                        }

                        channel =
                            &gDMA.controller[controllerIdx].channel[channelIdx];

                        len +=
                            sprintf(buf + len, "%d:%d ", controllerIdx,
                                    channelIdx);

                        if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) !=
                            0) {
                                len +=
                                    sprintf(buf + len, "Dedicated for %s ",
                                            DMA_gDeviceAttribute[channel->
                                                                 devType].name);
                        } else {
                                len += sprintf(buf + len, "Shared ");
                        }

                        if ((channel->flags & DMA_CHANNEL_FLAG_NO_ISR) != 0) {
                                len += sprintf(buf + len, "No ISR ");
                        }

                        if ((channel->flags & DMA_CHANNEL_FLAG_LARGE_FIFO) != 0) {
                                len += sprintf(buf + len, "Fifo: 128 ");
                        } else {
                                len += sprintf(buf + len, "Fifo: 64  ");
                        }

                        if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) {
                                len +=
                                    sprintf(buf + len, "InUse by %s",
                                            DMA_gDeviceAttribute[channel->
                                                                 devType].name);
#if (DMA_DEBUG_TRACK_RESERVATION)
                                len +=
                                    sprintf(buf + len, " (%s:%d)",
                                            channel->fileName,
                                            channel->lineNum);
#endif
                        } else {
                                len += sprintf(buf + len, "Avail ");
                        }

                        if (channel->lastDevType != DMA_DEVICE_NONE) {
                                len +=
                                    sprintf(buf + len, "Last use: %s ",
                                            DMA_gDeviceAttribute[channel->
                                                                 lastDevType].
                                            name);
                        }

                        len += sprintf(buf + len, "\n");
                }
        }
        up(&gDMA.lock);
        *eof = 1;

        return len;
}

/****************************************************************************/
/**
*   Displays information for /proc/dma/devices
*/
/****************************************************************************/

static int dma_proc_read_devices(char *buf, char **start, off_t offset,
                                 int count, int *eof, void *data)
{
        int limit = count - 200;
        int len = 0;
        int devIdx;

        if (down_interruptible(&gDMA.lock) < 0) {
                return -ERESTARTSYS;
        }

        for (devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++) {
                DMA_DeviceAttribute_t *devAttr = &DMA_gDeviceAttribute[devIdx];

                if (devAttr->name == NULL) {
                        continue;
                }

                if (len >= limit) {
                        break;
                }

                len += sprintf(buf + len, "%-12s ", devAttr->name);

                if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) {
                        len +=
                            sprintf(buf + len, "Dedicated %d:%d ",
                                    devAttr->dedicatedController,
                                    devAttr->dedicatedChannel);
                } else {
                        len += sprintf(buf + len, "Shared DMA:");
                        if ((devAttr->flags & DMA_DEVICE_FLAG_ON_DMA0) != 0) {
                                len += sprintf(buf + len, "0");
                        }
                        if ((devAttr->flags & DMA_DEVICE_FLAG_ON_DMA1) != 0) {
                                len += sprintf(buf + len, "1");
                        }
                        len += sprintf(buf + len, " ");
                }
                if ((devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) != 0) {
                        len += sprintf(buf + len, "NoISR ");
                }
                if ((devAttr->flags & DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO) != 0) {
                        len += sprintf(buf + len, "Allow-128 ");
                }

                len +=
                    sprintf(buf + len,
                            "Xfer #: %Lu Ticks: %Lu Bytes: %Lu DescLen: %u\n",
                            devAttr->numTransfers, devAttr->transferTicks,
                            devAttr->transferBytes,
                            devAttr->ring.bytesAllocated);

        }

        up(&gDMA.lock);
        *eof = 1;

        return len;
}

/****************************************************************************/
/**
*   Determines if a DMA_Device_t is "valid".
*
*   @return
*       TRUE        - dma device is valid
*       FALSE       - dma device isn't valid
*/
/****************************************************************************/

static inline int IsDeviceValid(DMA_Device_t device)
{
        return (device >= 0) && (device < DMA_NUM_DEVICE_ENTRIES);
}

/****************************************************************************/
/**
*   Translates a DMA handle into a pointer to a channel.
*
*   @return
*       non-NULL    - pointer to DMA_Channel_t
*       NULL        - DMA Handle was invalid
*/
/****************************************************************************/

static inline DMA_Channel_t *HandleToChannel(DMA_Handle_t handle)
{
        int controllerIdx;
        int channelIdx;

        controllerIdx = CONTROLLER_FROM_HANDLE(handle);
        channelIdx = CHANNEL_FROM_HANDLE(handle);

        /* Use >= : an index equal to the array size is already out of range. */

        if ((controllerIdx >= DMA_NUM_CONTROLLERS)
            || (channelIdx >= DMA_NUM_CHANNELS)) {
                return NULL;
        }
        return &gDMA.controller[controllerIdx].channel[channelIdx];
}

/****************************************************************************/
/**
*   Interrupt handler which is called to process DMA interrupts.
*/
/****************************************************************************/

static irqreturn_t dma_interrupt_handler(int irq, void *dev_id)
{
        DMA_Channel_t *channel;
        DMA_DeviceAttribute_t *devAttr;
        int irqStatus;

        channel = (DMA_Channel_t *) dev_id;

        /* Figure out why we were called, and knock down the interrupt */

        irqStatus = dmacHw_getInterruptStatus(channel->dmacHwHandle);
        dmacHw_clearInterrupt(channel->dmacHwHandle);

        /* Use >= : devType == DMA_NUM_DEVICE_ENTRIES would index past the table. */

        if ((channel->devType < 0)
            || (channel->devType >= DMA_NUM_DEVICE_ENTRIES)) {
                printk(KERN_ERR "dma_interrupt_handler: Invalid devType: %d\n",
                       channel->devType);
                return IRQ_NONE;
        }
        devAttr = &DMA_gDeviceAttribute[channel->devType];

        /* Update stats */

        if ((irqStatus & dmacHw_INTERRUPT_STATUS_TRANS) != 0) {
                devAttr->transferTicks +=
                    (timer_get_tick_count() - devAttr->transferStartTime);
        }

        if ((irqStatus & dmacHw_INTERRUPT_STATUS_ERROR) != 0) {
                printk(KERN_ERR
                       "dma_interrupt_handler: devType: %d DMA error (%s)\n",
                       channel->devType, devAttr->name);
        } else {
                devAttr->numTransfers++;
                devAttr->transferBytes += devAttr->numBytes;
        }

        /* Call any installed handler */

        if (devAttr->devHandler != NULL) {
                devAttr->devHandler(channel->devType, irqStatus,
                                    devAttr->userData);
        }

        return IRQ_HANDLED;
}

/****************************************************************************/
/**
*   Allocates memory to hold a descriptor ring. The descriptor ring then
*   needs to be populated by making one or more calls to
*   dma_add_descriptors.
*
*   The returned descriptor ring will be automatically initialized.
*
*   @return
*       0           Descriptor ring was allocated successfully
*       -EINVAL     Invalid parameters passed in
*       -ENOMEM     Unable to allocate memory for the desired number of descriptors.
*/
/****************************************************************************/

int dma_alloc_descriptor_ring(DMA_DescriptorRing_t *ring,       /* Descriptor ring to populate */
                              int numDescriptors        /* Number of descriptors that need to be allocated. */
    ) {
        size_t bytesToAlloc = dmacHw_descriptorLen(numDescriptors);

        if ((ring == NULL) || (numDescriptors <= 0)) {
                return -EINVAL;
        }

        ring->physAddr = 0;
        ring->descriptorsAllocated = 0;
        ring->bytesAllocated = 0;

        ring->virtAddr = dma_alloc_writecombine(NULL,
                                                bytesToAlloc,
                                                &ring->physAddr,
                                                GFP_KERNEL);
        if (ring->virtAddr == NULL) {
                return -ENOMEM;
        }

        ring->bytesAllocated = bytesToAlloc;
        ring->descriptorsAllocated = numDescriptors;

        return dma_init_descriptor_ring(ring, numDescriptors);
}

EXPORT_SYMBOL(dma_alloc_descriptor_ring);
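
/*
 * Usage sketch (illustrative, not part of the original driver): allocate a
 * ring large enough for four descriptors, use it, then release it.
 *
 *      DMA_DescriptorRing_t ring;
 *
 *      if (dma_alloc_descriptor_ring(&ring, 4) == 0) {
 *              ... populate with dma_add_descriptors() and run transfers ...
 *              dma_free_descriptor_ring(&ring);
 *      }
 */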

/****************************************************************************/
/**
*   Releases the memory which was previously allocated for a descriptor ring.
*/
/****************************************************************************/

void dma_free_descriptor_ring(DMA_DescriptorRing_t *ring        /* Descriptor ring to release */
    ) {
        if (ring->virtAddr != NULL) {
                dma_free_writecombine(NULL,
                                      ring->bytesAllocated,
                                      ring->virtAddr, ring->physAddr);
        }

        ring->bytesAllocated = 0;
        ring->descriptorsAllocated = 0;
        ring->virtAddr = NULL;
        ring->physAddr = 0;
}

EXPORT_SYMBOL(dma_free_descriptor_ring);

/****************************************************************************/
/**
*   Initializes a descriptor ring, so that descriptors can be added to it.
*   Once a descriptor ring has been allocated, it may be reinitialized for
*   use with additional/different regions of memory.
*
*   Note that if 7 descriptors are allocated, it's perfectly acceptable to
*   initialize the ring with a smaller number of descriptors. The amount
*   of memory allocated for the descriptor ring will not be reduced, and
*   the descriptor ring may be reinitialized later.
*
*   @return
*       0           Descriptor ring was initialized successfully
*       -EINVAL     No memory has been allocated for the ring (virtAddr is NULL)
*       -ENOMEM     The descriptor ring which was passed in has insufficient
*                   space to hold the desired number of descriptors.
*/
/****************************************************************************/

int dma_init_descriptor_ring(DMA_DescriptorRing_t *ring,        /* Descriptor ring to initialize */
                             int numDescriptors /* Number of descriptors to initialize. */
    ) {
        if (ring->virtAddr == NULL) {
                return -EINVAL;
        }
        if (dmacHw_initDescriptor(ring->virtAddr,
                                  ring->physAddr,
                                  ring->bytesAllocated, numDescriptors) < 0) {
                printk(KERN_ERR
                       "dma_init_descriptor_ring: dmacHw_initDescriptor failed\n");
                return -ENOMEM;
        }

        return 0;
}

EXPORT_SYMBOL(dma_init_descriptor_ring);
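
/*
 * Usage sketch (illustrative): a ring allocated for 7 descriptors may later
 * be reinitialized for fewer, reusing the same allocation.
 *
 *      dma_alloc_descriptor_ring(&ring, 7);
 *      ...
 *      dma_init_descriptor_ring(&ring, 3);
 */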

/****************************************************************************/
/**
*   Determines the number of descriptors which would be required for a
*   transfer of the indicated memory region.
*
*   This function also needs to know which DMA device this transfer will
*   be destined for, so that the appropriate DMA configuration can be retrieved.
*   DMA parameters such as transfer width, and whether this is a memory-to-memory
*   or memory-to-peripheral transfer, etc., can all affect the actual number of
*   descriptors required.
*
*   @return
*       > 0     Number of descriptors required for the indicated transfer
*       -ENODEV Device handed in is invalid.
*       -EINVAL Invalid parameters
*/
/****************************************************************************/

int dma_calculate_descriptor_count(DMA_Device_t device, /* DMA Device that this will be associated with */
                                   dma_addr_t srcData,  /* Place to get data to write to device */
                                   dma_addr_t dstData,  /* Pointer to device data address */
                                   size_t numBytes      /* Number of bytes to transfer to the device */
    ) {
        int numDescriptors;
        DMA_DeviceAttribute_t *devAttr;

        if (!IsDeviceValid(device)) {
                return -ENODEV;
        }
        devAttr = &DMA_gDeviceAttribute[device];

        numDescriptors = dmacHw_calculateDescriptorCount(&devAttr->config,
                                                         (void *)srcData,
                                                         (void *)dstData,
                                                         numBytes);
        if (numDescriptors < 0) {
                printk(KERN_ERR
                       "dma_calculate_descriptor_count: dmacHw_calculateDescriptorCount failed\n");
                return -EINVAL;
        }

        return numDescriptors;
}

EXPORT_SYMBOL(dma_calculate_descriptor_count);
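
/*
 * Usage sketch (illustrative): size the ring before allocating it.
 * DMA_DEVICE_MEM_TO_MEM is assumed to be one of the entries defined in
 * dma_device.c; srcPhys/dstPhys are hypothetical physical addresses.
 *
 *      int count = dma_calculate_descriptor_count(DMA_DEVICE_MEM_TO_MEM,
 *                                                 srcPhys, dstPhys, numBytes);
 *      if (count > 0) {
 *              rc = dma_alloc_descriptor_ring(&ring, count);
 *      }
 */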

/****************************************************************************/
/**
*   Adds a region of memory to the descriptor ring. Note that it may take
*   multiple descriptors for each region of memory. It is the caller's
*   responsibility to allocate a sufficiently large descriptor ring.
*
*   @return
*       0       Descriptors were added successfully
*       -ENODEV Device handed in is invalid.
*       -ENOMEM Insufficient room in the descriptor ring
*/
/****************************************************************************/

int dma_add_descriptors(DMA_DescriptorRing_t *ring,     /* Descriptor ring to add descriptors to */
                        DMA_Device_t device,    /* DMA Device that descriptors are for */
                        dma_addr_t srcData,     /* Place to get data (memory or device) */
                        dma_addr_t dstData,     /* Place to put data (memory or device) */
                        size_t numBytes /* Number of bytes to transfer to the device */
    ) {
        int rc;
        DMA_DeviceAttribute_t *devAttr;

        if (!IsDeviceValid(device)) {
                return -ENODEV;
        }
        devAttr = &DMA_gDeviceAttribute[device];

        rc = dmacHw_setDataDescriptor(&devAttr->config,
                                      ring->virtAddr,
                                      (void *)srcData,
                                      (void *)dstData, numBytes);
        if (rc < 0) {
                printk(KERN_ERR
                       "dma_add_descriptors: dmacHw_setDataDescriptor failed with code: %d\n",
                       rc);
                return -ENOMEM;
        }

        return 0;
}

EXPORT_SYMBOL(dma_add_descriptors);
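
/*
 * Usage sketch (illustrative): the count/alloc/add flow for a one-off
 * transfer.  Names other than the dma_* functions are hypothetical.
 *
 *      count = dma_calculate_descriptor_count(dev, srcPhys, dstPhys, nbytes);
 *      if (count > 0 && dma_alloc_descriptor_ring(&ring, count) == 0) {
 *              rc = dma_add_descriptors(&ring, dev, srcPhys, dstPhys, nbytes);
 *      }
 */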

/****************************************************************************/
/**
*   Sets the descriptor ring associated with a device.
*
*   Once set, the descriptor ring will be associated with the device, even
*   across channel request/free calls. Passing in a NULL descriptor ring
*   will release any descriptor ring currently associated with the device.
*
*   Note: If you call dma_transfer, or one of the other dma_alloc_ functions,
*         the descriptor ring may be released and reallocated.
*
*   Note: This function will release the descriptor memory for any current
*         descriptor ring associated with this device.
*
*   @return
*       0       Descriptor ring was set successfully
*       -ENODEV Device handed in is invalid.
*/
/****************************************************************************/

int dma_set_device_descriptor_ring(DMA_Device_t device, /* Device to update the descriptor ring for. */
                                   DMA_DescriptorRing_t *ring   /* Descriptor ring to associate with the device */
    ) {
        DMA_DeviceAttribute_t *devAttr;

        if (!IsDeviceValid(device)) {
                return -ENODEV;
        }
        devAttr = &DMA_gDeviceAttribute[device];

        /* Free the previously allocated descriptor ring */

        dma_free_descriptor_ring(&devAttr->ring);

        if (ring != NULL) {
                /* Copy in the new one */

                devAttr->ring = *ring;
        }

        /* Set things up so that if dma_transfer is called then this descriptor */
        /* ring will get freed. */

        devAttr->prevSrcData = 0;
        devAttr->prevDstData = 0;
        devAttr->prevNumBytes = 0;

        return 0;
}

EXPORT_SYMBOL(dma_set_device_descriptor_ring);
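
/*
 * Usage sketch (illustrative): pre-associate a ring with a device so that a
 * later transfer can be kicked off from interrupt context, where the ring
 * cannot be allocated.
 *
 *      dma_alloc_descriptor_ring(&ring, count);        (process context)
 *      dma_add_descriptors(&ring, dev, srcPhys, dstPhys, nbytes);
 *      dma_set_device_descriptor_ring(dev, &ring);
 *      ...
 *      dma_start_transfer(handle);                     (may be IRQ context)
 */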

/****************************************************************************/
/**
*   Retrieves the descriptor ring associated with a device.
*
*   @return
*       0       Descriptor ring was retrieved successfully
*       -ENODEV Device handed in is invalid.
*/
/****************************************************************************/

int dma_get_device_descriptor_ring(DMA_Device_t device, /* Device to retrieve the descriptor ring for. */
                                   DMA_DescriptorRing_t *ring   /* Place to store retrieved ring */
    ) {
        DMA_DeviceAttribute_t *devAttr;

        memset(ring, 0, sizeof(*ring));

        if (!IsDeviceValid(device)) {
                return -ENODEV;
        }
        devAttr = &DMA_gDeviceAttribute[device];

        *ring = devAttr->ring;

        return 0;
}

EXPORT_SYMBOL(dma_get_device_descriptor_ring);

/****************************************************************************/
/**
*   Configures a DMA channel.
*
*   @return
*       0       - Configuration was successful.
*       -ENODEV - Handle is invalid.
*       -EIO    - The hardware rejected the configuration.
*/
/****************************************************************************/

static int ConfigChannel(DMA_Handle_t handle)
{
        DMA_Channel_t *channel;
        DMA_DeviceAttribute_t *devAttr;
        int controllerIdx;

        channel = HandleToChannel(handle);
        if (channel == NULL) {
                return -ENODEV;
        }
        devAttr = &DMA_gDeviceAttribute[channel->devType];
        controllerIdx = CONTROLLER_FROM_HANDLE(handle);

        if ((devAttr->flags & DMA_DEVICE_FLAG_PORT_PER_DMAC) != 0) {
                if (devAttr->config.transferType ==
                    dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL) {
                        devAttr->config.dstPeripheralPort =
                            devAttr->dmacPort[controllerIdx];
                } else if (devAttr->config.transferType ==
                           dmacHw_TRANSFER_TYPE_PERIPHERAL_TO_MEM) {
                        devAttr->config.srcPeripheralPort =
                            devAttr->dmacPort[controllerIdx];
                }
        }

        if (dmacHw_configChannel(channel->dmacHwHandle, &devAttr->config) != 0) {
                printk(KERN_ERR "ConfigChannel: dmacHw_configChannel failed\n");
                return -EIO;
        }

        return 0;
}

/****************************************************************************/
/**
*   Initializes all of the data structures associated with the DMA.
*   @return
*       0       - Initialization was successful.
*       -EINVAL - A device table entry is inconsistent.
*       -EBUSY  - Two devices claim the same dedicated channel.
*/
/****************************************************************************/

int dma_init(void)
{
        int rc = 0;
        int controllerIdx;
        int channelIdx;
        DMA_Device_t devIdx;
        DMA_Channel_t *channel;
        DMA_Handle_t dedicatedHandle;

        memset(&gDMA, 0, sizeof(gDMA));

        /* The semaphore starts out locked; the up() at 'out' releases it */
        /* once initialization is complete. */

        sema_init(&gDMA.lock, 0);
        init_waitqueue_head(&gDMA.freeChannelQ);

        /* Initialize the Hardware */

        dmacHw_initDma();

        /* Start off by marking all of the DMA channels as shared. */

        for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS;
             controllerIdx++) {
                for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS;
                     channelIdx++) {
                        channel =
                            &gDMA.controller[controllerIdx].channel[channelIdx];

                        channel->flags = 0;
                        channel->devType = DMA_DEVICE_NONE;
                        channel->lastDevType = DMA_DEVICE_NONE;

#if (DMA_DEBUG_TRACK_RESERVATION)
                        channel->fileName = "";
                        channel->lineNum = 0;
#endif

                        channel->dmacHwHandle =
                            dmacHw_getChannelHandle(dmacHw_MAKE_CHANNEL_ID
                                                    (controllerIdx,
                                                     channelIdx));
                        dmacHw_initChannel(channel->dmacHwHandle);
                }
        }

        /* Record any special attributes that channels may have */

        gDMA.controller[0].channel[0].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;
        gDMA.controller[0].channel[1].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;
        gDMA.controller[1].channel[0].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;
        gDMA.controller[1].channel[1].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;

        /* Now walk through and record the dedicated channels. */

        for (devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++) {
                DMA_DeviceAttribute_t *devAttr = &DMA_gDeviceAttribute[devIdx];

                if (((devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) != 0)
                    && ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) == 0)) {
                        printk(KERN_ERR
                               "DMA Device: %s Can only request NO_ISR for dedicated devices\n",
                               devAttr->name);
                        rc = -EINVAL;
                        goto out;
                }

                if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) {
                        /* This is a dedicated device. Mark the channel as being reserved. */

                        if (devAttr->dedicatedController >= DMA_NUM_CONTROLLERS) {
                                printk(KERN_ERR
                                       "DMA Device: %s DMA Controller %d is out of range\n",
                                       devAttr->name,
                                       devAttr->dedicatedController);
                                rc = -EINVAL;
                                goto out;
                        }

                        if (devAttr->dedicatedChannel >= DMA_NUM_CHANNELS) {
                                printk(KERN_ERR
                                       "DMA Device: %s DMA Channel %d is out of range\n",
                                       devAttr->name,
                                       devAttr->dedicatedChannel);
                                rc = -EINVAL;
                                goto out;
                        }

                        dedicatedHandle =
                            MAKE_HANDLE(devAttr->dedicatedController,
                                        devAttr->dedicatedChannel);
                        channel = HandleToChannel(dedicatedHandle);

                        if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) !=
                            0) {
                                printk(KERN_ERR
                                       "DMA Device: %s attempting to use same DMA Controller:Channel (%d:%d) as %s\n",
                                       devAttr->name,
                                       devAttr->dedicatedController,
                                       devAttr->dedicatedChannel,
                                       DMA_gDeviceAttribute[channel->devType].
                                       name);
                                rc = -EBUSY;
                                goto out;
                        }

                        channel->flags |= DMA_CHANNEL_FLAG_IS_DEDICATED;
                        channel->devType = devIdx;

                        if (devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) {
                                channel->flags |= DMA_CHANNEL_FLAG_NO_ISR;
                        }

                        /* For dedicated channels, we can go ahead and configure the DMA channel now */
                        /* as well. */

                        ConfigChannel(dedicatedHandle);
                }
        }

        /* Go through and register the interrupt handlers */

        for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS;
             controllerIdx++) {
                for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS;
                     channelIdx++) {
                        channel =
                            &gDMA.controller[controllerIdx].channel[channelIdx];

                        if ((channel->flags & DMA_CHANNEL_FLAG_NO_ISR) == 0) {
                                snprintf(channel->name, sizeof(channel->name),
                                         "dma %d:%d %s", controllerIdx,
                                         channelIdx,
                                         channel->devType ==
                                         DMA_DEVICE_NONE ? "" :
                                         DMA_gDeviceAttribute[channel->devType].
                                         name);

                                rc =
                                     request_irq(IRQ_DMA0C0 +
                                                 (controllerIdx *
                                                  DMA_NUM_CHANNELS) +
                                                 channelIdx,
                                                 dma_interrupt_handler,
                                                 IRQF_DISABLED, channel->name,
                                                 channel);
                                if (rc != 0) {
                                        printk(KERN_ERR
                                               "request_irq for IRQ_DMA%dC%d failed\n",
                                               controllerIdx, channelIdx);
                                }
                        }
                }
        }

        /* Create /proc/dma/channels, /proc/dma/devices and /proc/dma/mem-type */

        gDmaDir = proc_mkdir("dma", NULL);

        if (gDmaDir == NULL) {
                printk(KERN_ERR "Unable to create /proc/dma\n");
        } else {
                create_proc_read_entry("channels", 0, gDmaDir,
                                       dma_proc_read_channels, NULL);
                create_proc_read_entry("devices", 0, gDmaDir,
                                       dma_proc_read_devices, NULL);
                create_proc_read_entry("mem-type", 0, gDmaDir,
                                       dma_proc_read_mem_type, NULL);
        }

out:

        up(&gDMA.lock);

        return rc;
}

/****************************************************************************/
/**
*   Reserves a channel for use with @a dev. If the device is setup to use
*   a shared channel, then this function will block until a free channel
*   becomes available.
*
*   @return
*       >= 0    - A valid DMA Handle.
*       -EBUSY  - Device is currently being used.
*       -ENODEV - Device handed in is invalid.
*       -EIO    - Channel could not be configured.
*       -ERESTARTSYS - Interrupted while waiting for a free channel.
*/
/****************************************************************************/

#if (DMA_DEBUG_TRACK_RESERVATION)
DMA_Handle_t dma_request_channel_dbg
    (DMA_Device_t dev, const char *fileName, int lineNum)
#else
DMA_Handle_t dma_request_channel(DMA_Device_t dev)
#endif
{
        DMA_Handle_t handle;
        DMA_DeviceAttribute_t *devAttr;
        DMA_Channel_t *channel;
        int controllerIdx;
        int controllerIdx2;
        int channelIdx;

        if (down_interruptible(&gDMA.lock) < 0) {
                return -ERESTARTSYS;
        }

        if ((dev < 0) || (dev >= DMA_NUM_DEVICE_ENTRIES)) {
                handle = -ENODEV;
                goto out;
        }
        devAttr = &DMA_gDeviceAttribute[dev];

#if (DMA_DEBUG_TRACK_RESERVATION)
        {
                char *s;

                s = strrchr(fileName, '/');
                if (s != NULL) {
                        fileName = s + 1;
                }
        }
#endif
        if ((devAttr->flags & DMA_DEVICE_FLAG_IN_USE) != 0) {
                /* This device has already been requested and not been freed */

                printk(KERN_ERR "%s: device %s is already requested\n",
                       __func__, devAttr->name);
                handle = -EBUSY;
                goto out;
        }

        if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) {
                /* This device has a dedicated channel. */

                channel =
                    &gDMA.controller[devAttr->dedicatedController].
                    channel[devAttr->dedicatedChannel];
                if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) {
                        handle = -EBUSY;
                        goto out;
                }

                channel->flags |= DMA_CHANNEL_FLAG_IN_USE;
                devAttr->flags |= DMA_DEVICE_FLAG_IN_USE;

#if (DMA_DEBUG_TRACK_RESERVATION)
                channel->fileName = fileName;
                channel->lineNum = lineNum;
#endif
                handle =
                    MAKE_HANDLE(devAttr->dedicatedController,
                                devAttr->dedicatedChannel);
                goto out;
        }

        /* This device needs to use one of the shared channels. */

        handle = DMA_INVALID_HANDLE;
        while (handle == DMA_INVALID_HANDLE) {
                /* Scan through the shared channels and see if one is available */

                for (controllerIdx2 = 0; controllerIdx2 < DMA_NUM_CONTROLLERS;
                     controllerIdx2++) {
                        /* Check to see if we should try on controller 1 first. */

                        controllerIdx = controllerIdx2;
                        if ((devAttr->
                             flags & DMA_DEVICE_FLAG_ALLOC_DMA1_FIRST) != 0) {
                                controllerIdx = 1 - controllerIdx;
                        }

                        /* See if the device is available on the controller being tested */

                        if ((devAttr->
                             flags & (DMA_DEVICE_FLAG_ON_DMA0 << controllerIdx))
                            != 0) {
                                for (channelIdx = 0;
                                     channelIdx < DMA_NUM_CHANNELS;
                                     channelIdx++) {
                                        channel =
                                            &gDMA.controller[controllerIdx].
                                            channel[channelIdx];

                                        if (((channel->
                                              flags &
                                              DMA_CHANNEL_FLAG_IS_DEDICATED) ==
                                             0)
                                            &&
                                            ((channel->
                                              flags & DMA_CHANNEL_FLAG_IN_USE)
                                             == 0)) {
                                                if (((channel->
                                                      flags &
                                                      DMA_CHANNEL_FLAG_LARGE_FIFO)
                                                     != 0)
                                                    &&
                                                    ((devAttr->
                                                      flags &
                                                      DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO)
                                                     == 0)) {
                                                        /* This channel is a large fifo - don't tie it up */
                                                        /* with devices that we don't want using it. */

                                                        continue;
                                                }

                                                channel->flags |=
                                                    DMA_CHANNEL_FLAG_IN_USE;
                                                channel->devType = dev;
                                                devAttr->flags |=
                                                    DMA_DEVICE_FLAG_IN_USE;

#if (DMA_DEBUG_TRACK_RESERVATION)
                                                channel->fileName = fileName;
                                                channel->lineNum = lineNum;
#endif
                                                handle =
                                                    MAKE_HANDLE(controllerIdx,
                                                                channelIdx);

                                                /* Now that we've reserved the channel - we can go ahead and configure it */

                                                if (ConfigChannel(handle) != 0) {
                                                        handle = -EIO;
                                                        printk(KERN_ERR
                                                               "dma_request_channel: ConfigChannel failed\n");
                                                }
                                                goto out;
                                        }
                                }
                        }
                }

                /* No channels are currently available. Let's wait for one to free up. */

                {
                        DEFINE_WAIT(wait);

                        prepare_to_wait(&gDMA.freeChannelQ, &wait,
                                        TASK_INTERRUPTIBLE);
                        up(&gDMA.lock);
                        schedule();
                        finish_wait(&gDMA.freeChannelQ, &wait);

                        if (signal_pending(current)) {
                                /* We don't currently hold gDMA.lock, so we return directly */

                                return -ERESTARTSYS;
                        }
                }

                if (down_interruptible(&gDMA.lock)) {
                        return -ERESTARTSYS;
                }
        }

out:
        up(&gDMA.lock);

        return handle;
}

/* Create both _dbg and non _dbg functions for modules. */

#if (DMA_DEBUG_TRACK_RESERVATION)
#undef dma_request_channel
DMA_Handle_t dma_request_channel(DMA_Device_t dev)
{
        return dma_request_channel_dbg(dev, __FILE__, __LINE__);
}

EXPORT_SYMBOL(dma_request_channel_dbg);
#endif
EXPORT_SYMBOL(dma_request_channel);
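
/*
 * Usage sketch (illustrative): reserve a channel, run transfers, release it.
 *
 *      DMA_Handle_t handle = dma_request_channel(dev);
 *
 *      if (handle >= 0) {
 *              ... dma_alloc_descriptors() / dma_start_transfer() ...
 *              dma_free_channel(handle);
 *      }
 */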

/****************************************************************************/
/**
*   Frees a previously allocated DMA Handle.
*/
/****************************************************************************/

int dma_free_channel(DMA_Handle_t handle        /* DMA handle. */
    ) {
        int rc = 0;
        DMA_Channel_t *channel;
        DMA_DeviceAttribute_t *devAttr;

        if (down_interruptible(&gDMA.lock) < 0) {
                return -ERESTARTSYS;
        }

        channel = HandleToChannel(handle);
        if (channel == NULL) {
                rc = -EINVAL;
                goto out;
        }

        devAttr = &DMA_gDeviceAttribute[channel->devType];

        if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) == 0) {
                channel->lastDevType = channel->devType;
                channel->devType = DMA_DEVICE_NONE;
        }
        channel->flags &= ~DMA_CHANNEL_FLAG_IN_USE;
        devAttr->flags &= ~DMA_DEVICE_FLAG_IN_USE;

out:
        up(&gDMA.lock);

        wake_up_interruptible(&gDMA.freeChannelQ);

        return rc;
}

EXPORT_SYMBOL(dma_free_channel);

/****************************************************************************/
/**
*   Determines if a given device has been configured as using a shared
*   channel.
*
*   @return
*       0       Device uses a dedicated channel
*       > 0     Device uses a shared channel
*       < 0     Error code
*/
/****************************************************************************/

int dma_device_is_channel_shared(DMA_Device_t device    /* Device to check. */
    ) {
        DMA_DeviceAttribute_t *devAttr;

        if (!IsDeviceValid(device)) {
                return -ENODEV;
        }
        devAttr = &DMA_gDeviceAttribute[device];

        return ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) == 0);
}

EXPORT_SYMBOL(dma_device_is_channel_shared);

/****************************************************************************/
/**
*   Allocates buffers for the descriptors. This is normally done automatically
*   but needs to be done explicitly when initiating a dma from interrupt
*   context.
*
*   @return
*       0       Descriptors were allocated successfully
*       -ENODEV Invalid handle
*       -EINVAL Invalid device type for this kind of transfer
*               (i.e. the device is _MEM_TO_DEV and not _DEV_TO_MEM)
*       -ENOMEM Memory exhausted
*/
/****************************************************************************/

int dma_alloc_descriptors(DMA_Handle_t handle,  /* DMA Handle */
                          dmacHw_TRANSFER_TYPE_e transferType,  /* Type of transfer being performed */
                          dma_addr_t srcData,   /* Place to get data to write to device */
                          dma_addr_t dstData,   /* Pointer to device data address */
                          size_t numBytes       /* Number of bytes to transfer to the device */
    ) {
        DMA_Channel_t *channel;
        DMA_DeviceAttribute_t *devAttr;
        int numDescriptors;
        size_t ringBytesRequired;
        int rc = 0;

        channel = HandleToChannel(handle);
        if (channel == NULL) {
                return -ENODEV;
        }

        devAttr = &DMA_gDeviceAttribute[channel->devType];

        if (devAttr->config.transferType != transferType) {
                return -EINVAL;
        }

        /* Figure out how many descriptors we need. */

        /* printk("srcData: 0x%08x dstData: 0x%08x, numBytes: %d\n", */
        /*        srcData, dstData, numBytes); */

        numDescriptors = dmacHw_calculateDescriptorCount(&devAttr->config,
                                                         (void *)srcData,
                                                         (void *)dstData,
                                                         numBytes);
        if (numDescriptors < 0) {
                printk(KERN_ERR "%s: dmacHw_calculateDescriptorCount failed\n",
                       __func__);
                return -EINVAL;
        }

        /* Check to see if we can reuse the existing descriptor ring, or if we need to allocate */
        /* a new one. */

        ringBytesRequired = dmacHw_descriptorLen(numDescriptors);

        /* printk("ringBytesRequired: %d\n", ringBytesRequired); */

        if (ringBytesRequired > devAttr->ring.bytesAllocated) {
                /* Make sure that this code path is never taken from interrupt context. */
                /* It's OK for an interrupt to initiate a DMA transfer, but the descriptor */
                /* allocation needs to have already been done. */

                might_sleep();

                /* Free the old descriptor ring and allocate a new one. */

                dma_free_descriptor_ring(&devAttr->ring);

                /* And allocate a new one. */

                rc =
                     dma_alloc_descriptor_ring(&devAttr->ring,
                                               numDescriptors);
                if (rc < 0) {
                        printk(KERN_ERR
                               "%s: dma_alloc_descriptor_ring(%d) failed\n",
                               __func__, numDescriptors);
                        return rc;
                }
                /* Setup the descriptor for this transfer */

                if (dmacHw_initDescriptor(devAttr->ring.virtAddr,
                                          devAttr->ring.physAddr,
                                          devAttr->ring.bytesAllocated,
                                          numDescriptors) < 0) {
                        printk(KERN_ERR "%s: dmacHw_initDescriptor failed\n",
                               __func__);
                        return -EINVAL;
                }
        } else {
                /* We've already got enough ring buffer allocated. All we need to do is reset */
                /* any control information, just in case the previous DMA was stopped. */

                dmacHw_resetDescriptorControl(devAttr->ring.virtAddr);
        }

        /* dma_alloc/free both set the prevSrc/DstData to 0. If they happen to be the same */
        /* as last time, then we don't need to call setDataDescriptor again. */

        if (dmacHw_setDataDescriptor(&devAttr->config,
                                     devAttr->ring.virtAddr,
                                     (void *)srcData,
                                     (void *)dstData, numBytes) < 0) {
                printk(KERN_ERR "%s: dmacHw_setDataDescriptor failed\n",
                       __func__);
                return -EINVAL;
        }

        /* Remember the critical information for this transfer so that we can eliminate */
        /* another call to dma_alloc_descriptors if the caller reuses the same buffers */

        devAttr->prevSrcData = srcData;
        devAttr->prevDstData = dstData;
        devAttr->prevNumBytes = numBytes;

        return 0;
}

EXPORT_SYMBOL(dma_alloc_descriptors);
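
/*
 * Usage sketch (illustrative): pre-build the descriptors in process context,
 * then start the transfer (possibly later, from an ISR).  srcPhys and
 * devFifoPhys are hypothetical physical addresses.
 *
 *      rc = dma_alloc_descriptors(handle,
 *                                 dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL,
 *                                 srcPhys, devFifoPhys, nbytes);
 *      if (rc == 0) {
 *              rc = dma_start_transfer(handle);
 *      }
 */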
1247
1248 /****************************************************************************/
1249 /**
1250 *   Allocates and sets up descriptors for a double buffered circular buffer.
1251 *
1252 *   This is primarily intended to be used for things like the ingress samples
1253 *   from a microphone.
1254 *
1255 *   @return
1256 *       > 0     Number of descriptors actually allocated.
1257 *       -EINVAL Invalid device type for this kind of transfer
1258 *               (i.e. the device is _MEM_TO_DEV and not _DEV_TO_MEM)
1259 *       -ENOMEM Memory exhausted
1260 */
1261 /****************************************************************************/
1262
1263 int dma_alloc_double_dst_descriptors(DMA_Handle_t handle,       /* DMA Handle */
1264                                      dma_addr_t srcData,        /* Physical address of source data */
1265                                      dma_addr_t dstData1,       /* Physical address of first destination buffer */
1266                                      dma_addr_t dstData2,       /* Physical address of second destination buffer */
1267                                      size_t numBytes    /* Number of bytes in each destination buffer */
1268     ) {
1269         DMA_Channel_t *channel;
1270         DMA_DeviceAttribute_t *devAttr;
1271         int numDst1Descriptors;
1272         int numDst2Descriptors;
1273         int numDescriptors;
1274         size_t ringBytesRequired;
1275         int rc = 0;
1276
1277         channel = HandleToChannel(handle);
1278         if (channel == NULL) {
1279                 return -ENODEV;
1280         }
1281
1282         devAttr = &DMA_gDeviceAttribute[channel->devType];
1283
1284         /* Figure out how many descriptors we need. */
1285
1286         /* printk("srcData: 0x%08x dstData: 0x%08x, numBytes: %d\n", */
1287         /*        srcData, dstData, numBytes); */
1288
1289         numDst1Descriptors =
1290              dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData,
1291                                              (void *)dstData1, numBytes);
1292         if (numDst1Descriptors < 0) {
1293                 return -EINVAL;
1294         }
1295         numDst2Descriptors =
1296              dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData,
1297                                              (void *)dstData2, numBytes);
1298         if (numDst2Descriptors < 0) {
1299                 return -EINVAL;
1300         }
1301         numDescriptors = numDst1Descriptors + numDst2Descriptors;
1302         /* printk("numDescriptors: %d\n", numDescriptors); */
1303
1304         /* Check to see if we can reuse the existing descriptor ring, or if we need to allocate */
1305         /* a new one. */
1306
1307         ringBytesRequired = dmacHw_descriptorLen(numDescriptors);
1308
1309         /* printk("ringBytesRequired: %d\n", ringBytesRequired); */
1310
1311         if (ringBytesRequired > devAttr->ring.bytesAllocated) {
1312                 /* Make sure that this code path is never taken from interrupt context. */
1313                 /* It's OK for an interrupt to initiate a DMA transfer, but the descriptor */
1314                 /* allocation needs to have already been done. */
1315
1316                 might_sleep();
1317
1318                 /* Free the old descriptor ring and allocate a new one. */
1319
1320                 dma_free_descriptor_ring(&devAttr->ring);
1321
1322                 /* And allocate a new one. */
1323
1324                 rc =
1325                      dma_alloc_descriptor_ring(&devAttr->ring,
1326                                                numDescriptors);
1327                 if (rc < 0) {
1328                         printk(KERN_ERR
1329                                "%s: dma_alloc_descriptor_ring(%d) failed\n",
1330                                __func__, ringBytesRequired);
1331                         return rc;
1332                 }
1333         }
1334
1335         /* Setup the descriptor for this transfer. Since this function is used with */
1336         /* CONTINUOUS DMA operations, we need to reinitialize every time, otherwise */
1337         /* setDataDescriptor will keep trying to append onto the end. */
1338
1339         if (dmacHw_initDescriptor(devAttr->ring.virtAddr,
1340                                   devAttr->ring.physAddr,
1341                                   devAttr->ring.bytesAllocated,
1342                                   numDescriptors) < 0) {
1343                 printk(KERN_ERR "%s: dmacHw_initDescriptor failed\n", __func__);
1344                 return -EINVAL;
1345         }
1346
1347         /* dma_alloc/free both set the prevSrc/DstData to 0. If they happen to be the same */
1348         /* as last time, then we don't need to call setDataDescriptor again. */
1349
1350         if (dmacHw_setDataDescriptor(&devAttr->config,
1351                                      devAttr->ring.virtAddr,
1352                                      (void *)srcData,
1353                                      (void *)dstData1, numBytes) < 0) {
1354                 printk(KERN_ERR "%s: dmacHw_setDataDescriptor 1 failed\n",
1355                        __func__);
1356                 return -EINVAL;
1357         }
1358         if (dmacHw_setDataDescriptor(&devAttr->config,
1359                                      devAttr->ring.virtAddr,
1360                                      (void *)srcData,
1361                                      (void *)dstData2, numBytes) < 0) {
1362                 printk(KERN_ERR "%s: dmacHw_setDataDescriptor 2 failed\n",
1363                        __func__);
1364                 return -EINVAL;
1365         }
1366
1367         /* Callers should use dma_start_transfer rather than dma_transfer_xxx with */
1368         /* these descriptors, so we just clear the 'prev' variables rather than */
1369         /* trying to keep them accurate. */
1369
1370         devAttr->prevSrcData = 0;
1371         devAttr->prevDstData = 0;
1372         devAttr->prevNumBytes = 0;
1373
1374         return numDescriptors;
1375 }
1376
1377 EXPORT_SYMBOL(dma_alloc_double_dst_descriptors);
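
/*
 * Illustrative usage sketch (not compiled in): double-buffered continuous
 * DMA built with dma_alloc_double_dst_descriptors() and kicked off with
 * dma_start_transfer(). dma_request_channel()/dma_free_channel() are
 * assumed to be the channel APIs from mach/dma.h; the example function
 * name and parameters are hypothetical.
 */
#if 0
static int example_double_buffer(DMA_Device_t dev, dma_addr_t src,
                                 dma_addr_t buf1, dma_addr_t buf2,
                                 size_t numBytes)
{
        DMA_Handle_t handle;
        int rc;

        handle = dma_request_channel(dev);      /* assumed channel API */
        if (handle < 0)
                return handle;

        /* Build a ring that alternates between the two destination buffers. */
        rc = dma_alloc_double_dst_descriptors(handle, src, buf1, buf2,
                                              numBytes);
        if (rc < 0) {
                dma_free_channel(handle);
                return rc;
        }

        rc = dma_start_transfer(handle);

        /* dma_stop_transfer()/dma_free_channel() when the stream ends (not shown). */
        return rc;
}
#endif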
1378
1379 /****************************************************************************/
1380 /**
1381 *   Initiates a transfer when the descriptors have already been setup.
1382 *
1383 *   This is a special case, and normally, the dma_transfer_xxx functions should
1384 *   be used.
1385 *
1386 *   @return
1387 *       0       Transfer was started successfully
1388 *       -ENODEV Invalid handle
1389 */
1390 /****************************************************************************/
1391
1392 int dma_start_transfer(DMA_Handle_t handle)
1393 {
1394         DMA_Channel_t *channel;
1395         DMA_DeviceAttribute_t *devAttr;
1396
1397         channel = HandleToChannel(handle);
1398         if (channel == NULL) {
1399                 return -ENODEV;
1400         }
1401         devAttr = &DMA_gDeviceAttribute[channel->devType];
1402
1403         dmacHw_initiateTransfer(channel->dmacHwHandle, &devAttr->config,
1404                                 devAttr->ring.virtAddr);
1405
1406         /* Since we got this far, everything went successfully */
1407
1408         return 0;
1409 }
1410
1411 EXPORT_SYMBOL(dma_start_transfer);
1412
1413 /****************************************************************************/
1414 /**
1415 *   Stops a previously started DMA transfer.
1416 *
1417 *   @return
1418 *       0       Transfer was stopped successfully
1419 *       -ENODEV Invalid handle
1420 */
1421 /****************************************************************************/
1422
1423 int dma_stop_transfer(DMA_Handle_t handle)
1424 {
1425         DMA_Channel_t *channel;
1426
1427         channel = HandleToChannel(handle);
1428         if (channel == NULL) {
1429                 return -ENODEV;
1430         }
1431
1432         dmacHw_stopTransfer(channel->dmacHwHandle);
1433
1434         return 0;
1435 }
1436
1437 EXPORT_SYMBOL(dma_stop_transfer);
1438
1439 /****************************************************************************/
1440 /**
1441 *   Waits for a DMA to complete by polling. This function is only intended
1442 *   to be used for testing. Interrupts should be used for most DMA operations.
1443 *
1444 *   @return 0 on success, -ENODEV for an invalid handle, -EIO on a
1445 *           transfer error.
1446 */
1444 /****************************************************************************/
1445
1446 int dma_wait_transfer_done(DMA_Handle_t handle)
1447 {
1448         DMA_Channel_t *channel;
1449         dmacHw_TRANSFER_STATUS_e status;
1450
1451         channel = HandleToChannel(handle);
1452         if (channel == NULL) {
1453                 return -ENODEV;
1454         }
1455
1456         while ((status =
1457                 dmacHw_transferCompleted(channel->dmacHwHandle)) ==
1458                dmacHw_TRANSFER_STATUS_BUSY) {
1459                 cpu_relax();
1460         }
1461
1462         if (status == dmacHw_TRANSFER_STATUS_ERROR) {
1463                 printk(KERN_ERR "%s: DMA transfer failed\n", __func__);
1464                 return -EIO;
1465         }
1466         return 0;
1467 }
1468
1469 EXPORT_SYMBOL(dma_wait_transfer_done);
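
/*
 * Illustrative usage sketch (not compiled in): polling a transfer to
 * completion. As noted above, this is for testing only; real users should
 * register a completion handler with dma_set_device_handler() instead.
 */
#if 0
static int example_polled_transfer(DMA_Handle_t handle)
{
        int rc;

        rc = dma_start_transfer(handle);        /* descriptors already set up */
        if (rc != 0)
                return rc;

        rc = dma_wait_transfer_done(handle);    /* spins until done or error */
        if (rc != 0)
                dma_stop_transfer(handle);      /* shut the channel down on error */

        return rc;
}
#endif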
1470
1471 /****************************************************************************/
1472 /**
1473 *   Initiates a DMA, allocating the descriptors as required.
1474 *
1475 *   @return
1476 *       0       Transfer was started successfully
1477 *       -EINVAL Invalid device type for this kind of transfer
1478 *               (i.e. the device is _DEV_TO_MEM and not _MEM_TO_DEV)
1479 */
1480 /****************************************************************************/
1481
1482 int dma_transfer(DMA_Handle_t handle,   /* DMA Handle */
1483                  dmacHw_TRANSFER_TYPE_e transferType,   /* Type of transfer being performed */
1484                  dma_addr_t srcData,    /* Place to get data to write to device */
1485                  dma_addr_t dstData,    /* Pointer to device data address */
1486                  size_t numBytes        /* Number of bytes to transfer to the device */
1487     ) {
1488         DMA_Channel_t *channel;
1489         DMA_DeviceAttribute_t *devAttr;
1490         int rc = 0;
1491
1492         channel = HandleToChannel(handle);
1493         if (channel == NULL) {
1494                 return -ENODEV;
1495         }
1496
1497         devAttr = &DMA_gDeviceAttribute[channel->devType];
1498
1499         if (devAttr->config.transferType != transferType) {
1500                 return -EINVAL;
1501         }
1502
1503         /* We keep track of the information about the previous request for this */
1504         /* device, and if the attributes match, then we can use the descriptors we setup */
1505         /* the last time, and not have to reinitialize everything. */
1506
1507         rc = dma_alloc_descriptors(handle, transferType, srcData,
1508                                    dstData, numBytes);
1509         if (rc != 0) {
1510                 return rc;
1511         }
1515
1516         /* And kick off the transfer */
1517
1518         devAttr->numBytes = numBytes;
1519         devAttr->transferStartTime = timer_get_tick_count();
1520
1521         dmacHw_initiateTransfer(channel->dmacHwHandle, &devAttr->config,
1522                                 devAttr->ring.virtAddr);
1523
1524         /* Since we got this far, everything went successfully */
1525
1526         return 0;
1527 }
1528
1529 EXPORT_SYMBOL(dma_transfer);
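
/*
 * Illustrative usage sketch (not compiled in): a one-shot memory-to-device
 * transfer. dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL is assumed from the
 * dmacHw transfer-type enumeration; the FIFO address is hypothetical.
 */
#if 0
static int example_write_to_device(DMA_Handle_t handle, dma_addr_t srcData,
                                   size_t numBytes)
{
        dma_addr_t devFifoAddr = 0x30000000;    /* hypothetical device FIFO */

        /* Fails with -EINVAL if the channel wasn't configured mem-to-dev. */
        return dma_transfer(handle, dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL,
                            srcData, devFifoAddr, numBytes);
}
#endif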
1530
1531 /****************************************************************************/
1532 /**
1533 *   Set the callback function which will be called when a transfer completes.
1534 *   If a NULL callback function is set, then no callback will occur.
1535 *
1536 *   @note   @a devHandler will be called from IRQ context.
1537 *
1538 *   @return
1539 *       0       - Success
1540 *       -ENODEV - Device handed in is invalid.
1541 */
1542 /****************************************************************************/
1543
1544 int dma_set_device_handler(DMA_Device_t dev,    /* Device to set the callback for. */
1545                            DMA_DeviceHandler_t devHandler,      /* Function to call when the DMA completes */
1546                            void *userData       /* Pointer which will be passed to devHandler. */
1547     ) {
1548         DMA_DeviceAttribute_t *devAttr;
1549         unsigned long flags;
1550
1551         if (!IsDeviceValid(dev)) {
1552                 return -ENODEV;
1553         }
1554         devAttr = &DMA_gDeviceAttribute[dev];
1555
1556         local_irq_save(flags);
1557
1558         devAttr->userData = userData;
1559         devAttr->devHandler = devHandler;
1560
1561         local_irq_restore(flags);
1562
1563         return 0;
1564 }
1565
1566 EXPORT_SYMBOL(dma_set_device_handler);
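
/*
 * Illustrative usage sketch (not compiled in): registering a completion
 * handler. The handler runs in IRQ context, so it must not sleep;
 * completing a waiter is a typical pattern. The (dev, reason, userData)
 * handler signature is assumed from DMA_DeviceHandler_t.
 */
#if 0
static void example_dma_done(DMA_Device_t dev, int reason, void *userData)
{
        complete((struct completion *)userData);        /* wake the waiting task */
}

static int example_register_handler(DMA_Device_t dev, struct completion *done)
{
        init_completion(done);
        return dma_set_device_handler(dev, example_dma_done, done);
}
#endif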
1567
1568 /****************************************************************************/
1569 /**
1570 *   Initializes a memory mapping structure
1571 */
1572 /****************************************************************************/
1573
1574 int dma_init_mem_map(DMA_MemMap_t *memMap)
1575 {
1576         memset(memMap, 0, sizeof(*memMap));
1577
1578         sema_init(&memMap->lock, 1);
1579
1580         return 0;
1581 }
1582
1583 EXPORT_SYMBOL(dma_init_mem_map);
1584
1585 /****************************************************************************/
1586 /**
1587 *   Releases any memory currently being held by a memory mapping structure.
1588 */
1589 /****************************************************************************/
1590
1591 int dma_term_mem_map(DMA_MemMap_t *memMap)
1592 {
1593         down(&memMap->lock);    /* Just being paranoid */
1594
1595         /* Free up any allocated memory */
1596
1597         up(&memMap->lock);
1598         memset(memMap, 0, sizeof(*memMap));
1599
1600         return 0;
1601 }
1602
1603 EXPORT_SYMBOL(dma_term_mem_map);
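
/*
 * Illustrative usage sketch (not compiled in): the DMA_MemMap_t lifecycle.
 * Initialize once, map/unmap around each transfer, and terminate when the
 * map is no longer needed.
 */
#if 0
static int example_mem_map_lifecycle(void *buf, size_t numBytes)
{
        DMA_MemMap_t memMap;
        int rc;

        rc = dma_init_mem_map(&memMap);
        if (rc != 0)
                return rc;

        rc = dma_map_mem(&memMap, buf, numBytes, DMA_TO_DEVICE);
        if (rc == 0) {
                /* ... create descriptors and run the transfer ... */
                dma_unmap(&memMap, 0);  /* 0: the device didn't write the pages */
        }

        dma_term_mem_map(&memMap);
        return rc;
}
#endif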
1604
1605 /****************************************************************************/
1606 /**
1607 *   Looks at a memory address and categorizes it.
1608 *
1609 *   @return One of the values from the DMA_MemType_t enumeration.
1610 */
1611 /****************************************************************************/
1612
1613 DMA_MemType_t dma_mem_type(void *addr)
1614 {
1615         unsigned long addrVal = (unsigned long)addr;
1616
1617         if (addrVal >= VMALLOC_END) {
1618                 /* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */
1619
1620                 /* dma_alloc_xxx pages are physically and virtually contiguous */
1621
1622                 return DMA_MEM_TYPE_DMA;
1623         }
1624
1625         /* Technically, we could add one more classification. Addresses between VMALLOC_END */
1626         /* and the beginning of the DMA virtual address space could be considered I/O space. */
1627         /* Right now, nobody cares about this particular classification, so we ignore it. */
1628
1629         if (is_vmalloc_addr(addr)) {
1630                 /* Address comes from the vmalloc'd region. Pages are virtually */
1631                 /* contiguous but NOT physically contiguous */
1632
1633                 return DMA_MEM_TYPE_VMALLOC;
1634         }
1635
1636         if (addrVal >= PAGE_OFFSET) {
1637                 /* PAGE_OFFSET is typically 0xC0000000 */
1638
1639                 /* kmalloc'd pages are physically contiguous */
1640
1641                 return DMA_MEM_TYPE_KMALLOC;
1642         }
1643
1644         return DMA_MEM_TYPE_USER;
1645 }
1646
1647 EXPORT_SYMBOL(dma_mem_type);
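
/*
 * Illustrative sketch (not compiled in): how the classification above maps
 * onto the common allocators.
 */
#if 0
static void example_mem_types(void)
{
        void *k = kmalloc(256, GFP_KERNEL);     /* -> DMA_MEM_TYPE_KMALLOC */
        void *v = vmalloc(4 * PAGE_SIZE);       /* -> DMA_MEM_TYPE_VMALLOC */
        /* dma_alloc_coherent() memory  -> DMA_MEM_TYPE_DMA  */
        /* a pointer from user space    -> DMA_MEM_TYPE_USER */

        BUG_ON(dma_mem_type(k) != DMA_MEM_TYPE_KMALLOC);
        BUG_ON(dma_mem_type(v) != DMA_MEM_TYPE_VMALLOC);

        kfree(k);
        vfree(v);
}
#endif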
1648
1649 /****************************************************************************/
1650 /**
1651 *   Looks at a memory address and determines if we support DMA'ing to/from
1652 *   that type of memory.
1653 *
1654 *   @return boolean -
1655 *               non-zero means DMA is supported for this address
1656 *               zero means DMA is not supported
1657 */
1658 /****************************************************************************/
1659
1660 int dma_mem_supports_dma(void *addr)
1661 {
1662         DMA_MemType_t memType = dma_mem_type(addr);
1663
1664         return (memType == DMA_MEM_TYPE_DMA)
1665 #if ALLOW_MAP_OF_KMALLOC_MEMORY
1666             || (memType == DMA_MEM_TYPE_KMALLOC)
1667 #endif
1668             || (memType == DMA_MEM_TYPE_USER);
1669 }
1670
1671 EXPORT_SYMBOL(dma_mem_supports_dma);
1672
1673 /****************************************************************************/
1674 /**
1675 *   Starts a new memory mapping operation; regions are added afterwards
1676 *   with dma_map_add_region.
1677 *
1678 *   @return     0 on success, -EBUSY if the memory map is already in use.
1678 */
1679 /****************************************************************************/
1680
1681 int dma_map_start(DMA_MemMap_t *memMap, /* Stores state information about the map */
1682                   enum dma_data_direction dir   /* Direction that the mapping will be going */
1683     ) {
1684         int rc;
1685
1686         down(&memMap->lock);
1687
1688         DMA_MAP_PRINT("memMap: %p\n", memMap);
1689
1690         if (memMap->inUse) {
1691                 printk(KERN_ERR "%s: memory map %p is already being used\n",
1692                        __func__, memMap);
1693                 rc = -EBUSY;
1694                 goto out;
1695         }
1696
1697         memMap->inUse = 1;
1698         memMap->dir = dir;
1699         memMap->numRegionsUsed = 0;
1700
1701         rc = 0;
1702
1703 out:
1704
1705         DMA_MAP_PRINT("returning %d", rc);
1706
1707         up(&memMap->lock);
1708
1709         return rc;
1710 }
1711
1712 EXPORT_SYMBOL(dma_map_start);
1713
1714 /****************************************************************************/
1715 /**
1716 *   Adds a segment of memory to a memory map. Each segment is both
1717 *   physically and virtually contiguous.
1718 *
1719 *   @return     0 on success, error code otherwise.
1720 */
1721 /****************************************************************************/
1722
1723 static int dma_map_add_segment(DMA_MemMap_t *memMap,    /* Stores state information about the map */
1724                                DMA_Region_t *region,    /* Region that the segment belongs to */
1725                                void *virtAddr,  /* Virtual address of the segment being added */
1726                                dma_addr_t physAddr,     /* Physical address of the segment being added */
1727                                size_t numBytes  /* Number of bytes of the segment being added */
1728     ) {
1729         DMA_Segment_t *segment;
1730
1731         DMA_MAP_PRINT("memMap:%p va:%p pa:0x%llx #:%zu\n", memMap, virtAddr,
1732                       (unsigned long long)physAddr, numBytes);
1733
1734         /* Sanity check */
1735
1736         if (((unsigned long)virtAddr < (unsigned long)region->virtAddr)
1737             || (((unsigned long)virtAddr + numBytes)) >
1738             ((unsigned long)region->virtAddr + region->numBytes)) {
1739                 printk(KERN_ERR
1740                        "%s: virtAddr %p is outside region @ %p len: %zu\n",
1741                        __func__, virtAddr, region->virtAddr, region->numBytes);
1742                 return -EINVAL;
1743         }
1744
1745         if (region->numSegmentsUsed > 0) {
1746                 /* Check to see if this segment is physically contiguous with the previous one */
1747
1748                 segment = &region->segment[region->numSegmentsUsed - 1];
1749
1750                 if ((segment->physAddr + segment->numBytes) == physAddr) {
1751                         /* It is - just add on to the end */
1752
1753                         DMA_MAP_PRINT("appending %d bytes to last segment\n",
1754                                       numBytes);
1755
1756                         segment->numBytes += numBytes;
1757
1758                         return 0;
1759                 }
1760         }
1761
1762         /* Reallocate to hold more segments, if required. */
1763
1764         if (region->numSegmentsUsed >= region->numSegmentsAllocated) {
1765                 DMA_Segment_t *newSegment;
1766                 size_t oldSize =
1767                     region->numSegmentsAllocated * sizeof(*newSegment);
1768                 int newAlloc = region->numSegmentsAllocated + 4;
1769                 size_t newSize = newAlloc * sizeof(*newSegment);
1770
1771                 newSegment = kmalloc(newSize, GFP_KERNEL);
1772                 if (newSegment == NULL) {
1773                         return -ENOMEM;
1774                 }
1775                 memcpy(newSegment, region->segment, oldSize);
1776                 memset(&((uint8_t *) newSegment)[oldSize], 0,
1777                        newSize - oldSize);
1778                 kfree(region->segment);
1779
1780                 region->numSegmentsAllocated = newAlloc;
1781                 region->segment = newSegment;
1782         }
1783
1784         segment = &region->segment[region->numSegmentsUsed];
1785         region->numSegmentsUsed++;
1786
1787         segment->virtAddr = virtAddr;
1788         segment->physAddr = physAddr;
1789         segment->numBytes = numBytes;
1790
1791         DMA_MAP_PRINT("returning success\n");
1792
1793         return 0;
1794 }
1795
1796 /****************************************************************************/
1797 /**
1798 *   Adds a region of memory to a memory map. Each region is virtually
1799 *   contiguous, but not necessarily physically contiguous.
1800 *
1801 *   @return     0 on success, error code otherwise.
1802 */
1803 /****************************************************************************/
1804
1805 int dma_map_add_region(DMA_MemMap_t *memMap,    /* Stores state information about the map */
1806                        void *mem,       /* Virtual address that we want to get a map of */
1807                        size_t numBytes  /* Number of bytes being mapped */
1808     ) {
1809         unsigned long addr = (unsigned long)mem;
1810         unsigned int offset;
1811         int rc = 0;
1812         DMA_Region_t *region;
1813         dma_addr_t physAddr;
1814
1815         down(&memMap->lock);
1816
1817         DMA_MAP_PRINT("memMap:%p va:%p #:%zu\n", memMap, mem, numBytes);
1818
1819         if (!memMap->inUse) {
1820                 printk(KERN_ERR "%s: Make sure you call dma_map_start first\n",
1821                        __func__);
1822                 rc = -EINVAL;
1823                 goto out;
1824         }
1825
1826         /* Reallocate to hold more regions. */
1827
1828         if (memMap->numRegionsUsed >= memMap->numRegionsAllocated) {
1829                 DMA_Region_t *newRegion;
1830                 size_t oldSize =
1831                     memMap->numRegionsAllocated * sizeof(*newRegion);
1832                 int newAlloc = memMap->numRegionsAllocated + 4;
1833                 size_t newSize = newAlloc * sizeof(*newRegion);
1834
1835                 newRegion = kmalloc(newSize, GFP_KERNEL);
1836                 if (newRegion == NULL) {
1837                         rc = -ENOMEM;
1838                         goto out;
1839                 }
1840                 memcpy(newRegion, memMap->region, oldSize);
1841                 memset(&((uint8_t *) newRegion)[oldSize], 0, newSize - oldSize);
1842
1843                 kfree(memMap->region);
1844
1845                 memMap->numRegionsAllocated = newAlloc;
1846                 memMap->region = newRegion;
1847         }
1848
1849         region = &memMap->region[memMap->numRegionsUsed];
1850         memMap->numRegionsUsed++;
1851
1852         offset = addr & ~PAGE_MASK;
1853
1854         region->memType = dma_mem_type(mem);
1855         region->virtAddr = mem;
1856         region->numBytes = numBytes;
1857         region->numSegmentsUsed = 0;
1858         region->numLockedPages = 0;
1859         region->lockedPages = NULL;
1860
1861         switch (region->memType) {
1862         case DMA_MEM_TYPE_VMALLOC:
1863                 {
1864                         atomic_inc(&gDmaStatMemTypeVmalloc);
1865
1866                         /* printk(KERN_ERR "%s: vmalloc'd pages are not supported\n", __func__); */
1867
1868                         /* vmalloc'd pages are not physically contiguous */
1869
1870                         rc = -EINVAL;
1871                         break;
1872                 }
1873
1874         case DMA_MEM_TYPE_KMALLOC:
1875                 {
1876                         atomic_inc(&gDmaStatMemTypeKmalloc);
1877
1878                         /* kmalloc'd pages are physically contiguous, so they'll have exactly */
1879                         /* one segment */
1880
1881 #if ALLOW_MAP_OF_KMALLOC_MEMORY
1882                         physAddr =
1883                             dma_map_single(NULL, mem, numBytes, memMap->dir);
1884                         rc = dma_map_add_segment(memMap, region, mem, physAddr,
1885                                                  numBytes);
1886 #else
1887                         rc = -EINVAL;
1888 #endif
1889                         break;
1890                 }
1891
1892         case DMA_MEM_TYPE_DMA:
1893                 {
1894                         /* dma_alloc_xxx pages are physically contiguous */
1895
1896                         atomic_inc(&gDmaStatMemTypeCoherent);
1897
1898                         physAddr = (vmalloc_to_pfn(mem) << PAGE_SHIFT) + offset;
1899
1900                         dma_sync_single_for_cpu(NULL, physAddr, numBytes,
1901                                                 memMap->dir);
1902                         rc = dma_map_add_segment(memMap, region, mem, physAddr,
1903                                                  numBytes);
1904                         break;
1905                 }
1906
1907         case DMA_MEM_TYPE_USER:
1908                 {
1909                         size_t firstPageOffset;
1910                         size_t firstPageSize;
1911                         struct page **pages;
1912                         struct task_struct *userTask;
1913
1914                         atomic_inc(&gDmaStatMemTypeUser);
1915
1916 #if 1
1917                         /* If the pages are user pages, then the dma_mem_map_set_user_task function */
1918                         /* must have been previously called. */
1919
1920                         if (memMap->userTask == NULL) {
1921                                 printk(KERN_ERR
1922                                        "%s: must call dma_mem_map_set_user_task when using user-mode memory\n",
1923                                        __func__);
1924                                 /* memMap->lock is held, so don't return directly */
1925                                 rc = -EINVAL;
1926                                 break;
1925                         }
1926
1927                         /* User pages need to be locked. */
1928
1929                         firstPageOffset =
1930                             (unsigned long)region->virtAddr & (PAGE_SIZE - 1);
1931                         firstPageSize = PAGE_SIZE - firstPageOffset;
1932
1933                         region->numLockedPages = (firstPageOffset
1934                                                   + region->numBytes +
1935                                                   PAGE_SIZE - 1) / PAGE_SIZE;
1936                         pages = kmalloc(region->numLockedPages *
1937                                         sizeof(struct page *), GFP_KERNEL);
1939
1940                         if (pages == NULL) {
1941                                 region->numLockedPages = 0;
1942                                 /* memMap->lock is held, so don't return directly */
1943                                 rc = -ENOMEM;
1944                                 break;
1943                         }
1944
1945                         userTask = memMap->userTask;
1946
1947                         down_read(&userTask->mm->mmap_sem);
1948                         rc = get_user_pages(userTask,   /* task */
1949                                             userTask->mm,       /* mm */
1950                                             (unsigned long)region->virtAddr,    /* start */
1951                                             region->numLockedPages,     /* len */
1952                                             memMap->dir == DMA_FROM_DEVICE,     /* write */
1953                                             0,  /* force */
1954                                             pages,      /* pages (array of pointers to page) */
1955                                             NULL);      /* vmas */
1956                         up_read(&userTask->mm->mmap_sem);
1957
1958                         if (rc != region->numLockedPages) {
1959                                 int pageIdx;
1960
1961                                 /* Release any pages that did get locked. */
1962                                 for (pageIdx = 0; pageIdx < rc; pageIdx++) {
1963                                         page_cache_release(pages[pageIdx]);
1964                                 }
1965                                 kfree(pages);
1966                                 region->numLockedPages = 0;
1967
1968                                 if (rc >= 0) {
1969                                         rc = -EINVAL;
1970                                 }
1965                         } else {
1966                                 uint8_t *virtAddr = region->virtAddr;
1967                                 size_t bytesRemaining;
1968                                 int pageIdx;
1969
1970                                 rc = 0; /* Since get_user_pages returns +ve number */
1971
1972                                 region->lockedPages = pages;
1973
1974                                 /* We've locked the user pages. Now we need to walk them and figure */
1975                                 /* out the physical addresses. */
1976
1977                                 /* The first page may be partial */
1978
1979                                 dma_map_add_segment(memMap,
1980                                                     region,
1981                                                     virtAddr,
1982                                                     PFN_PHYS(page_to_pfn
1983                                                              (pages[0])) +
1984                                                     firstPageOffset,
1985                                                     firstPageSize);
1986
1987                                 virtAddr += firstPageSize;
1988                                 bytesRemaining =
1989                                     region->numBytes - firstPageSize;
1990
1991                                 for (pageIdx = 1;
1992                                      pageIdx < region->numLockedPages;
1993                                      pageIdx++) {
1994                                         size_t bytesThisPage =
1995                                             (bytesRemaining >
1996                                              PAGE_SIZE ? PAGE_SIZE :
1997                                              bytesRemaining);
1998
1999                                         DMA_MAP_PRINT
2000                                             ("pageIdx:%d pages[pageIdx]=%p pfn=%lu phys=%llx\n",
2001                                              pageIdx, pages[pageIdx],
2002                                              page_to_pfn(pages[pageIdx]),
2003                                              (unsigned long long)
2004                                              PFN_PHYS(page_to_pfn(pages[pageIdx])));
2005
2006                                         dma_map_add_segment(memMap, region,
2007                                                 virtAddr,
2008                                                 PFN_PHYS(page_to_pfn(pages[pageIdx])),
2009                                                 bytesThisPage);
2013
2014                                         virtAddr += bytesThisPage;
2015                                         bytesRemaining -= bytesThisPage;
2016                                 }
2017                         }
2018 #else
2019                         printk(KERN_ERR
2020                                "%s: User mode pages are not yet supported\n",
2021                                __func__);
2022
2023                         /* user pages are not physically contiguous */
2024
2025                         rc = -EINVAL;
2026 #endif
2027                         break;
2028                 }
2029
2030         default:
2031                 {
2032                         printk(KERN_ERR "%s: Unsupported memory type: %d\n",
2033                                __func__, region->memType);
2034
2035                         rc = -EINVAL;
2036                         break;
2037                 }
2038         }
2039
2040         if (rc != 0) {
2041                 memMap->numRegionsUsed--;
2042         }
2043
2044 out:
2045
2046         DMA_MAP_PRINT("returning %d\n", rc);
2047
2048         up(&memMap->lock);
2049
2050         return rc;
2051 }
2052
2053 EXPORT_SYMBOL(dma_map_add_region);
2054
2055 /****************************************************************************/
2056 /**
2057 *   Maps in a memory region such that it can be used for performing a DMA.
2058 *
2059 *   @return     0 on success, error code otherwise.
2060 */
2061 /****************************************************************************/
2062
2063 int dma_map_mem(DMA_MemMap_t *memMap,   /* Stores state information about the map */
2064                 void *mem,      /* Virtual address that we want to get a map of */
2065                 size_t numBytes,        /* Number of bytes being mapped */
2066                 enum dma_data_direction dir     /* Direction that the mapping will be going */
2067     ) {
2068         int rc;
2069
2070         rc = dma_map_start(memMap, dir);
2071         if (rc == 0) {
2072                 rc = dma_map_add_region(memMap, mem, numBytes);
2073                 if (rc < 0) {
2074                         /* Since the add failed, this function will fail, and the caller */
2075                         /* won't call dma_unmap, so we need to do it here. */
2076
2077                         dma_unmap(memMap, 0);
2078                 }
2079         }
2080
2081         return rc;
2082 }
2083
2084 EXPORT_SYMBOL(dma_map_mem);
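
/*
 * Illustrative usage sketch (not compiled in): dma_map_mem() covers the
 * single-region case; to map several discontiguous buffers into one
 * transfer, call dma_map_start() once and dma_map_add_region() per buffer.
 */
#if 0
static int example_map_two_buffers(DMA_MemMap_t *memMap,
                                   void *buf1, size_t len1,
                                   void *buf2, size_t len2)
{
        int rc;

        rc = dma_map_start(memMap, DMA_TO_DEVICE);
        if (rc != 0)
                return rc;

        rc = dma_map_add_region(memMap, buf1, len1);
        if (rc == 0)
                rc = dma_map_add_region(memMap, buf2, len2);

        if (rc != 0)
                dma_unmap(memMap, 0);   /* mirror dma_map_mem's failure cleanup */

        return rc;
}
#endif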
2085
2086 /****************************************************************************/
2087 /**
2088 *   Setup a descriptor ring for a given memory map.
2089 *
2090 *   It is assumed that the descriptor ring has already been initialized, and
2091 *   this routine will only reallocate a new descriptor ring if the existing
2092 *   one is too small.
2093 *
2094 *   @return     0 on success, error code otherwise.
2095 */
2096 /****************************************************************************/
2097
2098 int dma_map_create_descriptor_ring(DMA_Device_t dev,    /* DMA device (where the ring is stored) */
2099                                    DMA_MemMap_t *memMap,        /* Memory map that will be used */
2100                                    dma_addr_t devPhysAddr       /* Physical address of device */
2101     ) {
2102         int rc;
2103         int numDescriptors;
2104         DMA_DeviceAttribute_t *devAttr;
2105         DMA_Region_t *region;
2106         DMA_Segment_t *segment;
2107         dma_addr_t srcPhysAddr;
2108         dma_addr_t dstPhysAddr;
2109         int regionIdx;
2110         int segmentIdx;
2111
2112         devAttr = &DMA_gDeviceAttribute[dev];
2113
2114         down(&memMap->lock);
2115
2116         /* Figure out how many descriptors we need */
2117
2118         numDescriptors = 0;
2119         for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
2120                 region = &memMap->region[regionIdx];
2121
2122                 for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
2123                      segmentIdx++) {
2124                         segment = &region->segment[segmentIdx];
2125
2126                         if (memMap->dir == DMA_TO_DEVICE) {
2127                                 srcPhysAddr = segment->physAddr;
2128                                 dstPhysAddr = devPhysAddr;
2129                         } else {
2130                                 srcPhysAddr = devPhysAddr;
2131                                 dstPhysAddr = segment->physAddr;
2132                         }
2133
2134                         rc = dma_calculate_descriptor_count(dev, srcPhysAddr,
2135                                                             dstPhysAddr,
2136                                                             segment->numBytes);
2139                         if (rc < 0) {
2140                                 printk(KERN_ERR
2141                                        "%s: dma_calculate_descriptor_count failed: %d\n",
2142                                        __func__, rc);
2143                                 goto out;
2144                         }
2145                         numDescriptors += rc;
2146                 }
2147         }
2148
2149         /* Adjust the size of the ring, if it isn't big enough */
2150
2151         if (numDescriptors > devAttr->ring.descriptorsAllocated) {
2152                 dma_free_descriptor_ring(&devAttr->ring);
2153                 rc = dma_alloc_descriptor_ring(&devAttr->ring,
2154                                                numDescriptors);
2156                 if (rc < 0) {
2157                         printk(KERN_ERR
2158                                "%s: dma_alloc_descriptor_ring failed: %d\n",
2159                                __func__, rc);
2160                         goto out;
2161                 }
2162         } else {
2163                 rc = dma_init_descriptor_ring(&devAttr->ring,
2164                                               numDescriptors);
2166                 if (rc < 0) {
2167                         printk(KERN_ERR
2168                                "%s: dma_init_descriptor_ring failed: %d\n",
2169                                __func__, rc);
2170                         goto out;
2171                 }
2172         }
2173
2174         /* Populate the descriptors */
2175
2176         for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
2177                 region = &memMap->region[regionIdx];
2178
2179                 for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
2180                      segmentIdx++) {
2181                         segment = &region->segment[segmentIdx];
2182
2183                         if (memMap->dir == DMA_TO_DEVICE) {
2184                                 srcPhysAddr = segment->physAddr;
2185                                 dstPhysAddr = devPhysAddr;
2186                         } else {
2187                                 srcPhysAddr = devPhysAddr;
2188                                 dstPhysAddr = segment->physAddr;
2189                         }
2190
2191                         rc = dma_add_descriptors(&devAttr->ring, dev,
2192                                                  srcPhysAddr, dstPhysAddr,
2193                                                  segment->numBytes);
2195                         if (rc < 0) {
2196                                 printk(KERN_ERR
2197                                        "%s: dma_add_descriptors failed: %d\n",
2198                                        __func__, rc);
2199                                 goto out;
2200                         }
2201                 }
2202         }
2203
2204         rc = 0;
2205
2206 out:
2207
2208         up(&memMap->lock);
2209         return rc;
2210 }
2211
2212 EXPORT_SYMBOL(dma_map_create_descriptor_ring);
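
/*
 * Illustrative usage sketch (not compiled in): driving a scatter-gather
 * style transfer from a memory map. The completion wait is elided; the
 * channel handle and device FIFO address come from the caller.
 */
#if 0
static int example_sg_to_device(DMA_Handle_t handle, DMA_Device_t dev,
                                DMA_MemMap_t *memMap, void *buf,
                                size_t numBytes, dma_addr_t devFifoAddr)
{
        int rc;

        rc = dma_map_mem(memMap, buf, numBytes, DMA_TO_DEVICE);
        if (rc != 0)
                return rc;

        /* One descriptor ring covering every physically contiguous segment. */
        rc = dma_map_create_descriptor_ring(dev, memMap, devFifoAddr);
        if (rc == 0)
                rc = dma_start_transfer(handle);

        /* ... wait for the completion handler (not shown) ... */

        dma_unmap(memMap, 0);
        return rc;
}
#endif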
2213
2214 /****************************************************************************/
2215 /**
2216 *   Unmaps a memory region previously mapped for DMA, unlocking any user
2217 *   pages that were locked as part of the mapping.
2218 *
2219 *   @return     0 on success, error code otherwise.
2219 */
2220 /****************************************************************************/
2221
2222 int dma_unmap(DMA_MemMap_t *memMap,     /* Stores state information about the map */
2223               int dirtied       /* non-zero if any of the pages were modified */
2224     ) {
2225
2226         int rc = 0;
2227         int regionIdx;
2228         int segmentIdx;
2229         DMA_Region_t *region;
2230         DMA_Segment_t *segment;
2231
2232         down(&memMap->lock);
2233
2234         for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
2235                 region = &memMap->region[regionIdx];
2236
2237                 for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
2238                      segmentIdx++) {
2239                         segment = &region->segment[segmentIdx];
2240
2241                         switch (region->memType) {
2242                         case DMA_MEM_TYPE_VMALLOC:
2243                                 {
2244                                         printk(KERN_ERR
2245                                                "%s: vmalloc'd pages are not yet supported\n",
2246                                                __func__);
2247                                         rc = -EINVAL;
2248                                         goto out;
2249                                 }
2250
2251                         case DMA_MEM_TYPE_KMALLOC:
2252                                 {
2253 #if ALLOW_MAP_OF_KMALLOC_MEMORY
2254                                         dma_unmap_single(NULL,
2255                                                          segment->physAddr,
2256                                                          segment->numBytes,
2257                                                          memMap->dir);
2258 #endif
2259                                         break;
2260                                 }
2261
2262                         case DMA_MEM_TYPE_DMA:
2263                                 {
2264                                         dma_sync_single_for_cpu(NULL,
2265                                                 segment->physAddr,
2266                                                 segment->numBytes,
2267                                                 memMap->dir);
2270                                         break;
2271                                 }
2272
2273                         case DMA_MEM_TYPE_USER:
2274                                 {
2275                                         /* Nothing to do here. */
2276
2277                                         break;
2278                                 }
2279
2280                         default:
2281                                 {
2282                                         printk(KERN_ERR
2283                                                "%s: Unsupported memory type: %d\n",
2284                                                __func__, region->memType);
2285                                         rc = -EINVAL;
2286                                         goto out;
2287                                 }
2288                         }
2289
2290                         segment->virtAddr = NULL;
2291                         segment->physAddr = 0;
2292                         segment->numBytes = 0;
2293                 }
2294
2295                 if (region->numLockedPages > 0) {
2296                         int pageIdx;
2297
2298                         /* Some user pages were locked. We need to go and unlock them now. */
2299
2300                         for (pageIdx = 0; pageIdx < region->numLockedPages;
2301                              pageIdx++) {
2302                                 struct page *page =
2303                                     region->lockedPages[pageIdx];
2304
2305                                 if (memMap->dir == DMA_FROM_DEVICE) {
2306                                         SetPageDirty(page);
2307                                 }
2308                                 page_cache_release(page);
2309                         }
2310                         kfree(region->lockedPages);
2311                         region->numLockedPages = 0;
2312                         region->lockedPages = NULL;
2313                 }
2314
2315                 region->memType = DMA_MEM_TYPE_NONE;
2316                 region->virtAddr = NULL;
2317                 region->numBytes = 0;
2318                 region->numSegmentsUsed = 0;
2319         }
2320         memMap->userTask = NULL;
2321         memMap->numRegionsUsed = 0;
2322         memMap->inUse = 0;
2323
2324 out:
2325         up(&memMap->lock);
2326
2327         return rc;
2328 }
2329
2330 EXPORT_SYMBOL(dma_unmap);
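
/*
 * Illustrative usage sketch (not compiled in): mapping user-mode pages.
 * As dma_map_add_region() enforces above, dma_mem_map_set_user_task()
 * must be called first; its (memMap, task) signature is assumed here.
 * Pass a non-zero 'dirtied' to dma_unmap() if the device wrote the pages.
 */
#if 0
static int example_map_user_pages(DMA_MemMap_t *memMap, void __user *userBuf,
                                  size_t numBytes)
{
        int rc;

        dma_mem_map_set_user_task(memMap, current);     /* assumed signature */

        rc = dma_map_mem(memMap, (void *)userBuf, numBytes, DMA_FROM_DEVICE);
        if (rc != 0)
                return rc;

        /* ... run the transfer into the locked user pages ... */

        return dma_unmap(memMap, 1);    /* 1: pages were written; mark dirty */
}
#endif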