Merge branch 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 526579d..6b39675 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -221,13 +221,26 @@ static void dma_halt(struct fsldma_chan *chan)
        u32 mode;
        int i;
 
+       /* read the mode register */
        mode = DMA_IN(chan, &chan->regs->mr, 32);
-       mode |= FSL_DMA_MR_CA;
-       DMA_OUT(chan, &chan->regs->mr, mode, 32);
 
-       mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
+       /*
+        * The 85xx controller supports channel abort, which will stop
+        * the current transfer. On 83xx, this bit is the transfer error
+        * mask bit, which should not be changed.
+        */
+       if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+               mode |= FSL_DMA_MR_CA;
+               DMA_OUT(chan, &chan->regs->mr, mode, 32);
+
+               mode &= ~FSL_DMA_MR_CA;
+       }
+
+       /* stop the DMA controller */
+       mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
        DMA_OUT(chan, &chan->regs->mr, mode, 32);
 
+       /* wait for the DMA controller to become idle */
        for (i = 0; i < 100; i++) {
                if (dma_is_idle(chan))
                        return;
@@ -881,66 +894,16 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
        dma_pool_free(chan->desc_pool, desc, txd->phys);
 }
 
-/**
- * fsl_chan_ld_cleanup - Clean up link descriptors
- * @chan : Freescale DMA channel
- *
- * This function is run after the queue of running descriptors has been
- * executed by the DMA engine. It will run any callbacks, and then free
- * the descriptors.
- *
- * HARDWARE STATE: idle
- */
-static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
-{
-       struct fsl_desc_sw *desc, *_desc;
-       LIST_HEAD(ld_cleanup);
-       unsigned long flags;
-
-       spin_lock_irqsave(&chan->desc_lock, flags);
-
-       /* update the cookie if we have some descriptors to cleanup */
-       if (!list_empty(&chan->ld_running)) {
-               dma_cookie_t cookie;
-
-               desc = to_fsl_desc(chan->ld_running.prev);
-               cookie = desc->async_tx.cookie;
-
-               chan->completed_cookie = cookie;
-               chan_dbg(chan, "completed cookie=%d\n", cookie);
-       }
-
-       /*
-        * move the descriptors to a temporary list so we can drop the lock
-        * during the entire cleanup operation
-        */
-       list_splice_tail_init(&chan->ld_running, &ld_cleanup);
-
-       spin_unlock_irqrestore(&chan->desc_lock, flags);
-
-       /* Run the callback for each descriptor, in order */
-       list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
-
-               /* Remove from the list of transactions */
-               list_del(&desc->node);
-
-               /* Run all cleanup for this descriptor */
-               fsldma_cleanup_descriptor(chan, desc);
-       }
-}
-
 /**
  * fsl_chan_xfer_ld_queue - transfer any pending transactions
  * @chan : Freescale DMA channel
  *
  * HARDWARE STATE: idle
+ * LOCKING: must hold chan->desc_lock
  */
 static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 {
        struct fsl_desc_sw *desc;
-       unsigned long flags;
-
-       spin_lock_irqsave(&chan->desc_lock, flags);
 
        /*
         * If the list of pending descriptors is empty, then we
@@ -948,7 +911,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
         */
        if (list_empty(&chan->ld_pending)) {
                chan_dbg(chan, "no pending LDs\n");
-               goto out_unlock;
+               return;
        }
 
        /*
@@ -958,7 +921,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
         */
        if (!chan->idle) {
                chan_dbg(chan, "DMA controller still busy\n");
-               goto out_unlock;
+               return;
        }
 
        /*
@@ -996,9 +959,6 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 
        dma_start(chan);
        chan->idle = false;
-
-out_unlock:
-       spin_unlock_irqrestore(&chan->desc_lock, flags);
 }
 
 /**
@@ -1008,7 +968,11 @@ out_unlock:
 static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
 {
        struct fsldma_chan *chan = to_fsl_chan(dchan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->desc_lock, flags);
        fsl_chan_xfer_ld_queue(chan);
+       spin_unlock_irqrestore(&chan->desc_lock, flags);
 }
 
 /**
@@ -1109,20 +1073,53 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 static void dma_do_tasklet(unsigned long data)
 {
        struct fsldma_chan *chan = (struct fsldma_chan *)data;
+       struct fsl_desc_sw *desc, *_desc;
+       LIST_HEAD(ld_cleanup);
        unsigned long flags;
 
        chan_dbg(chan, "tasklet entry\n");
 
-       /* run all callbacks, free all used descriptors */
-       fsl_chan_ld_cleanup(chan);
-
-       /* the channel is now idle */
        spin_lock_irqsave(&chan->desc_lock, flags);
+
+       /* update the cookie if we have some descriptors to cleanup */
+       if (!list_empty(&chan->ld_running)) {
+               dma_cookie_t cookie;
+
+               desc = to_fsl_desc(chan->ld_running.prev);
+               cookie = desc->async_tx.cookie;
+
+               chan->completed_cookie = cookie;
+               chan_dbg(chan, "completed_cookie=%d\n", cookie);
+       }
+
+       /*
+        * move the descriptors to a temporary list so we can drop the lock
+        * during the entire cleanup operation
+        */
+       list_splice_tail_init(&chan->ld_running, &ld_cleanup);
+
+       /* the hardware is now idle and ready for more */
        chan->idle = true;
-       spin_unlock_irqrestore(&chan->desc_lock, flags);
 
-       /* start any pending transactions automatically */
+       /*
+        * Start any pending transactions automatically
+        *
+        * In the ideal case, we keep the DMA controller busy while we go
+        * ahead and free the descriptors below.
+        */
        fsl_chan_xfer_ld_queue(chan);
+       spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+       /* Run the callback for each descriptor, in order */
+       list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
+
+               /* Remove from the list of transactions */
+               list_del(&desc->node);
+
+               /* Run all cleanup for this descriptor */
+               fsldma_cleanup_descriptor(chan, desc);
+       }
+
        chan_dbg(chan, "tasklet exit\n");
 }
 
@@ -1335,8 +1332,7 @@ static void fsl_dma_chan_remove(struct fsldma_chan *chan)
        kfree(chan);
 }
 
-static int __devinit fsldma_of_probe(struct platform_device *op,
-                       const struct of_device_id *match)
+static int __devinit fsldma_of_probe(struct platform_device *op)
 {
        struct fsldma_device *fdev;
        struct device_node *child;
@@ -1468,20 +1464,13 @@ static struct of_platform_driver fsldma_of_driver = {
 
 static __init int fsldma_init(void)
 {
-       int ret;
-
        pr_info("Freescale Elo / Elo Plus DMA driver\n");
-
-       ret = of_register_platform_driver(&fsldma_of_driver);
-       if (ret)
-               pr_err("fsldma: failed to register platform driver\n");
-
-       return ret;
+       return platform_driver_register(&fsldma_of_driver);
 }
 
 static void __exit fsldma_exit(void)
 {
-       of_unregister_platform_driver(&fsldma_of_driver);
+       platform_driver_unregister(&fsldma_of_driver);
 }
 
 subsys_initcall(fsldma_init);