Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux...
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fd87b96..fcfa0a8 100644
@@ -301,7 +301,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 
        /* for cyclic transfers,
         * no need to replay callback function while stopping */
-       if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
+       if (!atc_chan_is_cyclic(atchan)) {
                dma_async_tx_callback   callback = txd->callback;
                void                    *param = txd->callback_param;
 
@@ -478,7 +478,7 @@ static void atc_tasklet(unsigned long data)
        spin_lock_irqsave(&atchan->lock, flags);
        if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
                atc_handle_error(atchan);
-       else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+       else if (atc_chan_is_cyclic(atchan))
                atc_handle_cyclic(atchan);
        else
                atc_advance_work(atchan);
@@ -945,7 +945,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
                spin_unlock_irqrestore(&atchan->lock, flags);
        } else if (cmd == DMA_RESUME) {
-               if (!test_bit(ATC_IS_PAUSED, &atchan->status))
+               if (!atc_chan_is_paused(atchan))
                        return 0;
 
                spin_lock_irqsave(&atchan->lock, flags);
@@ -1035,7 +1035,7 @@ atc_tx_status(struct dma_chan *chan,
        else
                dma_set_tx_state(txstate, last_complete, last_used, 0);
 
-       if (test_bit(ATC_IS_PAUSED, &atchan->status))
+       if (atc_chan_is_paused(atchan))
                ret = DMA_PAUSED;
 
        dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
@@ -1057,7 +1057,7 @@ static void atc_issue_pending(struct dma_chan *chan)
        dev_vdbg(chan2dev(chan), "issue_pending\n");
 
        /* Not needed for cyclic transfers */
-       if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+       if (atc_chan_is_cyclic(atchan))
                return;
 
        spin_lock_irqsave(&atchan->lock, flags);
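
Note: the atc_chan_is_cyclic()/atc_chan_is_paused() wrappers substituted above (and atc_chan_is_enabled(), used in the PM code further down) are not visible in this diff; they presumably live in at_hdmac_regs.h. A minimal sketch, assuming the bodies simply wrap the test_bit() expressions they replace plus the existing CHSR/mask accessors:

	static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
	{
		struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

		/* channel is enabled while its bit is set in the
		 * controller's channel status register */
		return !!(dma_readl(atdma, CHSR) & atchan->mask);
	}

	static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
	{
		return test_bit(ATC_IS_PAUSED, &atchan->status);
	}

	static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
	{
		return test_bit(ATC_IS_CYCLIC, &atchan->status);
	}
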
@@ -1268,12 +1268,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
        /* initialize channels related values */
        INIT_LIST_HEAD(&atdma->dma_common.channels);
-       for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
+       for (i = 0; i < pdata->nr_channels; i++) {
                struct at_dma_chan      *atchan = &atdma->chan[i];
 
                atchan->chan_common.device = &atdma->dma_common;
                atchan->chan_common.cookie = atchan->completed_cookie = 1;
-               atchan->chan_common.chan_id = i;
                list_add_tail(&atchan->chan_common.device_node,
                                &atdma->dma_common.channels);
 
@@ -1301,22 +1300,20 @@ static int __init at_dma_probe(struct platform_device *pdev)
        if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
                atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
 
-       if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
+       if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
                atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
-
-       if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
+               /* controller can do slave DMA: can trigger cyclic transfers */
+               dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
                atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
-
-       if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
-           dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
                atdma->dma_common.device_control = atc_control;
+       }
 
        dma_writel(atdma, EN, AT_DMA_ENABLE);
 
        dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
          dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
          dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
-         atdma->dma_common.chancnt);
+         pdata->nr_channels);
 
        dma_async_device_register(&atdma->dma_common);
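
Since DMA_CYCLIC is now set by the driver whenever slave DMA is supported, platform code no longer has to declare it; a client can simply ask for a cyclic-capable channel. Illustrative snippet (my_filter/my_param are hypothetical placeholders for a client's usual filter callback and its argument):

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_CYCLIC, mask);
	chan = dma_request_channel(mask, my_filter, my_param);
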
 
@@ -1385,27 +1382,112 @@ static void at_dma_shutdown(struct platform_device *pdev)
        clk_disable(atdma->clk);
 }
 
+static int at_dma_prepare(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct at_dma *atdma = platform_get_drvdata(pdev);
+       struct dma_chan *chan, *_chan;
+
+       list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+                       device_node) {
+               struct at_dma_chan *atchan = to_at_dma_chan(chan);
+               /* wait for transaction completion (except in cyclic case) */
+               if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
+                       return -EAGAIN;
+       }
+       return 0;
+}
+
+static void atc_suspend_cyclic(struct at_dma_chan *atchan)
+{
+       struct dma_chan *chan = &atchan->chan_common;
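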
+
+       /* The channel should already have been paused by its user;
+        * pause it here anyway if that has not been done */
+       if (!atc_chan_is_paused(atchan)) {
+               dev_warn(chan2dev(chan),
+               "cyclic channel not paused, should be done by channel user\n");
+               atc_control(chan, DMA_PAUSE, 0);
+       }
+
+       /* now preserve additional data for cyclic operations */
+       /* next descriptor address in the cyclic list */
+       atchan->save_dscr = channel_readl(atchan, DSCR);
+
+       vdbg_dump_regs(atchan);
+}
+
 static int at_dma_suspend_noirq(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct at_dma *atdma = platform_get_drvdata(pdev);
+       struct dma_chan *chan, *_chan;
 
-       at_dma_off(platform_get_drvdata(pdev));
+       /* preserve data */
+       list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+                       device_node) {
+               struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+               if (atc_chan_is_cyclic(atchan))
+                       atc_suspend_cyclic(atchan);
+               atchan->save_cfg = channel_readl(atchan, CFG);
+       }
+       atdma->save_imr = dma_readl(atdma, EBCIMR);
+
+       /* disable DMA controller */
+       at_dma_off(atdma);
        clk_disable(atdma->clk);
        return 0;
 }
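
The save_cfg, save_dscr and save_imr fields used above are not declared in this file; they are presumably added to the driver's private structures in at_hdmac_regs.h by the companion part of this patch. A sketch of the assumed fields (existing members elided):

	struct at_dma {
		/* ... existing members ... */
		u32		save_imr;	/* EBCIMR at suspend time */
	};

	struct at_dma_chan {
		/* ... existing members ... */
		u32		save_cfg;	/* CFG at suspend time */
		u32		save_dscr;	/* next cyclic DSCR at suspend time */
	};
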
 
+static void atc_resume_cyclic(struct at_dma_chan *atchan)
+{
+       struct at_dma   *atdma = to_at_dma(atchan->chan_common.device);
+
+       /* restore channel status for the cyclic descriptor list:
+        * next descriptor in the list at the time of suspend */
+       channel_writel(atchan, SADDR, 0);
+       channel_writel(atchan, DADDR, 0);
+       channel_writel(atchan, CTRLA, 0);
+       channel_writel(atchan, CTRLB, 0);
+       channel_writel(atchan, DSCR, atchan->save_dscr);
+       dma_writel(atdma, CHER, atchan->mask);
+
+       /* the channel pause status should be cleared by the channel user:
+        * we cannot clear it on the user's behalf here */
+
+       vdbg_dump_regs(atchan);
+}
+
 static int at_dma_resume_noirq(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct at_dma *atdma = platform_get_drvdata(pdev);
+       struct dma_chan *chan, *_chan;
 
+       /* bring back DMA controller */
        clk_enable(atdma->clk);
        dma_writel(atdma, EN, AT_DMA_ENABLE);
+
+       /* clear any pending interrupt */
+       while (dma_readl(atdma, EBCISR))
+               cpu_relax();
+
+       /* restore saved data */
+       dma_writel(atdma, EBCIER, atdma->save_imr);
+       list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+                       device_node) {
+               struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+               channel_writel(atchan, CFG, atchan->save_cfg);
+               if (atc_chan_is_cyclic(atchan))
+                       atc_resume_cyclic(atchan);
+       }
        return 0;
 }
 
 static const struct dev_pm_ops at_dma_dev_pm_ops = {
+       .prepare = at_dma_prepare,
        .suspend_noirq = at_dma_suspend_noirq,
        .resume_noirq = at_dma_resume_noirq,
 };
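
For these callbacks to run, at_dma_dev_pm_ops has to be referenced from the platform driver; the existing hookup elsewhere in this file presumably looks like the following (reconstructed for context, not part of this diff):

	static struct platform_driver at_dma_driver = {
		.remove		= __exit_p(at_dma_remove),
		.shutdown	= at_dma_shutdown,
		.driver = {
			.name	= "at_hdmac",
			.pm	= &at_dma_dev_pm_ops,
		},
	};
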