Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
[pandora-kernel.git] / drivers / dma / amba-pl08x.c
index 30b6921..6fbeebb 100644 (file)
@@ -86,6 +86,7 @@
 #include <asm/hardware/pl080.h>
 
 #include "dmaengine.h"
+#include "virt-dma.h"
 
 #define DRIVER_NAME    "pl08xdmac"
 
@@ -165,17 +166,17 @@ struct pl08x_sg {
 
 /**
  * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
- * @tx: async tx descriptor
- * @node: node for txd list for channels
+ * @vd: virtual DMA descriptor
  * @dsg_list: list of children sg's
  * @llis_bus: DMA memory address (physical) start for the LLIs
  * @llis_va: virtual memory address start for the LLIs
  * @cctl: control reg values for current txd
  * @ccfg: config reg values for current txd
+ * @done: this marks completed descriptors, which should not have their
+ *   mux released.
  */
 struct pl08x_txd {
-       struct dma_async_tx_descriptor tx;
-       struct list_head node;
+       struct virt_dma_desc vd;
        struct list_head dsg_list;
        dma_addr_t llis_bus;
        struct pl08x_lli *llis_va;
@@ -186,6 +187,7 @@ struct pl08x_txd {
         * trigger this txd.  Other registers are in llis_va[0].
         */
        u32 ccfg;
+       bool done;
 };
 
 /**
@@ -208,17 +210,11 @@ enum pl08x_dma_chan_state {
 
 /**
  * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
- * @chan: wrappped abstract channel
+ * @vc: wrapped virtual channel
  * @phychan: the physical channel utilized by this channel, if there is one
- * @phychan_hold: if non-zero, hold on to the physical channel even if we
- * have no pending entries
- * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
  * @name: name of channel
  * @cd: channel platform data
  * @runtime_addr: address for RX/TX according to the runtime config
- * @pend_list: queued transactions pending on this channel
- * @issued_list: issued transactions for this channel
- * @done_list: list of completed transactions
  * @at: active transaction on this channel
  * @lock: a lock for this channel data
  * @host: a pointer to the host (internal use)
@@ -228,18 +224,12 @@ enum pl08x_dma_chan_state {
  * @mux_use: count of descriptors using this DMA request signal setting
  */
 struct pl08x_dma_chan {
-       struct dma_chan chan;
+       struct virt_dma_chan vc;
        struct pl08x_phy_chan *phychan;
-       int phychan_hold;
-       struct tasklet_struct tasklet;
        const char *name;
        const struct pl08x_channel_data *cd;
        struct dma_slave_config cfg;
-       struct list_head pend_list;
-       struct list_head issued_list;
-       struct list_head done_list;
        struct pl08x_txd *at;
-       spinlock_t lock;
        struct pl08x_driver_data *host;
        enum pl08x_dma_chan_state state;
        bool slave;
@@ -257,7 +247,6 @@ struct pl08x_dma_chan {
  * @pd: platform data passed in from the platform/machine
  * @phy_chans: array of data for the physical channels
  * @pool: a pool for the LLI descriptors
- * @pool_ctr: counter of LLIs in the pool
  * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
  * fetches
  * @mem_buses: set to indicate memory transfers on AHB2.
@@ -272,7 +261,6 @@ struct pl08x_driver_data {
        struct pl08x_platform_data *pd;
        struct pl08x_phy_chan *phy_chans;
        struct dma_pool *pool;
-       int pool_ctr;
        u8 lli_buses;
        u8 mem_buses;
 };
@@ -290,12 +278,12 @@ struct pl08x_driver_data {
 
 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
 {
-       return container_of(chan, struct pl08x_dma_chan, chan);
+       return container_of(chan, struct pl08x_dma_chan, vc.chan);
 }
 
 static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
 {
-       return container_of(tx, struct pl08x_txd, tx);
+       return container_of(tx, struct pl08x_txd, vd.tx);
 }
 
 /*
@@ -360,12 +348,12 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
 {
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_phy_chan *phychan = plchan->phychan;
+       struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
+       struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
        struct pl08x_lli *lli;
-       struct pl08x_txd *txd;
        u32 val;
 
-       txd = list_first_entry(&plchan->issued_list, struct pl08x_txd, node);
-       list_del(&txd->node);
+       list_del(&txd->vd.node);
 
        plchan->at = txd;
 
@@ -483,10 +471,8 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 {
        struct pl08x_phy_chan *ch;
        struct pl08x_txd *txd;
-       unsigned long flags;
        size_t bytes = 0;
 
-       spin_lock_irqsave(&plchan->lock, flags);
        ch = plchan->phychan;
        txd = plchan->at;
 
@@ -526,27 +512,6 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
                }
        }
 
-       /* Sum up all queued transactions */
-       if (!list_empty(&plchan->issued_list)) {
-               struct pl08x_txd *txdi;
-               list_for_each_entry(txdi, &plchan->issued_list, node) {
-                       struct pl08x_sg *dsg;
-                       list_for_each_entry(dsg, &txd->dsg_list, node)
-                               bytes += dsg->len;
-               }
-       }
-
-       if (!list_empty(&plchan->pend_list)) {
-               struct pl08x_txd *txdi;
-               list_for_each_entry(txdi, &plchan->pend_list, node) {
-                       struct pl08x_sg *dsg;
-                       list_for_each_entry(dsg, &txd->dsg_list, node)
-                               bytes += dsg->len;
-               }
-       }
-
-       spin_unlock_irqrestore(&plchan->lock, flags);
-
        return bytes;
 }
 
@@ -587,19 +552,111 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
        return ch;
 }
 
+/* Mark the physical channel as free.  Note, this write is atomic. */
 static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
                                         struct pl08x_phy_chan *ch)
 {
-       unsigned long flags;
+       ch->serving = NULL;
+}
 
-       spin_lock_irqsave(&ch->lock, flags);
+/*
+ * Try to allocate a physical channel.  When successful, assign it to
+ * this virtual channel, and initiate the next descriptor.  The
+ * virtual channel lock must be held at this point.
+ */
+static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
+{
+       struct pl08x_driver_data *pl08x = plchan->host;
+       struct pl08x_phy_chan *ch;
 
-       /* Stop the channel and clear its interrupts */
-       pl08x_terminate_phy_chan(pl08x, ch);
+       ch = pl08x_get_phy_channel(pl08x, plchan);
+       if (!ch) {
+               dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
+               plchan->state = PL08X_CHAN_WAITING;
+               return;
+       }
 
-       /* Mark it as free */
-       ch->serving = NULL;
-       spin_unlock_irqrestore(&ch->lock, flags);
+       dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
+               ch->id, plchan->name);
+
+       plchan->phychan = ch;
+       plchan->state = PL08X_CHAN_RUNNING;
+       pl08x_start_next_txd(plchan);
+}
+
+static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
+       struct pl08x_dma_chan *plchan)
+{
+       struct pl08x_driver_data *pl08x = plchan->host;
+
+       dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
+               ch->id, plchan->name);
+
+       /*
+        * We do this without taking the lock; we're really only concerned
+        * about whether this pointer is NULL or not, and we're guaranteed
+        * that this will only be called when it _already_ is non-NULL.
+        */
+       ch->serving = plchan;
+       plchan->phychan = ch;
+       plchan->state = PL08X_CHAN_RUNNING;
+       pl08x_start_next_txd(plchan);
+}
+
+/*
+ * Free a physical DMA channel, potentially reallocating it to another
+ * virtual channel if we have any pending.
+ */
+static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
+{
+       struct pl08x_driver_data *pl08x = plchan->host;
+       struct pl08x_dma_chan *p, *next;
+
+ retry:
+       next = NULL;
+
+       /* Find a waiting virtual channel for the next transfer. */
+       list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
+               if (p->state == PL08X_CHAN_WAITING) {
+                       next = p;
+                       break;
+               }
+
+       if (!next) {
+               list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
+                       if (p->state == PL08X_CHAN_WAITING) {
+                               next = p;
+                               break;
+                       }
+       }
+
+       /* Ensure that the physical channel is stopped */
+       pl08x_terminate_phy_chan(pl08x, plchan->phychan);
+
+       if (next) {
+               bool success;
+
+               /*
+                * Eww.  We know this isn't going to deadlock
+                * but lockdep probably doesn't.
+                */
+               spin_lock(&next->vc.lock);
+               /* Re-check the state now that we have the lock */
+               success = next->state == PL08X_CHAN_WAITING;
+               if (success)
+                       pl08x_phy_reassign_start(plchan->phychan, next);
+               spin_unlock(&next->vc.lock);
+
+               /* If the state changed, try to find another channel */
+               if (!success)
+                       goto retry;
+       } else {
+               /* No more jobs, so free up the physical channel */
+               pl08x_put_phy_channel(pl08x, plchan->phychan);
+       }
+
+       plchan->phychan = NULL;
+       plchan->state = PL08X_CHAN_IDLE;
 }
 
 /*
@@ -762,8 +819,6 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
                return 0;
        }
 
-       pl08x->pool_ctr++;
-
        bd.txd = txd;
        bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
        cctl = txd->cctl;
@@ -979,18 +1034,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
        return num_llis;
 }
 
-/* You should call this with the struct pl08x lock held */
 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
                           struct pl08x_txd *txd)
 {
        struct pl08x_sg *dsg, *_dsg;
 
-       /* Free the LLI */
        if (txd->llis_va)
                dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
 
-       pl08x->pool_ctr--;
-
        list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
                list_del(&dsg->node);
                kfree(dsg);
@@ -999,20 +1050,60 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
        kfree(txd);
 }
 
+static void pl08x_unmap_buffers(struct pl08x_txd *txd)
+{
+       struct device *dev = txd->vd.tx.chan->device->dev;
+       struct pl08x_sg *dsg;
+
+       if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+               if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+                       list_for_each_entry(dsg, &txd->dsg_list, node)
+                               dma_unmap_single(dev, dsg->src_addr, dsg->len,
+                                               DMA_TO_DEVICE);
+               else {
+                       list_for_each_entry(dsg, &txd->dsg_list, node)
+                               dma_unmap_page(dev, dsg->src_addr, dsg->len,
+                                               DMA_TO_DEVICE);
+               }
+       }
+       if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+               if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+                       list_for_each_entry(dsg, &txd->dsg_list, node)
+                               dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+                                               DMA_FROM_DEVICE);
+               else
+                       list_for_each_entry(dsg, &txd->dsg_list, node)
+                               dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+                                               DMA_FROM_DEVICE);
+       }
+}
+
+static void pl08x_desc_free(struct virt_dma_desc *vd)
+{
+       struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+       struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
+
+       if (!plchan->slave)
+               pl08x_unmap_buffers(txd);
+
+       if (!txd->done)
+               pl08x_release_mux(plchan);
+
+       pl08x_free_txd(plchan->host, txd);
+}
+
 static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
                                struct pl08x_dma_chan *plchan)
 {
        LIST_HEAD(head);
        struct pl08x_txd *txd;
 
-       list_splice_tail_init(&plchan->issued_list, &head);
-       list_splice_tail_init(&plchan->pend_list, &head);
+       vchan_get_all_descriptors(&plchan->vc, &head);
 
        while (!list_empty(&head)) {
-               txd = list_first_entry(&head, struct pl08x_txd, node);
-               pl08x_release_mux(plchan);
-               list_del(&txd->node);
-               pl08x_free_txd(pl08x, txd);
+               txd = list_first_entry(&head, struct pl08x_txd, vd.node);
+               list_del(&txd->vd.node);
+               pl08x_desc_free(&txd->vd);
        }
 }
 
@@ -1026,75 +1117,8 @@ static int pl08x_alloc_chan_resources(struct dma_chan *chan)
 
 static void pl08x_free_chan_resources(struct dma_chan *chan)
 {
-}
-
-/*
- * This should be called with the channel plchan->lock held
- */
-static int prep_phy_channel(struct pl08x_dma_chan *plchan)
-{
-       struct pl08x_driver_data *pl08x = plchan->host;
-       struct pl08x_phy_chan *ch;
-
-       /* Check if we already have a channel */
-       if (plchan->phychan) {
-               ch = plchan->phychan;
-               goto got_channel;
-       }
-
-       ch = pl08x_get_phy_channel(pl08x, plchan);
-       if (!ch) {
-               /* No physical channel available, cope with it */
-               dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
-               return -EBUSY;
-       }
-
-       plchan->phychan = ch;
-       dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
-                ch->id, plchan->name);
-
-got_channel:
-       plchan->phychan_hold++;
-
-       return 0;
-}
-
-static void release_phy_channel(struct pl08x_dma_chan *plchan)
-{
-       struct pl08x_driver_data *pl08x = plchan->host;
-
-       pl08x_put_phy_channel(pl08x, plchan->phychan);
-       plchan->phychan = NULL;
-}
-
-static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-       struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
-       struct pl08x_txd *txd = to_pl08x_txd(tx);
-       unsigned long flags;
-       dma_cookie_t cookie;
-
-       spin_lock_irqsave(&plchan->lock, flags);
-       cookie = dma_cookie_assign(tx);
-
-       /* Put this onto the pending list */
-       list_add_tail(&txd->node, &plchan->pend_list);
-
-       /*
-        * If there was no physical channel available for this memcpy,
-        * stack the request up and indicate that the channel is waiting
-        * for a free physical channel.
-        */
-       if (!plchan->slave && !plchan->phychan) {
-               /* Do this memcpy whenever there is a channel ready */
-               plchan->state = PL08X_CHAN_WAITING;
-       } else {
-               plchan->phychan_hold--;
-       }
-
-       spin_unlock_irqrestore(&plchan->lock, flags);
-
-       return cookie;
+       /* Ensure all queued descriptors are freed */
+       vchan_free_chan_resources(to_virt_chan(chan));
 }
 
 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
@@ -1114,23 +1138,53 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
                dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+       struct virt_dma_desc *vd;
+       unsigned long flags;
        enum dma_status ret;
+       size_t bytes = 0;
 
        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_SUCCESS)
                return ret;
 
+       /*
+        * There's no point calculating the residue if there's
+        * no txstate to store the value.
+        */
+       if (!txstate) {
+               if (plchan->state == PL08X_CHAN_PAUSED)
+                       ret = DMA_PAUSED;
+               return ret;
+       }
+
+       spin_lock_irqsave(&plchan->vc.lock, flags);
+       ret = dma_cookie_status(chan, cookie, txstate);
+       if (ret != DMA_SUCCESS) {
+               vd = vchan_find_desc(&plchan->vc, cookie);
+               if (vd) {
+                       /* On the issued list, so hasn't been processed yet */
+                       struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+                       struct pl08x_sg *dsg;
+
+                       list_for_each_entry(dsg, &txd->dsg_list, node)
+                               bytes += dsg->len;
+               } else {
+                       bytes = pl08x_getbytes_chan(plchan);
+               }
+       }
+       spin_unlock_irqrestore(&plchan->vc.lock, flags);
+
        /*
         * This cookie not complete yet
         * Get number of bytes left in the active transactions and queue
         */
-       dma_set_residue(txstate, pl08x_getbytes_chan(plchan));
+       dma_set_residue(txstate, bytes);
 
-       if (plchan->state == PL08X_CHAN_PAUSED)
-               return DMA_PAUSED;
+       if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
+               ret = DMA_PAUSED;
 
        /* Whether waiting or running, we're in progress */
-       return DMA_IN_PROGRESS;
+       return ret;
 }
 
 /* PrimeCell DMA extension */
@@ -1280,83 +1334,19 @@ static void pl08x_issue_pending(struct dma_chan *chan)
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        unsigned long flags;
 
-       spin_lock_irqsave(&plchan->lock, flags);
-       list_splice_tail_init(&plchan->pend_list, &plchan->issued_list);
-
-       /* Something is already active, or we're waiting for a channel... */
-       if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
-               spin_unlock_irqrestore(&plchan->lock, flags);
-               return;
+       spin_lock_irqsave(&plchan->vc.lock, flags);
+       if (vchan_issue_pending(&plchan->vc)) {
+               if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
+                       pl08x_phy_alloc_and_start(plchan);
        }
-
-       /* Take the first element in the queue and execute it */
-       if (!list_empty(&plchan->issued_list)) {
-               plchan->state = PL08X_CHAN_RUNNING;
-               pl08x_start_next_txd(plchan);
-       }
-
-       spin_unlock_irqrestore(&plchan->lock, flags);
-}
-
-static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
-                                       struct pl08x_txd *txd)
-{
-       struct pl08x_driver_data *pl08x = plchan->host;
-       unsigned long flags;
-       int num_llis, ret;
-
-       num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
-       if (!num_llis) {
-               spin_lock_irqsave(&plchan->lock, flags);
-               pl08x_free_txd(pl08x, txd);
-               spin_unlock_irqrestore(&plchan->lock, flags);
-               return -EINVAL;
-       }
-
-       spin_lock_irqsave(&plchan->lock, flags);
-
-       /*
-        * See if we already have a physical channel allocated,
-        * else this is the time to try to get one.
-        */
-       ret = prep_phy_channel(plchan);
-       if (ret) {
-               /*
-                * No physical channel was available.
-                *
-                * memcpy transfers can be sorted out at submission time.
-                */
-               if (plchan->slave) {
-                       pl08x_free_txd_list(pl08x, plchan);
-                       pl08x_free_txd(pl08x, txd);
-                       spin_unlock_irqrestore(&plchan->lock, flags);
-                       return -EBUSY;
-               }
-       } else
-               /*
-                * Else we're all set, paused and ready to roll, status
-                * will switch to PL08X_CHAN_RUNNING when we call
-                * issue_pending(). If there is something running on the
-                * channel already we don't change its state.
-                */
-               if (plchan->state == PL08X_CHAN_IDLE)
-                       plchan->state = PL08X_CHAN_PAUSED;
-
-       spin_unlock_irqrestore(&plchan->lock, flags);
-
-       return 0;
+       spin_unlock_irqrestore(&plchan->vc.lock, flags);
 }
 
-static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
-       unsigned long flags)
+static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
 {
        struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
 
        if (txd) {
-               dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
-               txd->tx.flags = flags;
-               txd->tx.tx_submit = pl08x_tx_submit;
-               INIT_LIST_HEAD(&txd->node);
                INIT_LIST_HEAD(&txd->dsg_list);
 
                /* Always enable error and terminal interrupts */
@@ -1379,7 +1369,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
        struct pl08x_sg *dsg;
        int ret;
 
-       txd = pl08x_get_txd(plchan, flags);
+       txd = pl08x_get_txd(plchan);
        if (!txd) {
                dev_err(&pl08x->adev->dev,
                        "%s no memory for descriptor\n", __func__);
@@ -1411,11 +1401,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
                txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
                                              pl08x->mem_buses);
 
-       ret = pl08x_prep_channel_resources(plchan, txd);
-       if (ret)
+       ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+       if (!ret) {
+               pl08x_free_txd(pl08x, txd);
                return NULL;
+       }
 
-       return &txd->tx;
+       return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
 }
 
 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
@@ -1437,7 +1429,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
        dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
                        __func__, sg_dma_len(sgl), plchan->name);
 
-       txd = pl08x_get_txd(plchan, flags);
+       txd = pl08x_get_txd(plchan);
        if (!txd) {
                dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
                return NULL;
@@ -1527,11 +1519,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
                }
        }
 
-       ret = pl08x_prep_channel_resources(plchan, txd);
-       if (ret)
+       ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+       if (!ret) {
+               pl08x_release_mux(plchan);
+               pl08x_free_txd(pl08x, txd);
                return NULL;
+       }
 
-       return &txd->tx;
+       return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
 }
 
 static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -1552,9 +1547,9 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
         * Anything succeeds on channels with no physical allocation and
         * no queued transfers.
         */
-       spin_lock_irqsave(&plchan->lock, flags);
+       spin_lock_irqsave(&plchan->vc.lock, flags);
        if (!plchan->phychan && !plchan->at) {
-               spin_unlock_irqrestore(&plchan->lock, flags);
+               spin_unlock_irqrestore(&plchan->vc.lock, flags);
                return 0;
        }
 
@@ -1563,20 +1558,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                plchan->state = PL08X_CHAN_IDLE;
 
                if (plchan->phychan) {
-                       pl08x_terminate_phy_chan(pl08x, plchan->phychan);
-
                        /*
                         * Mark physical channel as free and free any slave
                         * signal
                         */
-                       release_phy_channel(plchan);
-                       plchan->phychan_hold = 0;
+                       pl08x_phy_free(plchan);
                }
                /* Dequeue jobs and free LLIs */
                if (plchan->at) {
-                       /* Killing this one off, release its mux */
-                       pl08x_release_mux(plchan);
-                       pl08x_free_txd(pl08x, plchan->at);
+                       pl08x_desc_free(&plchan->at->vd);
                        plchan->at = NULL;
                }
                /* Dequeue jobs not yet fired as well */
@@ -1596,7 +1586,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                break;
        }
 
-       spin_unlock_irqrestore(&plchan->lock, flags);
+       spin_unlock_irqrestore(&plchan->vc.lock, flags);
 
        return ret;
 }
@@ -1633,112 +1623,6 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
        writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
 }
 
-static void pl08x_unmap_buffers(struct pl08x_txd *txd)
-{
-       struct device *dev = txd->tx.chan->device->dev;
-       struct pl08x_sg *dsg;
-
-       if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-               if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-                       list_for_each_entry(dsg, &txd->dsg_list, node)
-                               dma_unmap_single(dev, dsg->src_addr, dsg->len,
-                                               DMA_TO_DEVICE);
-               else {
-                       list_for_each_entry(dsg, &txd->dsg_list, node)
-                               dma_unmap_page(dev, dsg->src_addr, dsg->len,
-                                               DMA_TO_DEVICE);
-               }
-       }
-       if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-               if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-                       list_for_each_entry(dsg, &txd->dsg_list, node)
-                               dma_unmap_single(dev, dsg->dst_addr, dsg->len,
-                                               DMA_FROM_DEVICE);
-               else
-                       list_for_each_entry(dsg, &txd->dsg_list, node)
-                               dma_unmap_page(dev, dsg->dst_addr, dsg->len,
-                                               DMA_FROM_DEVICE);
-       }
-}
-
-static void pl08x_tasklet(unsigned long data)
-{
-       struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
-       struct pl08x_driver_data *pl08x = plchan->host;
-       unsigned long flags;
-       LIST_HEAD(head);
-
-       spin_lock_irqsave(&plchan->lock, flags);
-       list_splice_tail_init(&plchan->done_list, &head);
-
-       if (plchan->at || !list_empty(&plchan->pend_list) || plchan->phychan_hold) {
-               /*
-                * This channel is still in use - we have a new txd being
-                * prepared and will soon be queued.  Don't give up the
-                * physical channel.
-                */
-       } else {
-               struct pl08x_dma_chan *waiting = NULL;
-
-               /*
-                * No more jobs, so free up the physical channel
-                */
-               release_phy_channel(plchan);
-               plchan->state = PL08X_CHAN_IDLE;
-
-               /*
-                * And NOW before anyone else can grab that free:d up
-                * physical channel, see if there is some memcpy pending
-                * that seriously needs to start because of being stacked
-                * up while we were choking the physical channels with data.
-                */
-               list_for_each_entry(waiting, &pl08x->memcpy.channels,
-                                   chan.device_node) {
-                       if (waiting->state == PL08X_CHAN_WAITING) {
-                               int ret;
-
-                               /* This should REALLY not fail now */
-                               ret = prep_phy_channel(waiting);
-                               BUG_ON(ret);
-                               waiting->phychan_hold--;
-                               waiting->state = PL08X_CHAN_RUNNING;
-                               /*
-                                * Eww.  We know this isn't going to deadlock
-                                * but lockdep probably doens't.
-                                */
-                               spin_lock(&waiting->lock);
-                               pl08x_start_next_txd(waiting);
-                               spin_unlock(&waiting->lock);
-                               break;
-                       }
-               }
-       }
-
-       spin_unlock_irqrestore(&plchan->lock, flags);
-
-       while (!list_empty(&head)) {
-               struct pl08x_txd *txd = list_first_entry(&head,
-                                               struct pl08x_txd, node);
-               dma_async_tx_callback callback = txd->tx.callback;
-               void *callback_param = txd->tx.callback_param;
-
-               list_del(&txd->node);
-
-               /* Don't try to unmap buffers on slave channels */
-               if (!plchan->slave)
-                       pl08x_unmap_buffers(txd);
-
-               /* Free the descriptor */
-               spin_lock_irqsave(&plchan->lock, flags);
-               pl08x_free_txd(pl08x, txd);
-               spin_unlock_irqrestore(&plchan->lock, flags);
-
-               /* Callback to signal completion */
-               if (callback)
-                       callback(callback_param);
-       }
-}
-
 static irqreturn_t pl08x_irq(int irq, void *dev)
 {
        struct pl08x_driver_data *pl08x = dev;
@@ -1772,7 +1656,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
                                continue;
                        }
 
-                       spin_lock(&plchan->lock);
+                       spin_lock(&plchan->vc.lock);
                        tx = plchan->at;
                        if (tx) {
                                plchan->at = NULL;
@@ -1781,17 +1665,20 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
                                 * reservation.
                                 */
                                pl08x_release_mux(plchan);
-                               dma_cookie_complete(&tx->tx);
-                               list_add_tail(&tx->node, &plchan->done_list);
+                               tx->done = true;
+                               vchan_cookie_complete(&tx->vd);
 
-                               /* And start the next descriptor */
-                               if (!list_empty(&plchan->issued_list))
+                               /*
+                                * And start the next descriptor (if any),
+                                * otherwise free this channel.
+                                */
+                               if (vchan_next_desc(&plchan->vc))
                                        pl08x_start_next_txd(plchan);
+                               else
+                                       pl08x_phy_free(plchan);
                        }
-                       spin_unlock(&plchan->lock);
+                       spin_unlock(&plchan->vc.lock);
 
-                       /* Schedule tasklet on this channel */
-                       tasklet_schedule(&plchan->tasklet);
                        mask |= (1 << i);
                }
        }
@@ -1851,17 +1738,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
                         "initialize virtual channel \"%s\"\n",
                         chan->name);
 
-               chan->chan.device = dmadev;
-               dma_cookie_init(&chan->chan);
-
-               spin_lock_init(&chan->lock);
-               INIT_LIST_HEAD(&chan->pend_list);
-               INIT_LIST_HEAD(&chan->issued_list);
-               INIT_LIST_HEAD(&chan->done_list);
-               tasklet_init(&chan->tasklet, pl08x_tasklet,
-                            (unsigned long) chan);
-
-               list_add_tail(&chan->chan.device_node, &dmadev->channels);
+               chan->vc.desc_free = pl08x_desc_free;
+               vchan_init(&chan->vc, dmadev);
        }
        dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
                 i, slave ? "slave" : "memcpy");
@@ -1874,8 +1752,8 @@ static void pl08x_free_virtual_channels(struct dma_device *dmadev)
        struct pl08x_dma_chan *next;
 
        list_for_each_entry_safe(chan,
-                                next, &dmadev->channels, chan.device_node) {
-               list_del(&chan->chan.device_node);
+                                next, &dmadev->channels, vc.chan.device_node) {
+               list_del(&chan->vc.chan.device_node);
                kfree(chan);
        }
 }
@@ -1928,7 +1806,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
        seq_printf(s, "\nPL08x virtual memcpy channels:\n");
        seq_printf(s, "CHANNEL:\tSTATE:\n");
        seq_printf(s, "--------\t------\n");
-       list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
+       list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
                seq_printf(s, "%s\t\t%s\n", chan->name,
                           pl08x_state_str(chan->state));
        }
@@ -1936,7 +1814,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
        seq_printf(s, "\nPL08x virtual slave channels:\n");
        seq_printf(s, "CHANNEL:\tSTATE:\n");
        seq_printf(s, "--------\t------\n");
-       list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
+       list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
                seq_printf(s, "%s\t\t%s\n", chan->name,
                           pl08x_state_str(chan->state));
        }