struct dma_chan *dma_chan;
/* Mask for DMA transfers */
dma_cap_mask_t mask;
+ /* dma channel private data */
+ void *dma_priv;
/* DMA transfer work */
struct work_struct work;
/* DMA delayed finish work */
static bool filter(struct dma_chan *chan, void *slave)
{
+ chan->private = slave;
return true;
}
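/*
 * A minimal sketch (assuming <linux/dmaengine.h>) of how a filter like the
 * one above is typically paired with dma_request_channel(): the core calls
 * the filter for each candidate channel, and this filter accepts the first
 * offer while attaching the caller-supplied data to chan->private for the
 * DMA engine driver to consume. The helper name below is illustrative, not
 * taken from this driver.
 */
static struct dma_chan *sketch_request_memcpy_chan(void *dma_priv)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* dma_priv is passed to filter() as its second argument */
	return dma_request_channel(mask, filter, dma_priv);
}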
return -ETIMEDOUT;
}
- /* Check if PIO Error interrupt has occured */
+ /* Check if PIO Error interrupt has occurred */
if (acdev->dma_status & ATA_DMA_ERR)
return -EAGAIN;
/*
* For each sg:
* MAX_XFER_COUNT data will be transferred before we get transfer
- * complete interrupt. Inbetween after FIFO_SIZE data
+ * complete interrupt. In between, after FIFO_SIZE data, a
* buffer available interrupt will be generated. At this time we will
* fill FIFO again: max FIFO_SIZE data.
*/
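/*
 * A minimal sketch of the per-sg flow described in the comment above; the
 * constants and sketch_*() helpers are assumptions standing in for the
 * driver's real FIFO and interrupt handling (headers such as <linux/sizes.h>
 * and <linux/minmax.h> assumed).
 */
#define SKETCH_FIFO_SIZE	SZ_512		/* assumed FIFO depth */
#define SKETCH_MAX_XFER_COUNT	SZ_128K		/* assumed per-sg transfer limit */

static void sketch_wait_buf_avail(void) { }	/* stand-in: wait for buffer available interrupt */
static void sketch_fill_fifo(u32 bytes) { }	/* stand-in: push up to FIFO_SIZE data */

static void sketch_xfer_one_sg(u32 sg_len)
{
	u32 xfer_cnt = min_t(u32, sg_len, SKETCH_MAX_XFER_COUNT);

	while (xfer_cnt) {
		u32 chunk = min_t(u32, xfer_cnt, SKETCH_FIFO_SIZE);

		sketch_wait_buf_avail();	/* buffer available interrupt */
		sketch_fill_fifo(chunk);	/* refill with at most FIFO_SIZE data */
		xfer_cnt -= chunk;
	}
	/* the transfer complete interrupt fires once the programmed count is done */
}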
acdev->vbase + XFER_CTR);
spin_unlock_irqrestore(&acdev->host->lock, flags);
- /* continue dma xfers untill current sg is completed */
+ /* continue dma xfers until current sg is completed */
while (xfer_cnt) {
/* wait for read to complete */
if (!write) {
/* request dma channels */
/* dma_request_channel may sleep, so calling from process context */
- acdev->dma_chan = dma_request_channel(acdev->mask, filter, NULL);
+ acdev->dma_chan = dma_request_channel(acdev->mask, filter,
+ acdev->dma_priv);
if (!acdev->dma_chan) {
dev_err(acdev->host->dev, "Unable to get dma_chan\n");
goto chan_request_fail;
chan_request_fail:
spin_lock_irqsave(&acdev->host->lock, flags);
- /* error when transfering data to/from memory */
+ /* error when transferring data to/from memory */
qc->err_mask |= AC_ERR_HOST_BUS;
qc->ap->hsm_task_state = HSM_ST_ERR;
INIT_WORK(&acdev->work, data_xfer);
INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
dma_cap_set(DMA_MEMCPY, acdev->mask);
+ acdev->dma_priv = pdata->dma_priv;
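/*
 * Sketch of where pdata->dma_priv could come from, assuming the platform
 * data type is struct arasan_cf_pdata; cf_dma_param is a purely illustrative
 * placeholder for whatever object the dmaengine driver's filter expects.
 */
static int cf_dma_param;	/* placeholder filter parameter */

static struct arasan_cf_pdata cf_pdata = {
	.quirk		= 0,
	/* reaches dma_request_channel() via acdev->dma_priv and filter() */
	.dma_priv	= &cf_dma_param,
};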
/* Handle platform specific quirks */
if (pdata->quirk) {