/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"

#define TMIO_MMC_MIN_DMA_LEN 8
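
/*
 * Switch the controller's DMA mode on or off. Only meaningful once both
 * the Tx and the Rx channel have been acquired, hence the early return.
 */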
void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (!host->chan_tx || !host->chan_rx)
		return;

#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
	/* Switch DMA mode on or off - SuperH specific? */
	sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
#endif
}
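
/*
 * Prepare and submit a DMA read. A single unaligned scatterlist element is
 * routed through the bounce buffer, very short transfers are forced to PIO
 * for this request only, and anything else that violates the alignment
 * constraints disables DMA altogether via the "pio" fallback below.
 */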
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie = -EINVAL;	/* keep the failure-path debug print defined */
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_FROM_DEVICE, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->sg_len);
}
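
/*
 * Prepare and submit a DMA write. Mirrors the Rx path above, except that an
 * unaligned element must be copied into the bounce buffer *before* mapping,
 * since the data flows towards the card.
 */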
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie = -EINVAL;	/* keep the failure-path debug print defined */
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);

		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_TO_DEVICE, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}
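
/*
 * Dispatch a data request to the Rx or Tx helper according to its direction,
 * provided the corresponding channel has been set up.
 */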
void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			tmio_mmc_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			tmio_mmc_start_dma_tx(host);
	}
}
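
/*
 * Tasklet: start the DMA engine for the current request once the command
 * phase is done. Takes the host lock to sample host->data consistently.
 */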
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	if (host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}
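
/*
 * Tasklet run on DMA completion: unmap the scatterlist and let the core
 * finish the data stage via tmio_mmc_do_data_irq().
 */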
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}
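
/*
 * dmaengine filter: accept the first channel offered and attach the
 * platform-provided slave data via chan->private, for the DMA controller
 * driver (e.g. shdma) to evaluate.
 */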
/* It might be necessary to make filter MFD specific */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);

	chan->private = arg;

	return true;
}
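
/*
 * Called at probe time: try to acquire a Tx/Rx channel pair and a bounce
 * page. On any failure the host simply stays in PIO mode.
 */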
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!pdata->dma)
		return;

	if (!host->chan_tx && !host->chan_rx) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_tx);
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_rx);
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
	}

	tmio_mmc_enable_dma(host, true);

	return;

ebouncebuf:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}
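
/*
 * Undo tmio_mmc_request_dma(): release both channels and free the bounce
 * page.
 */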
void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}
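
/*
 * Usage sketch (illustrative only, not part of this file): platform code
 * enables this DMA path by pointing tmio_mmc_data.dma at a tmio_mmc_dma
 * descriptor before the host probes. The "example_*" slave objects below
 * are hypothetical board-specific dmaengine filter data.
 *
 *	static struct tmio_mmc_dma example_dma = {
 *		.chan_priv_tx	 = &example_slave_tx,	// handed to tmio_mmc_filter()
 *		.chan_priv_rx	 = &example_slave_rx,
 *		.alignment_shift = 1,			// buffers 2-byte aligned
 *	};
 *
 *	static struct tmio_mmc_data example_pdata = {
 *		.dma		 = &example_dma,
 *		// remaining fields as required by the board
 *	};
 *
 * tmio_mmc_request_dma(host, &example_pdata) then tries to acquire both
 * channels and falls back to PIO if either is unavailable.
 */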