/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"

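/* Transfers shorter than this many bytes are forced to PIO */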
#define TMIO_MMC_MIN_DMA_LEN 8

static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
        /* Switch DMA mode on or off - SuperH specific? */
        writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift));
#endif
}

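/*
 * Prepare and submit a dmaengine slave descriptor for a card-to-host (read)
 * transfer.  Too-short transfers are simply forced to PIO; misalignment that
 * the bounce buffer cannot fix, and any mapping or preparation failure, fall
 * back to PIO and release both DMA channels.
 */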
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr, *sg_tmp;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_rx;
        struct tmio_mmc_data *pdata = host->pdata;
        dma_cookie_t cookie;
        int ret, i;
        bool aligned = true, multiple = true;
        unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

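        /*
         * Both the offset and the length of every element must satisfy the
         * DMA engine's alignment requirement.
         */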
        for_each_sg(sg, sg_tmp, host->sg_len, i) {
                if (sg_tmp->offset & align)
                        aligned = false;
                if (sg_tmp->length & align) {
                        multiple = false;
                        break;
                }
        }

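        /*
         * An unaligned list can only be bounced if it consists of a single
         * element no larger than one page; lengths must always be a multiple
         * of the alignment.
         */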
        if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;
        }

        if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
                host->force_pio = true;
                return;
        }

        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

        /* Only a single sg element may be unaligned - use the bounce buffer then */
        if (!aligned) {
                sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
                host->sg_ptr = &host->bounce_sg;
                sg = host->sg_ptr;
        }

        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
        if (ret > 0)
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
                        DMA_FROM_DEVICE, DMA_CTRL_ACK);

        if (desc) {
                cookie = dmaengine_submit(desc);
                if (cookie < 0) {
                        desc = NULL;
                        ret = cookie;
                }
        }
        dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
                __func__, host->sg_len, ret, cookie, host->mrq);

pio:
        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_rx = NULL;
                dma_release_channel(chan);
                /* Free the Tx channel too */
                chan = host->chan_tx;
                if (chan) {
                        host->chan_tx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                tmio_mmc_enable_dma(host, false);
        }

        dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
                desc, cookie, host->sg_len);
}

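/*
 * Host-to-card (write) counterpart of the Rx path above: data for an
 * unaligned single-element list is first copied into the bounce buffer
 * before the scatterlist is mapped and the descriptor is submitted.
 */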
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr, *sg_tmp;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_tx;
        struct tmio_mmc_data *pdata = host->pdata;
        dma_cookie_t cookie;
        int ret, i;
        bool aligned = true, multiple = true;
        unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

        for_each_sg(sg, sg_tmp, host->sg_len, i) {
                if (sg_tmp->offset & align)
                        aligned = false;
                if (sg_tmp->length & align) {
                        multiple = false;
                        break;
                }
        }

        if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;
        }

        if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
                host->force_pio = true;
                return;
        }

        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

        /* Only a single sg element may be unaligned - use the bounce buffer then */
        if (!aligned) {
                unsigned long flags;
                void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
                sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
                memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
                tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
                host->sg_ptr = &host->bounce_sg;
                sg = host->sg_ptr;
        }

        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
        if (ret > 0)
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
                        DMA_TO_DEVICE, DMA_CTRL_ACK);

        if (desc) {
                cookie = dmaengine_submit(desc);
                if (cookie < 0) {
                        desc = NULL;
                        ret = cookie;
                }
        }
        dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
                __func__, host->sg_len, ret, cookie, host->mrq);

pio:
        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_tx = NULL;
                dma_release_channel(chan);
                /* Free the Rx channel too */
                chan = host->chan_rx;
                if (chan) {
                        host->chan_rx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                tmio_mmc_enable_dma(host, false);
        }

        dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
                desc, cookie);
}

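/*
 * Start DMA in the direction requested by @data, provided the matching
 * dmaengine channel has been acquired.
 */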
void tmio_mmc_start_dma(struct tmio_mmc_host *host,
                        struct mmc_data *data)
{
        if (data->flags & MMC_DATA_READ) {
                if (host->chan_rx)
                        tmio_mmc_start_dma_rx(host);
        } else {
                if (host->chan_tx)
                        tmio_mmc_start_dma_tx(host);
        }
}

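/*
 * Tasklet that actually starts the transfer: pick the channel matching the
 * current data direction under the host lock, re-enable the DATAEND
 * interrupt and issue the pending descriptor to the dmaengine.
 */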
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
        struct dma_chan *chan = NULL;

        spin_lock_irq(&host->lock);

        if (host && host->data) {
                if (host->data->flags & MMC_DATA_READ)
                        chan = host->chan_rx;
                else
                        chan = host->chan_tx;
        }

        spin_unlock_irq(&host->lock);

        tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

        if (chan)
                dma_async_issue_pending(chan);
}

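/*
 * DMA completion tasklet: unmap the scatterlist for the direction that was
 * just used and let the regular data-IRQ handler finish the request.
 */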
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

        spin_lock_irq(&host->lock);

        if (!host->data)
                goto out;

        if (host->data->flags & MMC_DATA_READ)
                dma_unmap_sg(host->chan_rx->device->dev,
                             host->sg_ptr, host->sg_len,
                             DMA_FROM_DEVICE);
        else
                dma_unmap_sg(host->chan_tx->device->dev,
                             host->sg_ptr, host->sg_len,
                             DMA_TO_DEVICE);

        tmio_mmc_do_data_irq(host);
out:
        spin_unlock_irq(&host->lock);
}

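/*
 * Channel filter: accept whatever channel the dmaengine core offers and
 * attach the platform-supplied slave data via chan->private.
 */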
/* It might be necessary to make filter MFD specific */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
        dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
        chan->private = arg;
        return true;
}

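/*
 * Acquire the Tx and Rx dmaengine channels, allocate a single DMA-capable
 * page as a bounce buffer and set up the issue/completion tasklets.  If
 * anything fails, everything is torn down again and DMA remains unused.
 */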
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
        /* We can only either use DMA for both Tx and Rx or not use it at all */
        if (!pdata->dma)
                return;

        if (!host->chan_tx && !host->chan_rx) {
                dma_cap_mask_t mask;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
                                                    pdata->dma->chan_priv_tx);
                dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
                        host->chan_tx);

                if (!host->chan_tx)
                        return;

                host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
                                                    pdata->dma->chan_priv_rx);
                dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
                        host->chan_rx);

                if (!host->chan_rx)
                        goto ereqrx;

                host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
                if (!host->bounce_buf)
                        goto ebouncebuf;

                tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
                tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
        }

        tmio_mmc_enable_dma(host, true);

        return;

ebouncebuf:
        dma_release_channel(host->chan_rx);
        host->chan_rx = NULL;
ereqrx:
        dma_release_channel(host->chan_tx);
        host->chan_tx = NULL;
}

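/*
 * Clear the channel pointers before releasing the channels, then free the
 * bounce buffer page.
 */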
void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
        if (host->chan_tx) {
                struct dma_chan *chan = host->chan_tx;
                host->chan_tx = NULL;
                dma_release_channel(chan);
        }
        if (host->chan_rx) {
                struct dma_chan *chan = host->chan_rx;
                host->chan_rx = NULL;
                dma_release_channel(chan);
        }
        if (host->bounce_buf) {
                free_pages((unsigned long)host->bounce_buf, 0);
                host->bounce_buf = NULL;
        }
}