/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>

#include <asm/irq.h>
#include <mach/dma.h>
#include <mach/hardware.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS     16
#define IMX_DMA_CHANNELS  16

#define IMX_DMA_LENGTH_LOOP     ((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32      (0 << 4)
#define IMX_DMA_MEMSIZE_8       (1 << 4)
#define IMX_DMA_MEMSIZE_16      (2 << 4)
#define IMX_DMA_TYPE_LINEAR     (0 << 10)
#define IMX_DMA_TYPE_2D         (1 << 10)
#define IMX_DMA_TYPE_FIFO       (2 << 10)

#define IMX_DMA_ERR_BURST     (1 << 0)
#define IMX_DMA_ERR_REQUEST   (1 << 1)
#define IMX_DMA_ERR_TRANSFER  (1 << 2)
#define IMX_DMA_ERR_BUFFER    (1 << 3)
#define IMX_DMA_ERR_TIMEOUT   (1 << 4)

#define DMA_DCR     0x00                /* Control Register */
#define DMA_DISR    0x04                /* Interrupt status Register */
#define DMA_DIMR    0x08                /* Interrupt mask Register */
#define DMA_DBTOSR  0x0c                /* Burst timeout status Register */
#define DMA_DRTOSR  0x10                /* Request timeout Register */
#define DMA_DSESR   0x14                /* Transfer Error Status Register */
#define DMA_DBOSR   0x18                /* Buffer overflow status Register */
#define DMA_DBTOCR  0x1c                /* Burst timeout control Register */
#define DMA_WSRA    0x40                /* W-Size Register A */
#define DMA_XSRA    0x44                /* X-Size Register A */
#define DMA_YSRA    0x48                /* Y-Size Register A */
#define DMA_WSRB    0x4c                /* W-Size Register B */
#define DMA_XSRB    0x50                /* X-Size Register B */
#define DMA_YSRB    0x54                /* Y-Size Register B */
#define DMA_SAR(x)  (0x80 + ((x) << 6)) /* Source Address Registers */
#define DMA_DAR(x)  (0x84 + ((x) << 6)) /* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */
#define DMA_CCR(x)  (0x8c + ((x) << 6)) /* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */
#define DMA_BLR(x)  (0x94 + ((x) << 6)) /* Burst length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */

#define DCR_DRST           (1<<1)
#define DCR_DEN            (1<<0)
#define DBTOCR_EN          (1<<15)
#define DBTOCR_CNT(x)      ((x) & 0x7fff)
#define CNTR_CNT(x)        ((x) & 0xffffff)
#define CCR_ACRPT          (1<<14)
#define CCR_DMOD_LINEAR    (0x0 << 12)
#define CCR_DMOD_2D        (0x1 << 12)
#define CCR_DMOD_FIFO      (0x2 << 12)
#define CCR_DMOD_EOBFIFO   (0x3 << 12)
#define CCR_SMOD_LINEAR    (0x0 << 10)
#define CCR_SMOD_2D        (0x1 << 10)
#define CCR_SMOD_FIFO      (0x2 << 10)
#define CCR_SMOD_EOBFIFO   (0x3 << 10)
#define CCR_MDIR_DEC       (1<<9)
#define CCR_MSEL_B         (1<<8)
#define CCR_DSIZ_32        (0x0 << 6)
#define CCR_DSIZ_8         (0x1 << 6)
#define CCR_DSIZ_16        (0x2 << 6)
#define CCR_SSIZ_32        (0x0 << 4)
#define CCR_SSIZ_8         (0x1 << 4)
#define CCR_SSIZ_16        (0x2 << 4)
#define CCR_REN            (1<<3)
#define CCR_RPT            (1<<2)
#define CCR_FRC            (1<<1)
#define CCR_CEN            (1<<0)
#define RTOR_EN            (1<<15)
#define RTOR_CLK           (1<<14)
#define RTOR_PSC           (1<<13)

enum imxdma_prep_type {
        IMXDMA_DESC_MEMCPY,
        IMXDMA_DESC_INTERLEAVED,
        IMXDMA_DESC_SLAVE_SG,
        IMXDMA_DESC_CYCLIC,
};

/*
 * struct imxdma_channel_internal - i.MX specific DMA extension
 * @resbytes: total residual number of bytes left to transfer
 *            (IMX_DMA_LENGTH_LOOP for cyclic transfers that never finish)
 * @watchdog: timer used to detect stalled hardware-chained transfers
 * @hw_chaining: non-zero if hardware descriptor chaining may be used
 *
 * Per-channel state used internally for the scatter-gather emulation.
 */

struct imxdma_channel_internal {
        unsigned int resbytes;

        struct timer_list watchdog;

        int hw_chaining;
};

struct imxdma_desc {
        struct list_head                node;
        struct dma_async_tx_descriptor  desc;
        enum dma_status                 status;
        dma_addr_t                      src;
        dma_addr_t                      dest;
        size_t                          len;
        enum dma_transfer_direction     direction;
        enum imxdma_prep_type           type;
        /* For memcpy and interleaved */
        unsigned int                    config_port;
        unsigned int                    config_mem;
        /* For interleaved transfers */
        unsigned int                    x;
        unsigned int                    y;
        unsigned int                    w;
        /* For slave sg and cyclic */
        struct scatterlist              *sg;
        unsigned int                    sgcount;
};

struct imxdma_channel {
        struct imxdma_channel_internal  internal;
        struct imxdma_engine            *imxdma;
        unsigned int                    channel;

        struct tasklet_struct           dma_tasklet;
        struct list_head                ld_free;
        struct list_head                ld_queue;
        struct list_head                ld_active;
        int                             descs_allocated;
        enum dma_slave_buswidth         word_size;
        dma_addr_t                      per_address;
        u32                             watermark_level;
        struct dma_chan                 chan;
        spinlock_t                      lock;
        struct dma_async_tx_descriptor  desc;
        enum dma_status                 status;
        int                             dma_request;
        struct scatterlist              *sg_list;
        u32                             ccr_from_device;
        u32                             ccr_to_device;
};

struct imxdma_engine {
        struct device                   *dev;
        struct device_dma_parameters    dma_parms;
        struct dma_device               dma_device;
        struct imxdma_channel           channel[IMX_DMA_CHANNELS];
};

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct imxdma_channel, chan);
}

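/*
 * Returns true if the first descriptor on the active list is a cyclic
 * transfer. Cyclic descriptors stay on ld_active until terminated, so this
 * is used to refuse new preps and to skip descriptor retirement.
 */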
static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
        struct imxdma_desc *desc;

        if (!list_empty(&imxdmac->ld_active)) {
                desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
                                        node);
                if (desc->type == IMXDMA_DESC_CYCLIC)
                        return true;
        }
        return false;
}

/* TODO: put this inside any struct */
static void __iomem *imx_dmav1_baseaddr;
static struct clk *dma_clk;

static void imx_dmav1_writel(unsigned val, unsigned offset)
{
        __raw_writel(val, imx_dmav1_baseaddr + offset);
}

static unsigned imx_dmav1_readl(unsigned offset)
{
        return __raw_readl(imx_dmav1_baseaddr + offset);
}

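/*
 * Hardware descriptor chaining (the CCR_RPT/CCR_ACRPT auto-repeat mechanism)
 * is only used on i.MX27; on other SoCs this always reports 0 so every sg
 * chunk is reprogrammed from the interrupt handler instead.
 */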
static int imxdma_hw_chain(struct imxdma_channel_internal *imxdma)
{
        if (cpu_is_mx27())
                return imxdma->hw_chaining;
        else
                return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d, struct scatterlist *sg)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
        struct imxdma_channel_internal *imxdma = &imxdmac->internal;
        unsigned long now;

        now = min(imxdma->resbytes, sg->length);
        if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP)
                imxdma->resbytes -= now;

        if (d->direction == DMA_DEV_TO_MEM)
                imx_dmav1_writel(sg->dma_address, DMA_DAR(imxdmac->channel));
        else
                imx_dmav1_writel(sg->dma_address, DMA_SAR(imxdmac->channel));

        imx_dmav1_writel(now, DMA_CNTR(imxdmac->channel));

        pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
                "size 0x%08x\n", imxdmac->channel,
                 imx_dmav1_readl(DMA_DAR(imxdmac->channel)),
                 imx_dmav1_readl(DMA_SAR(imxdmac->channel)),
                 imx_dmav1_readl(DMA_CNTR(imxdmac->channel)));

        return now;
}

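/*
 * Start a transfer on the channel: acknowledge any pending interrupt, unmask
 * the channel interrupt and set CCR_CEN. On i.MX21/27 with hardware chaining
 * enabled the next sg chunk is pre-programmed with CCR_RPT/CCR_ACRPT so the
 * controller reloads it automatically.
 */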
static void imxdma_enable_hw(struct imxdma_desc *d)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
        int channel = imxdmac->channel;
        unsigned long flags;

        pr_debug("imxdma%d: imx_dma_enable\n", channel);

        local_irq_save(flags);

        imx_dmav1_writel(1 << channel, DMA_DISR);
        imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
        imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
                CCR_ACRPT, DMA_CCR(channel));

        if ((cpu_is_mx21() || cpu_is_mx27()) &&
                        d->sg && imxdma_hw_chain(&imxdmac->internal)) {
                d->sg = sg_next(d->sg);
                if (d->sg) {
                        u32 tmp;
                        imxdma_sg_next(d, d->sg);
                        tmp = imx_dmav1_readl(DMA_CCR(channel));
                        imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
                                DMA_CCR(channel));
                }
        }

        local_irq_restore(flags);
}

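/*
 * Stop the channel: mask its interrupt, clear CCR_CEN and acknowledge any
 * pending interrupt. The chaining watchdog is cancelled first if it is in
 * use.
 */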
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
        int channel = imxdmac->channel;
        unsigned long flags;

        pr_debug("imxdma%d: imx_dma_disable\n", channel);

        if (imxdma_hw_chain(&imxdmac->internal))
                del_timer(&imxdmac->internal.watchdog);

        local_irq_save(flags);
        imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
        imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
                        DMA_CCR(channel));
        imx_dmav1_writel(1 << channel, DMA_DISR);
        local_irq_restore(flags);
}

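/*
 * Watchdog for hardware-chained transfers: if a chunk does not complete in
 * time the channel is switched off and further handling is left to the
 * channel tasklet.
 */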
static void imxdma_watchdog(unsigned long data)
{
        struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
        int channel = imxdmac->channel;

        imx_dmav1_writel(0, DMA_CCR(channel));

        /* Tasklet watchdog error handler */
        tasklet_schedule(&imxdmac->dma_tasklet);
        pr_debug("imxdma%d: watchdog timeout!\n", imxdmac->channel);
}

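/*
 * Error interrupt handler: collects the burst timeout, request timeout,
 * transfer error and buffer overflow status bits, acknowledges them per
 * channel and kicks the affected channel tasklets.
 */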
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
        struct imxdma_engine *imxdma = dev_id;
        struct imxdma_channel_internal *internal;
        unsigned int err_mask;
        int i, disr;
        int errcode;

        disr = imx_dmav1_readl(DMA_DISR);

        err_mask = imx_dmav1_readl(DMA_DBTOSR) |
                   imx_dmav1_readl(DMA_DRTOSR) |
                   imx_dmav1_readl(DMA_DSESR)  |
                   imx_dmav1_readl(DMA_DBOSR);

        if (!err_mask)
                return IRQ_HANDLED;

        imx_dmav1_writel(disr & err_mask, DMA_DISR);

        for (i = 0; i < IMX_DMA_CHANNELS; i++) {
                if (!(err_mask & (1 << i)))
                        continue;
                internal = &imxdma->channel[i].internal;
                errcode = 0;

                if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
                        imx_dmav1_writel(1 << i, DMA_DBTOSR);
                        errcode |= IMX_DMA_ERR_BURST;
                }
                if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
                        imx_dmav1_writel(1 << i, DMA_DRTOSR);
                        errcode |= IMX_DMA_ERR_REQUEST;
                }
                if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
                        imx_dmav1_writel(1 << i, DMA_DSESR);
                        errcode |= IMX_DMA_ERR_TRANSFER;
                }
                if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
                        imx_dmav1_writel(1 << i, DMA_DBOSR);
                        errcode |= IMX_DMA_ERR_BUFFER;
                }
                /* Tasklet error handler */
                tasklet_schedule(&imxdma->channel[i].dma_tasklet);

                printk(KERN_WARNING
                       "DMA error on channel %d -%s%s%s%s\n", i,
                       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
                       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
                       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
                       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
        }
        return IRQ_HANDLED;
}

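/*
 * Per-channel interrupt work: advance the scatter-gather emulation to the
 * next chunk if one is left (either re-arming the hardware chain or
 * restarting the channel by hand); otherwise stop the channel and let the
 * tasklet complete the descriptor.
 */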
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
        struct imxdma_channel_internal *imxdma = &imxdmac->internal;
        int chno = imxdmac->channel;
        struct imxdma_desc *desc;

        spin_lock(&imxdmac->lock);
        if (list_empty(&imxdmac->ld_active)) {
                spin_unlock(&imxdmac->lock);
                goto out;
        }

        desc = list_first_entry(&imxdmac->ld_active,
                                struct imxdma_desc,
                                node);
        spin_unlock(&imxdmac->lock);

        if (desc->sg) {
                u32 tmp;
                desc->sg = sg_next(desc->sg);

                if (desc->sg) {
                        imxdma_sg_next(desc, desc->sg);

                        tmp = imx_dmav1_readl(DMA_CCR(chno));

                        if (imxdma_hw_chain(imxdma)) {
                                /* FIXME: The timeout should probably be
                                 * configurable
                                 */
                                mod_timer(&imxdma->watchdog,
                                        jiffies + msecs_to_jiffies(500));

                                tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
                                imx_dmav1_writel(tmp, DMA_CCR(chno));
                        } else {
                                imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
                                tmp |= CCR_CEN;
                        }

                        imx_dmav1_writel(tmp, DMA_CCR(chno));

                        if (imxdma_chan_is_doing_cyclic(imxdmac))
                                /* Tasklet progression */
                                tasklet_schedule(&imxdmac->dma_tasklet);

                        return;
                }

                if (imxdma_hw_chain(imxdma)) {
                        del_timer(&imxdma->watchdog);
                        return;
                }
        }

out:
        imx_dmav1_writel(0, DMA_CCR(chno));
        /* Tasklet irq */
        tasklet_schedule(&imxdmac->dma_tasklet);
}

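/*
 * Main DMA interrupt: on i.MX21/27 the error conditions share this line, so
 * the error handler is run first; then every channel flagged in DISR is
 * serviced.
 */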
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
        struct imxdma_engine *imxdma = dev_id;
        struct imxdma_channel_internal *internal;
        int i, disr;

        if (cpu_is_mx21() || cpu_is_mx27())
                imxdma_err_handler(irq, dev_id);

        disr = imx_dmav1_readl(DMA_DISR);

        pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
                     disr);

        imx_dmav1_writel(disr, DMA_DISR);
        for (i = 0; i < IMX_DMA_CHANNELS; i++) {
                if (disr & (1 << i)) {
                        internal = &imxdma->channel[i].internal;
                        dma_irq_handle_channel(&imxdma->channel[i]);
                }
        }

        return IRQ_HANDLED;
}

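/*
 * Program the channel registers for a descriptor (source/destination address,
 * CCR mode and count for memcpy; peripheral address, direction CCR and first
 * sg chunk for slave_sg/cyclic) and enable the hardware.
 */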
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
        struct imxdma_engine *imxdma = imxdmac->imxdma;

        /* Configure and enable */
        switch (d->type) {
        case IMXDMA_DESC_MEMCPY:
                imx_dmav1_writel(d->src, DMA_SAR(imxdmac->channel));
                imx_dmav1_writel(d->dest, DMA_DAR(imxdmac->channel));
                imx_dmav1_writel(d->config_mem | (d->config_port << 2),
                         DMA_CCR(imxdmac->channel));

                imx_dmav1_writel(d->len, DMA_CNTR(imxdmac->channel));

                dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
                        "dma_length=%d\n", __func__, imxdmac->channel,
                        d->dest, d->src, d->len);

                break;
        /* Cyclic transfer is the same as slave_sg with special sg configuration. */
        case IMXDMA_DESC_CYCLIC:
        case IMXDMA_DESC_SLAVE_SG:
                imxdmac->internal.resbytes = d->len;

                if (d->direction == DMA_DEV_TO_MEM) {
                        imx_dmav1_writel(imxdmac->per_address,
                                         DMA_SAR(imxdmac->channel));
                        imx_dmav1_writel(imxdmac->ccr_from_device,
                                         DMA_CCR(imxdmac->channel));

                        dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
                                "total length=%d dev_addr=0x%08x (dev2mem)\n",
                                __func__, imxdmac->channel, d->sg, d->sgcount,
                                d->len, imxdmac->per_address);
                } else if (d->direction == DMA_MEM_TO_DEV) {
                        imx_dmav1_writel(imxdmac->per_address,
                                         DMA_DAR(imxdmac->channel));
                        imx_dmav1_writel(imxdmac->ccr_to_device,
                                         DMA_CCR(imxdmac->channel));

                        dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
                                "total length=%d dev_addr=0x%08x (mem2dev)\n",
                                __func__, imxdmac->channel, d->sg, d->sgcount,
                                d->len, imxdmac->per_address);
                } else {
                        dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
                                __func__, imxdmac->channel);
                        return -EINVAL;
                }

                imxdma_sg_next(d, d->sg);

                break;
        default:
                return -EINVAL;
        }
        imxdma_enable_hw(d);
        return 0;
}

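/*
 * Channel tasklet: runs the client callback and completes the cookie of the
 * first active descriptor. Non-cyclic descriptors are moved back to the free
 * list and the next queued descriptor, if any, is started.
 */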
static void imxdma_tasklet(unsigned long data)
{
        struct imxdma_channel *imxdmac = (void *)data;
        struct imxdma_engine *imxdma = imxdmac->imxdma;
        struct imxdma_desc *desc;

        spin_lock(&imxdmac->lock);

        if (list_empty(&imxdmac->ld_active)) {
                /* Someone might have called terminate all */
                goto out;
        }
        desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

        if (desc->desc.callback)
                desc->desc.callback(desc->desc.callback_param);

        dma_cookie_complete(&desc->desc);

        /* If we are dealing with a cyclic descriptor keep it on ld_active */
        if (imxdma_chan_is_doing_cyclic(imxdmac))
                goto out;

        list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

        if (!list_empty(&imxdmac->ld_queue)) {
                desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
                                        node);
                list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
                if (imxdma_xfer_desc(desc) < 0)
                        dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
                                 __func__, imxdmac->channel);
        }
out:
        spin_unlock(&imxdmac->lock);
}

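/*
 * DMA_TERMINATE_ALL stops the channel and returns all descriptors to the
 * free list. DMA_SLAVE_CONFIG derives the per-channel CCR values, request
 * source and burst length from the slave configuration.
 */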
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                unsigned long arg)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct dma_slave_config *dmaengine_cfg = (void *)arg;
        unsigned long flags;
        unsigned int mode = 0;

        switch (cmd) {
        case DMA_TERMINATE_ALL:
                imxdma_disable_hw(imxdmac);

                spin_lock_irqsave(&imxdmac->lock, flags);
                list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
                list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
                spin_unlock_irqrestore(&imxdmac->lock, flags);
                return 0;
        case DMA_SLAVE_CONFIG:
                if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
                        imxdmac->per_address = dmaengine_cfg->src_addr;
                        imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
                        imxdmac->word_size = dmaengine_cfg->src_addr_width;
                } else {
                        imxdmac->per_address = dmaengine_cfg->dst_addr;
                        imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
                        imxdmac->word_size = dmaengine_cfg->dst_addr_width;
                }

                switch (imxdmac->word_size) {
                case DMA_SLAVE_BUSWIDTH_1_BYTE:
                        mode = IMX_DMA_MEMSIZE_8;
                        break;
                case DMA_SLAVE_BUSWIDTH_2_BYTES:
                        mode = IMX_DMA_MEMSIZE_16;
                        break;
                default:
                case DMA_SLAVE_BUSWIDTH_4_BYTES:
                        mode = IMX_DMA_MEMSIZE_32;
                        break;
                }

                imxdmac->internal.hw_chaining = 1;
                if (!imxdma_hw_chain(&imxdmac->internal))
                        return -EINVAL;
                imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
                        ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
                        CCR_REN;
                imxdmac->ccr_to_device =
                        (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
                        ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
                imx_dmav1_writel(imxdmac->dma_request,
                                 DMA_RSSR(imxdmac->channel));

                /* Set burst length */
                imx_dmav1_writel(imxdmac->watermark_level * imxdmac->word_size,
                                 DMA_BLR(imxdmac->channel));

                return 0;
        default:
                return -ENOSYS;
        }

        return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
                                            dma_cookie_t cookie,
                                            struct dma_tx_state *txstate)
{
        return dma_cookie_status(chan, cookie, txstate);
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
        dma_cookie_t cookie;
        unsigned long flags;

        spin_lock_irqsave(&imxdmac->lock, flags);
        cookie = dma_cookie_assign(tx);
        spin_unlock_irqrestore(&imxdmac->lock, flags);

        return cookie;
}

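/*
 * Pre-allocate a pool of descriptors for the channel (up to
 * IMXDMA_MAX_CHAN_DESCRIPTORS) and pick up the request line passed via
 * chan->private.
 */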
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imx_dma_data *data = chan->private;

        if (data != NULL)
                imxdmac->dma_request = data->dma_request;

        while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
                struct imxdma_desc *desc;

                desc = kzalloc(sizeof(*desc), GFP_KERNEL);
                if (!desc)
                        break;
                __memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor));
                dma_async_tx_descriptor_init(&desc->desc, chan);
                desc->desc.tx_submit = imxdma_tx_submit;
                /* txd.flags will be overwritten in prep funcs */
                desc->desc.flags = DMA_CTRL_ACK;
                desc->status = DMA_SUCCESS;

                list_add_tail(&desc->node, &imxdmac->ld_free);
                imxdmac->descs_allocated++;
        }

        if (!imxdmac->descs_allocated)
                return -ENOMEM;

        return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imxdma_desc *desc, *_desc;
        unsigned long flags;

        spin_lock_irqsave(&imxdmac->lock, flags);

        imxdma_disable_hw(imxdmac);
        list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
        list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

        spin_unlock_irqrestore(&imxdmac->lock, flags);

        list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
                kfree(desc);
                imxdmac->descs_allocated--;
        }
        INIT_LIST_HEAD(&imxdmac->ld_free);

        if (imxdmac->sg_list) {
                kfree(imxdmac->sg_list);
                imxdmac->sg_list = NULL;
        }
}

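/*
 * Prepare a slave scatter-gather transfer: take a free descriptor, check the
 * alignment of the first sg entry against the configured bus width and record
 * the total length and peripheral address. The hardware is only programmed
 * later, when the descriptor is actually issued.
 */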
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct scatterlist *sg;
        int i, dma_length = 0;
        struct imxdma_desc *desc;

        if (list_empty(&imxdmac->ld_free) ||
            imxdma_chan_is_doing_cyclic(imxdmac))
                return NULL;

        desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

        for_each_sg(sgl, sg, sg_len, i) {
                dma_length += sg->length;
        }

        switch (imxdmac->word_size) {
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                if (sgl->length & 3 || sgl->dma_address & 3)
                        return NULL;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                if (sgl->length & 1 || sgl->dma_address & 1)
                        return NULL;
                break;
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                break;
        default:
                return NULL;
        }

        desc->type = IMXDMA_DESC_SLAVE_SG;
        desc->sg = sgl;
        desc->sgcount = sg_len;
        desc->len = dma_length;
        desc->direction = direction;
        if (direction == DMA_DEV_TO_MEM) {
                desc->src = imxdmac->per_address;
        } else {
                desc->dest = imxdmac->per_address;
        }
        desc->desc.callback = NULL;
        desc->desc.callback_param = NULL;

        return &desc->desc;
}

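/*
 * Prepare a cyclic transfer: build a scatterlist with one entry per period
 * and chain the terminating entry back to the first one, so the sg emulation
 * loops over the buffer until the channel is terminated.
 */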
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction direction,
                void *context)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imxdma_engine *imxdma = imxdmac->imxdma;
        struct imxdma_desc *desc;
        int i;
        unsigned int periods = buf_len / period_len;

        dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
                        __func__, imxdmac->channel, buf_len, period_len);

        if (list_empty(&imxdmac->ld_free) ||
            imxdma_chan_is_doing_cyclic(imxdmac))
                return NULL;

        desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

        if (imxdmac->sg_list)
                kfree(imxdmac->sg_list);

        imxdmac->sg_list = kcalloc(periods + 1,
                        sizeof(struct scatterlist), GFP_KERNEL);
        if (!imxdmac->sg_list)
                return NULL;

        sg_init_table(imxdmac->sg_list, periods);

        for (i = 0; i < periods; i++) {
                imxdmac->sg_list[i].page_link = 0;
                imxdmac->sg_list[i].offset = 0;
                imxdmac->sg_list[i].dma_address = dma_addr;
                imxdmac->sg_list[i].length = period_len;
                dma_addr += period_len;
        }

        /* close the loop */
        imxdmac->sg_list[periods].offset = 0;
        imxdmac->sg_list[periods].length = 0;
        imxdmac->sg_list[periods].page_link =
                ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

        desc->type = IMXDMA_DESC_CYCLIC;
        desc->sg = imxdmac->sg_list;
        desc->sgcount = periods;
        desc->len = IMX_DMA_LENGTH_LOOP;
        desc->direction = direction;
        if (direction == DMA_DEV_TO_MEM) {
                desc->src = imxdmac->per_address;
        } else {
                desc->dest = imxdmac->per_address;
        }
        desc->desc.callback = NULL;
        desc->desc.callback_param = NULL;

        return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
        struct dma_chan *chan, dma_addr_t dest,
        dma_addr_t src, size_t len, unsigned long flags)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imxdma_engine *imxdma = imxdmac->imxdma;
        struct imxdma_desc *desc;

        dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
                        __func__, imxdmac->channel, src, dest, len);

        if (list_empty(&imxdmac->ld_free) ||
            imxdma_chan_is_doing_cyclic(imxdmac))
                return NULL;

        desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

        desc->type = IMXDMA_DESC_MEMCPY;
        desc->src = src;
        desc->dest = dest;
        desc->len = len;
        desc->direction = DMA_MEM_TO_MEM;
        desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
        desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
        desc->desc.callback = NULL;
        desc->desc.callback_param = NULL;

        return &desc->desc;
}

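/*
 * Start the first queued descriptor if the channel is currently idle;
 * otherwise it will be started from the tasklet once the active descriptor
 * completes.
 */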
static void imxdma_issue_pending(struct dma_chan *chan)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imxdma_engine *imxdma = imxdmac->imxdma;
        struct imxdma_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&imxdmac->lock, flags);
        if (list_empty(&imxdmac->ld_active) &&
            !list_empty(&imxdmac->ld_queue)) {
                desc = list_first_entry(&imxdmac->ld_queue,
                                        struct imxdma_desc, node);

                if (imxdma_xfer_desc(desc) < 0) {
                        dev_warn(imxdma->dev,
                                 "%s: channel: %d couldn't issue DMA xfer\n",
                                 __func__, imxdmac->channel);
                } else {
                        list_move_tail(imxdmac->ld_queue.next,
                                       &imxdmac->ld_active);
                }
        }
        spin_unlock_irqrestore(&imxdmac->lock, flags);
}

static int __init imxdma_probe(struct platform_device *pdev)
{
        struct imxdma_engine *imxdma;
        int ret, i;

        if (cpu_is_mx1())
                imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
        else if (cpu_is_mx21())
                imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
        else if (cpu_is_mx27())
                imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
        else
                return 0;

        dma_clk = clk_get(NULL, "dma");
        if (IS_ERR(dma_clk))
                return PTR_ERR(dma_clk);
        clk_enable(dma_clk);

        /* reset DMA module */
        imx_dmav1_writel(DCR_DRST, DMA_DCR);

        if (cpu_is_mx1()) {
                ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
                if (ret) {
                        pr_crit("Can't register IRQ for DMA\n");
                        return ret;
                }

                ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
                if (ret) {
                        pr_crit("Can't register ERRIRQ for DMA\n");
                        free_irq(MX1_DMA_INT, NULL);
                        return ret;
                }
        }

        /* enable DMA module */
        imx_dmav1_writel(DCR_DEN, DMA_DCR);

        /* clear all interrupts */
        imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

        /* disable interrupts */
        imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

        imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
        if (!imxdma)
                return -ENOMEM;

        INIT_LIST_HEAD(&imxdma->dma_device.channels);

        dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
        dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
        dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);

        /* Initialize channel parameters */
        for (i = 0; i < IMX_DMA_CHANNELS; i++) {
                struct imxdma_channel *imxdmac = &imxdma->channel[i];
                memset(&imxdmac->internal, 0, sizeof(imxdmac->internal));
                if (cpu_is_mx21() || cpu_is_mx27()) {
                        ret = request_irq(MX2x_INT_DMACH0 + i,
                                        dma_irq_handler, 0, "DMA", imxdma);
                        if (ret) {
                                pr_crit("Can't register IRQ %d for DMA channel %d\n",
                                                MX2x_INT_DMACH0 + i, i);
                                goto err_init;
                        }
                        init_timer(&imxdmac->internal.watchdog);
                        imxdmac->internal.watchdog.function = &imxdma_watchdog;
                        imxdmac->internal.watchdog.data = (unsigned long)imxdmac;
                }

                imxdmac->imxdma = imxdma;
                spin_lock_init(&imxdmac->lock);

                INIT_LIST_HEAD(&imxdmac->ld_queue);
                INIT_LIST_HEAD(&imxdmac->ld_free);
                INIT_LIST_HEAD(&imxdmac->ld_active);

                tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
                             (unsigned long)imxdmac);
                imxdmac->chan.device = &imxdma->dma_device;
                dma_cookie_init(&imxdmac->chan);
                imxdmac->channel = i;

                /* Add the channel to the DMAC list */
                list_add_tail(&imxdmac->chan.device_node,
                              &imxdma->dma_device.channels);
        }

        imxdma->dev = &pdev->dev;
        imxdma->dma_device.dev = &pdev->dev;

        imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
        imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
        imxdma->dma_device.device_tx_status = imxdma_tx_status;
        imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
        imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
        imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
        imxdma->dma_device.device_control = imxdma_control;
        imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

        platform_set_drvdata(pdev, imxdma);

        imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
        imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
        dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

        ret = dma_async_device_register(&imxdma->dma_device);
        if (ret) {
                dev_err(&pdev->dev, "unable to register\n");
                goto err_init;
        }

        return 0;

err_init:

        if (cpu_is_mx21() || cpu_is_mx27()) {
                while (--i >= 0)
                        free_irq(MX2x_INT_DMACH0 + i, NULL);
        } else if (cpu_is_mx1()) {
                free_irq(MX1_DMA_INT, NULL);
                free_irq(MX1_DMA_ERR, NULL);
        }

        kfree(imxdma);
        return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
        struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
        int i;

        dma_async_device_unregister(&imxdma->dma_device);

        if (cpu_is_mx21() || cpu_is_mx27()) {
                for (i = 0; i < IMX_DMA_CHANNELS; i++)
                        free_irq(MX2x_INT_DMACH0 + i, NULL);
        } else if (cpu_is_mx1()) {
                free_irq(MX1_DMA_INT, NULL);
                free_irq(MX1_DMA_ERR, NULL);
        }

        kfree(imxdma);

        return 0;
}

static struct platform_driver imxdma_driver = {
        .driver         = {
                .name   = "imx-dma",
        },
        .remove         = __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
        return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");