/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27.
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>

#include <asm/irq.h>
#include <mach/dma.h>
#include <mach/hardware.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS	16

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)
#define DMA_DCR		0x00	/* Control Register */
#define DMA_DISR	0x04	/* Interrupt Status Register */
#define DMA_DIMR	0x08	/* Interrupt Mask Register */
#define DMA_DBTOSR	0x0c	/* Burst Timeout Status Register */
#define DMA_DRTOSR	0x10	/* Request Timeout Status Register */
#define DMA_DSESR	0x14	/* Transfer Error Status Register */
#define DMA_DBOSR	0x18	/* Buffer Overflow Status Register */
#define DMA_DBTOCR	0x1c	/* Burst Timeout Control Register */
#define DMA_WSRA	0x40	/* W-Size Register A */
#define DMA_XSRA	0x44	/* X-Size Register A */
#define DMA_YSRA	0x48	/* Y-Size Register A */
#define DMA_WSRB	0x4c	/* W-Size Register B */
#define DMA_XSRB	0x50	/* X-Size Register B */
#define DMA_YSRB	0x54	/* Y-Size Register B */
#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request Source Select Registers */
#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst Length Registers */
#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request Timeout Registers */
#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Registers (shares offset 0x98 with RTOR) */
#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel Counter Registers */
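
/*
 * Each channel owns a 0x40-byte register window starting at 0x80, so the
 * per-channel accessors above index channel x with a ((x) << 6) byte stride.
 */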
#define DCR_DRST	(1<<1)
#define DCR_DEN		(1<<0)
#define DBTOCR_EN	(1<<15)
#define DBTOCR_CNT(x)	((x) & 0x7fff)
#define CNTR_CNT(x)	((x) & 0xffffff)
#define CCR_ACRPT	(1<<14)
#define CCR_DMOD_LINEAR	(0x0 << 12)
#define CCR_DMOD_2D	(0x1 << 12)
#define CCR_DMOD_FIFO	(0x2 << 12)
#define CCR_DMOD_EOBFIFO (0x3 << 12)
#define CCR_SMOD_LINEAR	(0x0 << 10)
#define CCR_SMOD_2D	(0x1 << 10)
#define CCR_SMOD_FIFO	(0x2 << 10)
#define CCR_SMOD_EOBFIFO (0x3 << 10)
#define CCR_MDIR_DEC	(1<<9)
#define CCR_MSEL_B	(1<<8)
#define CCR_DSIZ_32	(0x0 << 6)
#define CCR_DSIZ_8	(0x1 << 6)
#define CCR_DSIZ_16	(0x2 << 6)
#define CCR_SSIZ_32	(0x0 << 4)
#define CCR_SSIZ_8	(0x1 << 4)
#define CCR_SSIZ_16	(0x2 << 4)
#define CCR_REN		(1<<3)
#define CCR_RPT		(1<<2)
#define CCR_FRC		(1<<1)
#define CCR_CEN		(1<<0)
#define RTOR_EN		(1<<15)
#define RTOR_CLK	(1<<14)
#define RTOR_PSC	(1<<13)
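
/*
 * CCR field layout: SSIZ (bits 4-5) and SMOD (bits 10-11) describe the
 * source side of a transfer, DSIZ (bits 6-7) and DMOD (bits 12-13) the
 * destination side. The IMX_DMA_MEMSIZE_x and IMX_DMA_TYPE_x values above
 * use the source-side encoding; shifting them left by 2 yields the matching
 * destination-side bits, which is how the CCR values are composed in
 * imxdma_control() and imxdma_xfer_desc() below.
 */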
enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};
/*
 * struct imxdma_channel_internal - i.MX specific DMA extension
 * @name: name specified by DMA client
 * @irq_handler: client callback for end of transfer
 * @err_handler: client callback for error condition
 * @data: client's context data for callbacks
 * @dma_mode: direction of the transfer, %DMA_MODE_READ or %DMA_MODE_WRITE
 * @sg: pointer to the actual read/written chunk for scatter-gather emulation
 * @resbytes: total residual number of bytes to transfer
 *            (it can be lower than or equal to the sum of SG mapped chunk sizes)
 * @sgcount: number of chunks to be read/written
 *
 * This structure is used for i.MX DMA processing. It would probably be
 * better to expose @struct dma_struct for external interfacing in the
 * future and use @struct imxdma_channel_internal only as an extension to it.
 */
struct imxdma_channel_internal {
	unsigned int resbytes;

	struct timer_list watchdog;

	int hw_chaining;
};
struct imxdma_desc {
	struct list_head node;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	enum dma_transfer_direction direction;
	enum imxdma_prep_type type;
	/* For memcpy and interleaved */
	unsigned int config_port;
	unsigned int config_mem;
	/* For interleaved transfers */
	unsigned int x;
	unsigned int y;
	unsigned int w;
	/* For slave sg and cyclic */
	struct scatterlist *sg;
	unsigned int sgcount;
};
struct imxdma_channel {
	struct imxdma_channel_internal internal;
	struct imxdma_engine *imxdma;
	unsigned int channel;

	struct tasklet_struct dma_tasklet;
	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;
	int descs_allocated;
	enum dma_slave_buswidth word_size;
	dma_addr_t per_address;
	u32 watermark_level;
	struct dma_chan chan;
	spinlock_t lock;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	int dma_request;
	struct scatterlist *sg_list;
	u32 ccr_from_device;
	u32 ccr_to_device;
};
struct imxdma_engine {
	struct device *dev;
	struct device_dma_parameters dma_parms;
	struct dma_device dma_device;
	struct imxdma_channel channel[IMX_DMA_CHANNELS];
};
static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}
static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}
/* TODO: put this inside any struct */
static void __iomem *imx_dmav1_baseaddr;
static struct clk *dma_clk;

static void imx_dmav1_writel(unsigned val, unsigned offset)
{
	__raw_writel(val, imx_dmav1_baseaddr + offset);
}

static unsigned imx_dmav1_readl(unsigned offset)
{
	return __raw_readl(imx_dmav1_baseaddr + offset);
}
static int imxdma_hw_chain(struct imxdma_channel_internal *imxdma)
{
	/* Hardware descriptor chaining is only usable on i.MX27 */
	if (cpu_is_mx27())
		return imxdma->hw_chaining;
	else
		return 0;
}
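
/*
 * Scatter-gather is advanced in one of two ways: with hardware chaining
 * (i.MX27 only) the next chunk is preloaded via CCR_RPT/CCR_ACRPT while the
 * current one is still in flight, guarded by the watchdog timer; everywhere
 * else the driver reprograms SAR/DAR/CNTR from the completion interrupt,
 * one chunk at a time.
 */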
/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d, struct scatterlist *sg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
	unsigned long now;

	now = min(imxdma->resbytes, sg->length);
	if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP)
		imxdma->resbytes -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(sg->dma_address, DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(sg->dma_address, DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(now, DMA_CNTR(imxdmac->channel));

	pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
		 "size 0x%08x\n", imxdmac->channel,
		 imx_dmav1_readl(DMA_DAR(imxdmac->channel)),
		 imx_dmav1_readl(DMA_SAR(imxdmac->channel)),
		 imx_dmav1_readl(DMA_CNTR(imxdmac->channel)));

	return now;
}
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_enable\n", channel);

	local_irq_save(flags);

	imx_dmav1_writel(1 << channel, DMA_DISR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
			 CCR_ACRPT, DMA_CCR(channel));

	if ((cpu_is_mx21() || cpu_is_mx27()) &&
			d->sg && imxdma_hw_chain(&imxdmac->internal)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d, d->sg);
			tmp = imx_dmav1_readl(DMA_CCR(channel));
			imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_disable\n", channel);

	if (imxdma_hw_chain(&imxdmac->internal))
		del_timer(&imxdmac->internal.watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
			 DMA_CCR(channel));
	imx_dmav1_writel(1 << channel, DMA_DISR);
	local_irq_restore(flags);
}
static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	int channel = imxdmac->channel;

	imx_dmav1_writel(0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	pr_debug("imxdma%d: watchdog timeout!\n", imxdmac->channel);
}
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	struct imxdma_channel_internal *internal;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(DMA_DISR);

	err_mask = imx_dmav1_readl(DMA_DBTOSR) |
		   imx_dmav1_readl(DMA_DRTOSR) |
		   imx_dmav1_readl(DMA_DSESR) |
		   imx_dmav1_readl(DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		internal = &imxdma->channel[i].internal;
		errcode = 0;

		if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		printk(KERN_WARNING
		       "DMA timeout on channel %d -%s%s%s%s\n", i,
		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;

	spin_lock(&imxdmac->lock);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock(&imxdmac->lock);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc, node);
	spin_unlock(&imxdmac->lock);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc, desc->sg);

			tmp = imx_dmav1_readl(DMA_CCR(chno));

			if (imxdma_hw_chain(imxdma)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdma->watchdog,
					  jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdma)) {
			del_timer(&imxdma->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}
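
/*
 * i.MX1 routes all channels through one interrupt line (plus a separate
 * error line), while i.MX21/27 have one line per channel; in the latter
 * case every line enters dma_irq_handler() below, which therefore polls
 * the error handler first.
 */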
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	struct imxdma_channel_internal *internal;
	int i, disr;

	if (cpu_is_mx21() || cpu_is_mx27())
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(DMA_DISR);

	pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n", disr);

	imx_dmav1_writel(disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i)) {
			internal = &imxdma->channel[i].internal;
			dma_irq_handle_channel(&imxdma->channel[i]);
		}
	}

	return IRQ_HANDLED;
}
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(d->config_mem | (d->config_port << 2),
				 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
			"dma_length=%d\n", __func__, imxdmac->channel,
			d->dest, d->src, d->len);
		break;

	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		imxdmac->internal.resbytes = d->len;

		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (dev2mem)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (mem2dev)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d, d->sg);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdmac->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	dma_cookie_complete(&desc->desc);

	/* If we are dealing with a cyclic descriptor keep it on ld_active */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdmac->lock);
}
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			  unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdma_disable_hw(imxdmac);

		spin_lock_irqsave(&imxdmac->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdmac->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}

		imxdmac->internal.hw_chaining = 1;
		if (!imxdma_hw_chain(&imxdmac->internal))
			return -EINVAL;
		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
			CCR_REN;
		imxdmac->ccr_to_device =
			(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
			((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
		imx_dmav1_writel(imxdmac->dma_request,
				 DMA_RSSR(imxdmac->channel));

		/* Set burst length */
		imx_dmav1_writel(imxdmac->watermark_level * imxdmac->word_size,
				 DMA_BLR(imxdmac->channel));

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	/* Queue the descriptor; imxdma_issue_pending() will start it */
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdmac->lock, flags);

	return cookie;
}
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	if (imxdmac->sg_list) {
		kfree(imxdmac->sg_list);
		imxdmac->sg_list = NULL;
	}
}
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg->length;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sgl->length & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sgl->length & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
		__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	if (imxdmac->sg_list)
		kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%zu\n",
		__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdmac->lock, flags);
}
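
/*
 * Probing predates device tree on these SoCs: the register base is selected
 * with cpu_is_mx*() checks into the file-scope imx_dmav1_baseaddr (see the
 * TODO above) and the interrupt numbers come from the mach-level headers.
 */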
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	if (cpu_is_mx1())
		imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
	else if (cpu_is_mx21())
		imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
	else if (cpu_is_mx27())
		imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
	else
		return 0;

	dma_clk = clk_get(NULL, "dma");
	if (IS_ERR(dma_clk))
		return PTR_ERR(dma_clk);
	clk_enable(dma_clk);

	/* reset DMA module */
	imx_dmav1_writel(DCR_DRST, DMA_DCR);

	/* Allocate the engine before the IRQ handlers receive it as dev_id */
	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	if (cpu_is_mx1()) {
		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register IRQ for DMA\n");
			goto err_free;
		}

		ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register ERRIRQ for DMA\n");
			free_irq(MX1_DMA_INT, imxdma);
			goto err_free;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		memset(&imxdmac->internal, 0, sizeof(imxdmac->internal));
		if (cpu_is_mx21() || cpu_is_mx27()) {
			ret = request_irq(MX2x_INT_DMACH0 + i,
					  dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				pr_crit("Can't register IRQ %d for DMA channel %d\n",
					MX2x_INT_DMACH0 + i, i);
				goto err_init;
			}
			init_timer(&imxdmac->internal.watchdog);
			imxdmac->internal.watchdog.function = &imxdma_watchdog;
			imxdmac->internal.watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;
		spin_lock_init(&imxdmac->lock);

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:
	if (cpu_is_mx21() || cpu_is_mx27()) {
		/* only free the channel IRQs actually requested */
		while (--i >= 0)
			free_irq(MX2x_INT_DMACH0 + i, imxdma);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, imxdma);
		free_irq(MX1_DMA_ERR, imxdma);
	}
err_free:
	kfree(imxdma);
	return ret;
}
static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	if (cpu_is_mx21() || cpu_is_mx27()) {
		for (i = 0; i < IMX_DMA_CHANNELS; i++)
			free_irq(MX2x_INT_DMACH0 + i, imxdma);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, imxdma);
		free_irq(MX1_DMA_ERR, imxdma);
	}

	kfree(imxdma);

	return 0;
}
static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.remove		= __exit_p(imxdma_remove),
};
static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");