/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems.)
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"
/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */
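
/*
 * Slave usage sketch (illustrative only, not taken from any particular
 * board): clients reach this driver through the generic dmaengine API and
 * hand it a struct dw_dma_slave via dma_chan->private.  Only fields that
 * this file consumes are shown; the device name and FIFO addresses below
 * are made up for the example.
 *
 *	static struct dw_dma_slave example_dws = {
 *		.dma_dev	= &dw_dmac0_device.dev,	// the dw_dmac platform device (hypothetical)
 *		.reg_width	= DW_DMA_SLAVE_WIDTH_32BIT,
 *		.tx_reg		= 0xfff01000,		// peripheral TX FIFO (example address)
 *		.rx_reg		= 0xfff01000,		// peripheral RX FIFO (example address)
 *		.src_master	= 1,
 *		.dst_master	= 0,
 *	};
 *
 *	chan->private = &example_dws;
 *
 * Other fields (cfg_lo/cfg_hi, fc, src_msize/dst_msize) are likewise
 * platform-specific and are read in dwc_alloc_chan_resources() and the
 * prep routines below.
 */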
#define DWC_DEFAULT_CTLLO(private) ({				\
		struct dw_dma_slave *__slave = (private);	\
		int dms = __slave ? __slave->dst_master : 0;	\
		int sms = __slave ? __slave->src_master : 1;	\
		u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \
		u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \
								\
		(DWC_CTLL_DST_MSIZE(dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(dms)				\
		 | DWC_CTLL_SMS(sms));				\
	})
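
/*
 * For reference (this mirrors the prep routines later in this file): a
 * slave transfer ORs its own transfer widths, address-increment mode and
 * flow control into this default, e.g.
 *
 *	ctllo = DWC_DEFAULT_CTLLO(chan->private)
 *			| DWC_CTLL_DST_WIDTH(reg_width)
 *			| DWC_CTLL_DST_FIX
 *			| DWC_CTLL_SRC_INC
 *			| DWC_CTLL_FC(dws->fc);
 */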
/*
 * This is configuration-dependent and usually a funny size like 4095.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 16380 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	4095U
/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64
/*----------------------------------------------------------------------*/
/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&dwc->lock);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_bh(&dwc->lock);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}
static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc	*child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}
/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_bh(&dwc->lock);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_bh(&dwc->lock);
	}
}
/* Called with dwc->lock held and bh disabled */
static dma_cookie_t
dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_cookie_t cookie = dwc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	dwc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

/*----------------------------------------------------------------------*/
/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/
static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_async_tx_callback		callback;
	void				*param;
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	dwc->completed = txd->cookie;
	callback = txd->callback;
	param = txd->callback_param;

	dwc_sync_desc_for_cpu(dwc, desc);

	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);

	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc);
}
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;

	/*
	 * Clear block interrupt flag before scanning so that we don't
	 * miss any, and read LLP before RAW_XFER to ensure it is
	 * valid if we decide to scan the list.
	 */
	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list))
		return;

	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* check first descriptors addr */
		if (desc->txd.phys == llp)
			return;

		/* check first descriptors llp */
		if (desc->lli.llp == llp)
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp)
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		dwc_descriptor_complete(dwc, desc);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
}
static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp,
			lli->ctlhi, lli->ctllo);
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;

	dwc_scan_descriptors(dw, dwc);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc);
}
/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);
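
/*
 * Example use (illustrative only): a cyclic client can derive its current
 * position within the ring buffer from these getters, e.g. for a
 * memory-to-peripheral stream:
 *
 *	dma_addr_t pos = dw_dma_get_src_addr(chan) - buf_phys;
 *
 * where buf_phys is the buf_addr that was passed to dw_dma_cyclic_prep().
 */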
/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_block, u32 status_err, u32 status_xfer)
{
	if (status_block & dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));
		dma_writel(dw, CLEAR.BLOCK, dwc->mask);

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;
		if (callback) {
			spin_unlock(&dwc->lock);
			callback(callback_param);
			spin_lock(&dwc->lock);
		}
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
	}
}

/* ------------------------------------------------------------------------- */
static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_block;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_block = dma_readl(dw, RAW.BLOCK);
	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
			status_block, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		spin_lock(&dwc->lock);
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_block, status_err,
					status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if ((status_block | status_xfer) & (1 << i))
			dwc_scan_descriptors(dw, dwc);
		spin_unlock(&dwc->lock);
	}

	/*
	 * Re-enable interrupts. Block Complete interrupts are only
	 * enabled if the INT_EN bit in the descriptor is set. This
	 * will trigger a scan before the whole list is done.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/
static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;

	spin_lock_bh(&dwc->lock);
	cookie = dwc_assign_cookie(dwc, desc);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_bh(&dwc->lock);

	return cookie;
}
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 7))
		src_width = dst_width = 3;
	else if (!((src | dest | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;

	ctllo = DWC_DEFAULT_CTLLO(chan->private)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
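
/*
 * Typical dmaengine call sequence for the memcpy operation above (sketch
 * only; error handling omitted, and done_fn/done_arg are hypothetical
 * client names):
 *
 *	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						    DMA_PREP_INTERRUPT);
 *	txd->callback = done_fn;
 *	txd->callback_param = done_arg;
 *	cookie = txd->tx_submit(txd);		// lands in dwc_tx_submit()
 *	chan->device->device_issue_pending(chan);
 */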
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave	*dws = chan->private;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	reg_width = dws->reg_width;
	prev = first = NULL;

	switch (direction) {
	case DMA_TO_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC
				| DWC_CTLL_FC(dws->fc));
		reg = dws->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len;
			u32		mem;

			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			desc->lli.ctlhi = len >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	case DMA_FROM_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX
				| DWC_CTLL_FC(dws->fc));

		reg = dws->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len;
			u32		mem;

			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			desc->lli.ctlhi = len >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	LIST_HEAD(list);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_bh(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);

	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_bh(&dwc->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc);

	return 0;
}
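
/*
 * Illustrative only: a client aborts everything on a channel by issuing
 * the terminate command through the dmaengine control hook implemented
 * above:
 *
 *	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 */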
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	int			ret;

	last_complete = dwc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		spin_lock_bh(&dwc->lock);
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
		spin_unlock_bh(&dwc->lock);

		last_complete = dwc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	dma_set_tx_state(txstate, last_complete, last_used, 0);

	return ret;
}
static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);

	spin_lock_bh(&dwc->lock);
	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
	spin_unlock_bh(&dwc->lock);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_dma_slave	*dws;
	int			i;
	u32			cfghi;
	u32			cfglo;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dwc->completed = chan->cookie = 1;

	cfghi = DWC_CFGH_FIFO_MODE;
	cfglo = 0;

	dws = chan->private;
	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	}

	cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_bh(&dwc->lock);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&dwc->lock);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_bh(&dwc->lock);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_bh(&dwc->lock);
		i = ++dwc->descs_allocated;
	}

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}
static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_bh(&dwc->lock);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}
/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock(&dwc->lock);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));
		spin_unlock(&dwc->lock);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock(&dwc->lock);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);
/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);

	spin_lock(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	spin_unlock(&dwc->lock);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);
/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_data_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	struct dw_dma_slave		*dws = chan->private;
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;

	spin_lock_bh(&dwc->lock);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_bh(&dwc->lock);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_bh(&dwc->lock);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);
	reg_width = dws->reg_width;
	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_TO_DEVICE:
			desc->lli.dar = dws->tx_reg;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_FC(dws->fc)
					| DWC_CTLL_INT_EN);
			break;
		case DMA_FROM_DEVICE:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = dws->rx_reg;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_FC(dws->fc)
					| DWC_CTLL_INT_EN);
			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* lets make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
			"period %zu periods %d\n", buf_addr, buf_len,
			period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);
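
/*
 * Putting the cyclic extensions together (sketch only, assuming a client
 * that already owns a dw_dmac channel and a DMA-mapped ring buffer;
 * period_done/dev are hypothetical client names):
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf_phys, buf_len, period_len,
 *				   DMA_TO_DEVICE);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *	cdesc->period_callback = period_done;
 *	cdesc->period_callback_param = dev;
 *	ret = dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */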
/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
	int			i;

	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");

	if (!cdesc)
		return;

	spin_lock_bh(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();
}
static int __init dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource		*io;
	struct dw_dma		*dw;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_enable(dw->clk);

	/* force dma off, just in case */
	dw_dma_off(dw);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dwc->chan.cookie = dwc->completed = 1;
		dwc->chan.chan_id = i;
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = 7 - i;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear/disable all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), dw->dma.chancnt);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_resource(io);
err_kfree:
	kfree(dw);
	return err;
}
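
/*
 * Board-level wiring sketch (illustrative only; the numbers are made up):
 * the probe above only needs platform data plus MEM and IRQ resources,
 * e.g.
 *
 *	static struct dw_dma_platform_data dw_dmac_pdata = {
 *		.nr_channels		= 8,
 *		.is_private		= 1,
 *		.chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,
 *		.chan_priority		= CHAN_PRIORITY_ASCENDING,
 *	};
 *
 * registered as a platform device named "dw_dmac" with one IORESOURCE_MEM
 * and one IORESOURCE_IRQ resource.
 */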
static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma		*dw = platform_get_drvdata(pdev);
	struct dw_dma_chan	*dwc, *_dwc;
	struct resource		*io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable(dw->clk);
	clk_put(dw->clk);

	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	kfree(dw);

	return 0;
}
static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
}
static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
	return 0;
}

static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	clk_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}

static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
};
static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
	},
};
static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");