/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This supports the Atmel AHB DMA Controller.
 *
 * The driver has currently been tested with the Atmel AT91SAM9RL
 * and AT91SAM9G45 series.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "at_hdmac_regs.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_ / atchan	: Atmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLA	(0)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

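/*
 * Example (illustrative, not part of the original file): the pool size above
 * can be tuned without rebuilding, e.g. with
 * "at_hdmac.init_nr_desc_per_channel=128" on the kernel command line when the
 * driver is built in, or "modprobe at_hdmac init_nr_desc_per_channel=128"
 * when it is built as a module.
 */
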
/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);

/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}

/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}

/**
 * atc_assign_cookie - compute and assign new cookie
 * @atchan: channel we work on
 * @desc: descriptor to assign cookie for
 *
 * Called with atchan->lock held and bh disabled
 */
static dma_cookie_t
atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
{
	dma_cookie_t cookie = atchan->chan_common.cookie;

	if (++cookie < 0)
		cookie = 1;

	atchan->chan_common.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	atchan->completed_cookie = txd->cookie;

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses (not on slave channels) */
	if (!atchan->chan_common.private) {
		struct device *parent = chan2parent(&atchan->chan_common);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/* for cyclic transfers,
	 * no need to replay the callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Submit any queued descriptors
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	BUG_ON(atc_chan_is_enabled(atchan));

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc	*desc, *_desc;
	struct at_desc	*child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}

/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;
	dma_async_tx_callback		callback = txd->callback;
	void				*param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}
	} while (pending);

	return ret;
}

/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = atc_assign_cookie(atchan, desc);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrla =   ATC_DEFAULT_CTRLA;
	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}

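/*
 * Example (illustrative sketch, not part of the original driver): a memcpy
 * client reaches the prep routine above through the generic dmaengine API.
 * The names dst_phys, src_phys and len are placeholders for addresses the
 * caller has already DMA-mapped; a real client would also wait for
 * completion before releasing the channel.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (chan) {
 *		tx = chan->device->device_prep_dma_memcpy(chan, dst_phys,
 *							  src_phys, len, 0);
 *		if (tx) {
 *			cookie = tx->tx_submit(tx);
 *			dma_async_issue_pending(chan);
 *		}
 *		dma_release_channel(chan);
 *	}
 */
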
/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	reg_width = atslave->reg_width;

	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_TO_DEVICE:
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
		reg = atslave->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_FROM_DEVICE:
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);

		reg = atslave->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;

	/* first link descriptor of list is responsible for flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
	atc_desc_put(atchan, first);
	return NULL;
}

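/*
 * Example (illustrative sketch, not from the original file): slave transfers
 * rely on the controller-specific struct at_dma_slave being attached to
 * chan->private, typically from the dma_request_channel() filter function of
 * the peripheral driver (the structure itself usually comes from platform
 * data). A hypothetical filter could look roughly like:
 *
 *	static bool at_dma_filter(struct dma_chan *chan, void *slave)
 *	{
 *		struct at_dma_slave *sl = slave;
 *
 *		if (sl && sl->dma_dev == chan->device->dev) {
 *			chan->private = sl;
 *			return true;
 *		}
 *		return false;
 *	}
 *
 * The channel can then be fed a DMA-mapped scatterlist through
 * device_prep_slave_sg() with DMA_TO_DEVICE or DMA_FROM_DEVICE.
 */
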
/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len, enum dma_data_direction direction)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}

/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		size_t period_len, enum dma_data_direction direction)
{
	u32		ctrla;
	unsigned int	reg_width = atslave->reg_width;

	/* prepare common CTRLA value */
	ctrla =   ATC_DEFAULT_CTRLA | atslave->ctrla
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_TO_DEVICE:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = atslave->tx_reg;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(AT_DMA_MEM_IF)
				| ATC_DIF(AT_DMA_PER_IF);
		break;

	case DMA_FROM_DEVICE:
		desc->lli.saddr = atslave->rx_reg;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(AT_DMA_PER_IF)
				| ATC_DIF(AT_DMA_MEM_IF);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_data_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned long		was_cyclic;
	unsigned int		periods = buf_len / period_len;
	unsigned int		i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
			buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
			period_len, direction))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc	*desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
						period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* let's make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}

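/*
 * Example (illustrative sketch, not from the original file): a cyclic
 * transfer for an audio-style ring buffer, assuming an at_dma_slave is
 * already attached to chan->private and buf_phys/buf_len/period_len describe
 * a DMA-mapped buffer. The names period_elapsed_cb and ctx are placeholders.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_dma_cyclic(chan, buf_phys, buf_len,
 *						  period_len, DMA_FROM_DEVICE);
 *	if (tx) {
 *		tx->callback = period_elapsed_cb;
 *		tx->callback_param = ctx;
 *		tx->tx_submit(tx);
 *	}
 *
 * Note that atc_issue_pending() below is a no-op for cyclic channels: the
 * transfer starts at submit time and loops until DMA_TERMINATE_ALL.
 */
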
static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
		set_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!atc_chan_is_paused(atchan))
			return 0;

		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
		clear_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		struct at_desc	*desc, *_desc;
		/*
		 * This is only called when something went wrong elsewhere, so
		 * we don't really care about the data. Just disable the
		 * channel. We still have to poll the channel enable bit due
		 * to AHB/HSB limitations.
		 */
		spin_lock_irqsave(&atchan->lock, flags);

		/* disabling channel: must also remove suspend state */
		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

		/* confirm that this channel is disabled */
		while (dma_readl(atdma, CHSR) & atchan->mask)
			cpu_relax();

		/* active_list entries will end up before queued entries */
		list_splice_init(&atchan->queue, &list);
		list_splice_init(&atchan->active_list, &list);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			atc_chain_complete(atchan, desc);

		clear_bit(ATC_IS_PAUSED, &atchan->status);
		/* if channel dedicated to cyclic operations, free it */
		clear_bit(ATC_IS_CYCLIC, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else {
		return -ENXIO;
	}

	return 0;
}

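/*
 * Example (illustrative sketch): channel users drive the hooks above through
 * the generic control entry point, e.g.:
 *
 *	chan->device->device_control(chan, DMA_PAUSE, 0);
 *	...
 *	chan->device->device_control(chan, DMA_RESUME, 0);
 *
 * and DMA_TERMINATE_ALL to stop the channel and flush both the active and
 * queued descriptor lists:
 *
 *	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 */
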
/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	unsigned long		flags;
	enum dma_status		ret;

	spin_lock_irqsave(&atchan->lock, flags);

	last_complete = atchan->completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		last_complete = atchan->completed_cookie;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (ret != DMA_SUCCESS)
		dma_set_tx_state(txstate, last_complete, last_used,
			atc_first_active(atchan)->len);
	else
		dma_set_tx_state(txstate, last_complete, last_used, 0);

	if (atc_chan_is_paused(atchan))
		ret = DMA_PAUSED;

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
		 ret, cookie, last_complete ? last_complete : 0,
		 last_used ? last_used : 0);

	return ret;
}

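/*
 * Example (illustrative sketch): callers normally poll completion through the
 * generic helper, which lands in atc_tx_status() via device_tx_status:
 *
 *	enum dma_status status;
 *	dma_cookie_t last, used;
 *
 *	status = dma_async_is_tx_complete(chan, cookie, &last, &used);
 *	if (status == DMA_SUCCESS)
 *		handle_completion();	(placeholder for client work)
 */
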
/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	if (!atc_chan_is_enabled(atchan)) {
		atc_advance_work(atchan);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	atchan->completed_cookie = chan->cookie = 1;
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}

/*--  Module Management  -----------------------------------------------*/

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct at_dma_platform_data *pdata;
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	/* get DMA Controller parameters from platform */
	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += pdata->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities from the platform data */
	atdma->dma_common.cap_mask = pdata->cap_mask;
	atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->chan_common.device = &atdma->dma_common;
		atchan->chan_common.cookie = atchan->completed_cookie = 1;
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_control = atc_control;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  pdata->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	return 0;

err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int __exit at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
}

static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan	*chan = &atchan->chan_common;

	/* The channel should already have been paused by its user;
	 * pause it here anyway if that was not done */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_control(chan, DMA_PAUSE, 0);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable(atdma->clk);
	return 0;
}

static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user;
	 * we cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),
	.shutdown	= at_dma_shutdown,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");