/*
 * SA11x0 DMAengine support
 *
 * Copyright (C) 2012 Russell King
 *   Derived in part from arch/arm/mach-sa1100/dma.c,
 *   Copyright (C) 2000, 2001 by Nicolas Pitre
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sa11x0-dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

#define NR_PHY_CHAN	6
#define DMA_ALIGN	3
#define DMA_MAX_SIZE	0x1fff
#define DMA_CHUNK_SIZE	0x1000

#define DMA_DDAR	0x00
#define DMA_DCSR_S	0x04
#define DMA_DCSR_C	0x08
#define DMA_DCSR_R	0x0c
#define DMA_DBSA	0x10
#define DMA_DBTA	0x14
#define DMA_DBSB	0x18
#define DMA_DBTB	0x1c
#define DMA_SIZE	0x20

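/*
 * Each physical channel occupies DMA_SIZE bytes of register space.  As
 * used throughout this driver, DCSR is accessed through three offsets:
 * writes to DMA_DCSR_S set the written bits, writes to DMA_DCSR_C clear
 * them, and DMA_DCSR_R reads the current state.
 */
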
#define DCSR_RUN	(1 << 0)
#define DCSR_IE		(1 << 1)
#define DCSR_ERROR	(1 << 2)
#define DCSR_DONEA	(1 << 3)
#define DCSR_STRTA	(1 << 4)
#define DCSR_DONEB	(1 << 5)
#define DCSR_STRTB	(1 << 6)
#define DCSR_BIU	(1 << 7)

#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
#define DDAR_Ser0UDCTr	(0x0 << 4)
#define DDAR_Ser0UDCRc	(0x1 << 4)
#define DDAR_Ser1SDLCTr	(0x2 << 4)
#define DDAR_Ser1SDLCRc	(0x3 << 4)
#define DDAR_Ser1UARTTr	(0x4 << 4)
#define DDAR_Ser1UARTRc	(0x5 << 4)
#define DDAR_Ser2ICPTr	(0x6 << 4)
#define DDAR_Ser2ICPRc	(0x7 << 4)
#define DDAR_Ser3UARTTr	(0x8 << 4)
#define DDAR_Ser3UARTRc	(0x9 << 4)
#define DDAR_Ser4MCP0Tr	(0xa << 4)
#define DDAR_Ser4MCP0Rc	(0xb << 4)
#define DDAR_Ser4MCP1Tr	(0xc << 4)
#define DDAR_Ser4MCP1Rc	(0xd << 4)
#define DDAR_Ser4SSPTr	(0xe << 4)
#define DDAR_Ser4SSPRc	(0xf << 4)

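/*
 * DDAR field summary, as used below: bits 0-3 select the transfer
 * direction, endianness, burst size and port width; bits 4-7 select the
 * peripheral; the remaining bits carry the packed device address, which
 * is filled in by sa11x0_dma_slave_config().
 */
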
struct sa11x0_dma_sg {
	u32			addr;
	u32			len;
};

struct sa11x0_dma_desc {
	struct virt_dma_desc	vd;

	u32			ddar;
	size_t			size;

	unsigned		sglen;
	struct sa11x0_dma_sg	sg[0];
};

struct sa11x0_dma_phy;

struct sa11x0_dma_chan {
	struct virt_dma_chan	vc;

	/* protected by c->vc.lock */
	struct sa11x0_dma_phy	*phy;
	enum dma_status		status;

	/* protected by d->lock */
	struct list_head	node;

	u32			ddar;
	const char		*name;
};

struct sa11x0_dma_phy {
	void __iomem		*base;
	struct sa11x0_dma_dev	*dev;
	unsigned		num;

	struct sa11x0_dma_chan	*vchan;

	/* Protected by c->vc.lock */
	unsigned		sg_load;
	struct sa11x0_dma_desc	*txd_load;
	unsigned		sg_done;
	struct sa11x0_dma_desc	*txd_done;
#ifdef CONFIG_PM_SLEEP
	u32			dbs[2];
	u32			dbt[2];
	u32			dcsr;
#endif
};

struct sa11x0_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	spinlock_t		lock;
	struct tasklet_struct	task;
	struct list_head	chan_pending;
	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
};

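/*
 * Channel model, for orientation: the SA11x0 has NR_PHY_CHAN physical
 * DMA channels (pchans), but the driver exposes one virtual channel
 * (vchan) per DMA-capable peripheral.  Virtual channels with issued
 * descriptors wait on d->chan_pending until the tasklet binds them to
 * a free physical channel, and are unbound again once they go idle.
 */
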
static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sa11x0_dma_chan, vc.chan);
}

static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
{
	return container_of(dmadev, struct sa11x0_dma_dev, slave);
}

static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
}

static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct sa11x0_dma_desc, vd));
}

static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
	list_del(&txd->vd.node);
	p->txd_load = txd;
	p->sg_load = 0;

	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
}

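/*
 * The hardware double-buffers transfers: each pchan has two buffer
 * register sets (A and B) which it drains alternately, with DCSR_BIU
 * indicating which set is in use.  sa11x0_dma_start_sg() below loads
 * the next sg entry into whichever set is free, so in the steady state
 * one buffer transfers while the other is being reloaded.
 */
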
static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_load;
	struct sa11x0_dma_sg *sg;
	void __iomem *base = p->base;
	unsigned dbsx, dbtx;
	u32 dcsr;

	if (!txd)
		return;

	dcsr = readl_relaxed(base + DMA_DCSR_R);

	/* Don't try to load the next transfer if both buffers are started */
	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
		return;

	if (p->sg_load == txd->sglen) {
		struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

		/*
		 * We have reached the end of the current descriptor.
		 * Peek at the next descriptor, and if compatible with
		 * the current, start processing it.
		 */
		if (txn && txn->ddar == txd->ddar) {
			txd = txn;
			sa11x0_dma_start_desc(p, txn);
		} else {
			p->txd_load = NULL;
			return;
		}
	}

	sg = &txd->sg[p->sg_load++];

	/* Select buffer to load according to channel status */
	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
		dbsx = DMA_DBSA;
		dbtx = DMA_DBTA;
		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
	} else {
		dbsx = DMA_DBSB;
		dbtx = DMA_DBTB;
		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
	}

	writel_relaxed(sg->addr, base + dbsx);
	writel_relaxed(sg->len, base + dbtx);
	writel(dcsr, base + DMA_DCSR_S);

	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
		p->num, dcsr,
		'A' + (dbsx == DMA_DBSB), sg->addr,
		'A' + (dbtx == DMA_DBTB), sg->len);
}

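/*
 * Completion accounting: each DONEA/DONEB interrupt retires one sg
 * entry of the currently-completing descriptor.  Once all sglen entries
 * have completed, the cookie is completed and the descriptor currently
 * being loaded (if any) becomes the one being completed; otherwise the
 * tasklet is kicked to find more work or release the pchan.
 */
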
static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_done;

	if (++p->sg_done == txd->sglen) {
		vchan_cookie_complete(&txd->vd);

		p->sg_done = 0;
		p->txd_done = p->txd_load;

		if (!p->txd_done)
			tasklet_schedule(&p->dev->task);
	}

	sa11x0_dma_start_sg(p, c);
}

static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
{
	struct sa11x0_dma_phy *p = dev_id;
	struct sa11x0_dma_dev *d = p->dev;
	struct sa11x0_dma_chan *c;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
		return IRQ_NONE;

	/* Clear reported status bits */
	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
		p->base + DMA_DCSR_C);

	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);

	if (dcsr & DCSR_ERROR) {
		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
			p->num, dcsr,
			readl_relaxed(p->base + DMA_DDAR),
			readl_relaxed(p->base + DMA_DBSA),
			readl_relaxed(p->base + DMA_DBTA),
			readl_relaxed(p->base + DMA_DBSB),
			readl_relaxed(p->base + DMA_DBTB));
	}

	c = p->vchan;
	if (c) {
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		/*
		 * Now that we're holding the lock, check that the vchan
		 * really is associated with this pchan before touching the
		 * hardware.  This should always succeed, because we won't
		 * change p->vchan or c->phy while the channel is actively
		 * transferring.
		 */
		if (c->phy == p) {
			if (dcsr & DCSR_DONEA)
				sa11x0_dma_complete(p, c);
			if (dcsr & DCSR_DONEB)
				sa11x0_dma_complete(p, c);
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}

	return IRQ_HANDLED;
}

static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);

	/* If the issued list is empty, we have no further txds to process */
	if (txd) {
		struct sa11x0_dma_phy *p = c->phy;

		sa11x0_dma_start_desc(p, txd);
		p->txd_done = txd;
		p->sg_done = 0;

		/* The channel should not have any transfers started */
		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
				      (DCSR_STRTA | DCSR_STRTB));

		/* Clear the run and start bits before changing DDAR */
		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
			       p->base + DMA_DCSR_C);
		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		/* Try to start both buffers */
		sa11x0_dma_start_sg(p, c);
		sa11x0_dma_start_sg(p, c);
	}
}

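/*
 * The tasklet makes two passes: first it restarts or releases pchans
 * whose current descriptor has completed, then it hands any freed
 * pchans to vchans waiting on d->chan_pending and starts their first
 * descriptor.  Allocations are recorded in pch_alloc so the new
 * bindings can be started outside d->lock, taking only the vchan locks.
 */
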
static void sa11x0_dma_tasklet(unsigned long arg)
{
	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_chan *c;
	unsigned pch, pch_alloc = 0;

	dev_dbg(d->slave.dev, "tasklet enter\n");

	list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && !p->txd_done) {
			sa11x0_dma_start_txd(c);
			if (!p->txd_done) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	spin_lock_irq(&d->lock);
	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct sa11x0_dma_chan, node);
			list_del_init(&c->node);

			pch_alloc |= 1 << pch;

			/* Mark this channel allocated */
			p->vchan = c;

			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;

			spin_lock_irq(&c->vc.lock);
			c->phy = p;

			sa11x0_dma_start_txd(c);
			spin_unlock_irq(&c->vc.lock);
		}
	}

	dev_dbg(d->slave.dev, "tasklet exit\n");
}

static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
}

static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
{
	unsigned reg;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);

	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
		reg = DMA_DBSA;
	else
		reg = DMA_DBSB;

	return readl_relaxed(p->base + reg);
}

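/*
 * Residue calculation: sa11x0_dma_pos() above snapshots the hardware
 * transfer pointer of the buffer currently transferring.  tx_status()
 * then finds the sg entry containing that address, counts the bytes
 * left in it, and adds the lengths of all untransferred entries that
 * follow.
 */
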
static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_SUCCESS)
		return ret;

	if (!state)
		return c->status;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
	} else if (!p) {
		state->residue = 0;
	} else {
		struct sa11x0_dma_desc *txd;
		size_t bytes = 0;

		if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
			txd = p->txd_done;
		else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
			txd = p->txd_load;
		else
			txd = NULL;

		ret = c->status;
		if (txd) {
			dma_addr_t addr = sa11x0_dma_pos(p);
			unsigned i;

			dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);

			for (i = 0; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				if (addr >= txd->sg[i].addr &&
				    addr < txd->sg[i].addr + txd->sg[i].len) {
					unsigned len;

					len = txd->sg[i].len -
						(addr - txd->sg[i].addr);
					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
						i, len);
					bytes += len;
					i++;
					break;
				}
			}
			for (; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				bytes += txd->sg[i].len;
			}
		}
		state->residue = bytes;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%x\n", state->residue);

	return ret;
}

/*
 * Move pending txds to the issued list, and re-init pending list.
 * If not already pending, add this channel to the list of pending
 * channels and trigger the tasklet to run.
 */
static void sa11x0_dma_issue_pending(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc)) {
		if (!c->phy) {
			spin_lock(&d->lock);
			if (list_empty(&c->node)) {
				list_add_tail(&c->node, &d->chan_pending);
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
			spin_unlock(&d->lock);
		}
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	struct scatterlist *sgent;
	unsigned i, j = sglen;
	size_t size = 0;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned int len = sg_dma_len(sgent);

		if (len > DMA_MAX_SIZE)
			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
		if (addr & DMA_ALIGN) {
			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
				&c->vc, addr);
			return NULL;
		}
	}

	txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	j = 0;
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned len = sg_dma_len(sgent);

		size += len;

		do {
			unsigned tlen = len;

			/*
			 * Check whether the transfer will fit.  If not, try
			 * to split the transfer up such that we end up with
			 * equal chunks - but make sure that we preserve the
			 * alignment.  This avoids small segments.
			 */
			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen,
					DMA_MAX_SIZE & ~DMA_ALIGN);

				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[j].addr = addr;
			txd->sg[j].len = tlen;

			addr += tlen;
			len -= tlen;
			j++;
		} while (len);
	}

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = j;

	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
		&c->vc, &txd->vd, txd->size, txd->sglen);

	return vchan_tx_prep(&c->vc, &txd->vd, flags);
}

static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
{
	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
	dma_addr_t addr;
	enum dma_slave_buswidth width;
	u32 maxburst;

	if (ddar & DDAR_RW) {
		addr = cfg->src_addr;
		width = cfg->src_addr_width;
		maxburst = cfg->src_maxburst;
	} else {
		addr = cfg->dst_addr;
		width = cfg->dst_addr_width;
		maxburst = cfg->dst_maxburst;
	}

	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
	    (maxburst != 4 && maxburst != 8))
		return -EINVAL;

	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		ddar |= DDAR_DW;
	if (maxburst == 8)
		ddar |= DDAR_BS;

	dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
		&c->vc, addr, width, maxburst);

	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

	return 0;
}

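/*
 * Address packing, as implemented above: bits 31..28 of the device
 * address are kept in place, and bits 21..2 are shifted left by six
 * into DDAR bits 27..8, alongside the width (DDAR_DW) and burst
 * (DDAR_BS) selections.  Only 4- or 8-word bursts and 8- or 16-bit
 * widths are representable, hence the -EINVAL checks.
 */
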
static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
		/* Clear the tx descriptor lists */
		spin_lock_irqsave(&c->vc.lock, flags);
		vchan_get_all_descriptors(&c->vc, &head);

		p = c->phy;
		if (p) {
			dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
			/* vchan is assigned to a pchan - stop the channel */
			writel(DCSR_RUN | DCSR_IE |
			       DCSR_STRTA | DCSR_DONEA |
			       DCSR_STRTB | DCSR_DONEB,
			       p->base + DMA_DCSR_C);

			if (p->txd_load) {
				if (p->txd_load != p->txd_done)
					list_add_tail(&p->txd_load->vd.node, &head);
				p->txd_load = NULL;
			}
			if (p->txd_done) {
				list_add_tail(&p->txd_done->vd.node, &head);
				p->txd_done = NULL;
			}
			c->phy = NULL;
			spin_lock(&d->lock);
			p->vchan = NULL;
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
		vchan_dma_desc_free_list(&c->vc, &head);
		ret = 0;
		break;

	case DMA_PAUSE:
		dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
		spin_lock_irqsave(&c->vc.lock, flags);
		if (c->status == DMA_IN_PROGRESS) {
			c->status = DMA_PAUSED;

			p = c->phy;
			if (p) {
				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			} else {
				spin_lock(&d->lock);
				list_del_init(&c->node);
				spin_unlock(&d->lock);
			}
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
		ret = 0;
		break;

	case DMA_RESUME:
		dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
		spin_lock_irqsave(&c->vc.lock, flags);
		if (c->status == DMA_PAUSED) {
			c->status = DMA_IN_PROGRESS;

			p = c->phy;
			if (p) {
				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
			} else if (!list_empty(&c->vc.desc_issued)) {
				spin_lock(&d->lock);
				list_add_tail(&c->node, &d->chan_pending);
				spin_unlock(&d->lock);
			}
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
		ret = 0;
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

struct sa11x0_dma_channel_desc {
	u32 ddar;
	const char *name;
};

#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
static const struct sa11x0_dma_channel_desc chan_desc[] = {
	CD(Ser0UDCTr, 0),
	CD(Ser0UDCRc, DDAR_RW),
	CD(Ser1SDLCTr, 0),
	CD(Ser1SDLCRc, DDAR_RW),
	CD(Ser1UARTTr, 0),
	CD(Ser1UARTRc, DDAR_RW),
	CD(Ser2ICPTr, 0),
	CD(Ser2ICPRc, DDAR_RW),
	CD(Ser3UARTTr, 0),
	CD(Ser3UARTRc, DDAR_RW),
	CD(Ser4MCP0Tr, 0),
	CD(Ser4MCP0Rc, DDAR_RW),
	CD(Ser4MCP1Tr, 0),
	CD(Ser4MCP1Rc, DDAR_RW),
	CD(Ser4SSPTr, 0),
	CD(Ser4SSPRc, DDAR_RW),
};

static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
	struct device *dev)
{
	unsigned i;

	dmadev->chancnt = ARRAY_SIZE(chan_desc);
	INIT_LIST_HEAD(&dmadev->channels);
	dmadev->dev = dev;
	dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
	dmadev->device_control = sa11x0_dma_control;
	dmadev->device_tx_status = sa11x0_dma_tx_status;
	dmadev->device_issue_pending = sa11x0_dma_issue_pending;

	for (i = 0; i < dmadev->chancnt; i++) {
		struct sa11x0_dma_chan *c;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c) {
			dev_err(dev, "no memory for channel %u\n", i);
			return -ENOMEM;
		}

		c->status = DMA_IN_PROGRESS;
		c->ddar = chan_desc[i].ddar;
		c->name = chan_desc[i].name;
		INIT_LIST_HEAD(&c->node);

		c->vc.desc_free = sa11x0_dma_free_desc;
		vchan_init(&c->vc, dmadev);
	}

	return dma_async_device_register(dmadev);
}

static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq <= 0)
		return -ENXIO;

	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
}

static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq > 0)
		free_irq(irq, data);
}

static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
	struct sa11x0_dma_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d;
	struct resource *res;
	unsigned i;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);

	d->base = ioremap(res->start, resource_size(res));
	if (!d->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);

	for (i = 0; i < NR_PHY_CHAN; i++) {
		struct sa11x0_dma_phy *p = &d->phy[i];

		p->dev = d;
		p->num = i;
		p->base = d->base + i * DMA_SIZE;
		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(0, p->base + DMA_DDAR);

		ret = sa11x0_dma_request_irq(pdev, i, p);
		if (ret) {
			while (i) {
				i--;
				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
			}
			goto err_irq;
		}
	}

	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
	if (ret) {
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
			ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, d);
	return 0;

 err_slave_reg:
	sa11x0_dma_free_channels(&d->slave);
	for (i = 0; i < NR_PHY_CHAN; i++)
		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 err_irq:
	tasklet_kill(&d->task);
	iounmap(d->base);
 err_ioremap:
	kfree(d);
 err_alloc:
	return ret;
}

static int __devexit sa11x0_dma_remove(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
	unsigned pch;

	dma_async_device_unregister(&d->slave);

	sa11x0_dma_free_channels(&d->slave);
	for (pch = 0; pch < NR_PHY_CHAN; pch++)
		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
	tasklet_kill(&d->task);
	iounmap(d->base);
	kfree(d);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int sa11x0_dma_suspend(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		u32 dcsr, saved_dcsr;

		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		if (dcsr & DCSR_RUN) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		}

		saved_dcsr &= DCSR_RUN | DCSR_IE;
		if (dcsr & DCSR_BIU) {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
		} else {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
		}
		p->dcsr = saved_dcsr;

		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
	}

	return 0;
}

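/*
 * Note the ordering contract with suspend above: the buffer that was
 * (or would next be) transferring is saved into dbs[0]/dbt[0], with its
 * STRTA/STRTB bit remapped to match, so resume can always reload buffer
 * set A first and restart from a normalised state.
 */
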
static int sa11x0_dma_resume(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		struct sa11x0_dma_desc *txd = NULL;
		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

		if (p->txd_done)
			txd = p->txd_done;
		else if (p->txd_load)
			txd = p->txd_load;

		if (!txd)
			continue;

		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
	}

	return 0;
}
#endif

static const struct dev_pm_ops sa11x0_dma_pm_ops = {
	.suspend_noirq = sa11x0_dma_suspend,
	.resume_noirq = sa11x0_dma_resume,
	.freeze_noirq = sa11x0_dma_suspend,
	.thaw_noirq = sa11x0_dma_resume,
	.poweroff_noirq = sa11x0_dma_suspend,
	.restore_noirq = sa11x0_dma_resume,
};

static struct platform_driver sa11x0_dma_driver = {
	.driver = {
		.name	= "sa11x0-dma",
		.owner	= THIS_MODULE,
		.pm	= &sa11x0_dma_pm_ops,
	},
	.probe		= sa11x0_dma_probe,
	.remove		= __devexit_p(sa11x0_dma_remove),
};

bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
		struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
		const char *p = param;

		return !strcmp(c->name, p);
	}
	return false;
}
EXPORT_SYMBOL(sa11x0_dma_filter_fn);

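/*
 * A minimal sketch of client usage (the requesting driver and its error
 * handling are assumed; "Ser4SSPTr" is one of the channel names
 * registered in chan_desc[] above):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sa11x0_dma_filter_fn, "Ser4SSPTr");
 *	if (!chan)
 *		return -ENODEV;
 */
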
static int __init sa11x0_dma_init(void)
{
	return platform_driver_register(&sa11x0_dma_driver);
}
subsys_initcall(sa11x0_dma_init);

static void __exit sa11x0_dma_exit(void)
{
	platform_driver_unregister(&sa11x0_dma_driver);
}
module_exit(sa11x0_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SA-11x0 DMA driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sa11x0-dma");