1 /*
2  * SA11x0 DMAengine support
3  *
4  * Copyright (C) 2012 Russell King
5  *   Derived in part from arch/arm/mach-sa1100/dma.c,
6  *   Copyright (C) 2000, 2001 by Nicolas Pitre
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 #include <linux/sched.h>
13 #include <linux/device.h>
14 #include <linux/dmaengine.h>
15 #include <linux/init.h>
16 #include <linux/interrupt.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <linux/sa11x0-dma.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23
24 #include "virt-dma.h"
25
26 #define NR_PHY_CHAN     6
27 #define DMA_ALIGN       3
28 #define DMA_MAX_SIZE    0x1fff
29 #define DMA_CHUNK_SIZE  0x1000
30
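/*
 * Register window per physical channel (a summary of the offsets used
 * below): each channel occupies DMA_SIZE (0x20) bytes containing the
 * device address register (DDAR), the control/status register with
 * separate set/clear/read addresses (DCSR_S/C/R), and two transfer
 * buffers A and B, each with a start address (DBSx) and byte count (DBTx).
 */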
31 #define DMA_DDAR        0x00
32 #define DMA_DCSR_S      0x04
33 #define DMA_DCSR_C      0x08
34 #define DMA_DCSR_R      0x0c
35 #define DMA_DBSA        0x10
36 #define DMA_DBTA        0x14
37 #define DMA_DBSB        0x18
38 #define DMA_DBTB        0x1c
39 #define DMA_SIZE        0x20
40
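/*
 * DCSR bits: RUN and IE enable the channel and its interrupt, ERROR
 * reports a bus error, STRTA/STRTB and DONEA/DONEB track the state of
 * the two transfer buffers, and BIU indicates which of the two buffers
 * the hardware is currently working on.
 */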
41 #define DCSR_RUN        (1 << 0)
42 #define DCSR_IE         (1 << 1)
43 #define DCSR_ERROR      (1 << 2)
44 #define DCSR_DONEA      (1 << 3)
45 #define DCSR_STRTA      (1 << 4)
46 #define DCSR_DONEB      (1 << 5)
47 #define DCSR_STRTB      (1 << 6)
48 #define DCSR_BIU        (1 << 7)
49
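/*
 * DDAR encoding: bits 0-3 select direction, endianness, burst size and
 * transfer width; bits 4-7 select the on-chip serial device.  The
 * device FIFO address is folded into the remaining bits by
 * sa11x0_dma_slave_config() below.
 */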
50 #define DDAR_RW         (1 << 0)        /* 0 = W, 1 = R */
51 #define DDAR_E          (1 << 1)        /* 0 = LE, 1 = BE */
52 #define DDAR_BS         (1 << 2)        /* 0 = BS4, 1 = BS8 */
53 #define DDAR_DW         (1 << 3)        /* 0 = 8b, 1 = 16b */
54 #define DDAR_Ser0UDCTr  (0x0 << 4)
55 #define DDAR_Ser0UDCRc  (0x1 << 4)
56 #define DDAR_Ser1SDLCTr (0x2 << 4)
57 #define DDAR_Ser1SDLCRc (0x3 << 4)
58 #define DDAR_Ser1UARTTr (0x4 << 4)
59 #define DDAR_Ser1UARTRc (0x5 << 4)
60 #define DDAR_Ser2ICPTr  (0x6 << 4)
61 #define DDAR_Ser2ICPRc  (0x7 << 4)
62 #define DDAR_Ser3UARTTr (0x8 << 4)
63 #define DDAR_Ser3UARTRc (0x9 << 4)
64 #define DDAR_Ser4MCP0Tr (0xa << 4)
65 #define DDAR_Ser4MCP0Rc (0xb << 4)
66 #define DDAR_Ser4MCP1Tr (0xc << 4)
67 #define DDAR_Ser4MCP1Rc (0xd << 4)
68 #define DDAR_Ser4SSPTr  (0xe << 4)
69 #define DDAR_Ser4SSPRc  (0xf << 4)
70
71 struct sa11x0_dma_sg {
72         u32                     addr;
73         u32                     len;
74 };
75
76 struct sa11x0_dma_desc {
77         struct virt_dma_desc    vd;
78
79         u32                     ddar;
80         size_t                  size;
81
82         unsigned                sglen;
83         struct sa11x0_dma_sg    sg[0];
84 };
85
86 struct sa11x0_dma_phy;
87
88 struct sa11x0_dma_chan {
89         struct virt_dma_chan    vc;
90
91         /* protected by c->vc.lock */
92         struct sa11x0_dma_phy   *phy;
93         enum dma_status         status;
94
95         /* protected by d->lock */
96         struct list_head        node;
97
98         u32                     ddar;
99         const char              *name;
100 };
101
102 struct sa11x0_dma_phy {
103         void __iomem            *base;
104         struct sa11x0_dma_dev   *dev;
105         unsigned                num;
106
107         struct sa11x0_dma_chan  *vchan;
108
109         /* Protected by c->vc.lock */
110         unsigned                sg_load;
111         struct sa11x0_dma_desc  *txd_load;
112         unsigned                sg_done;
113         struct sa11x0_dma_desc  *txd_done;
114 #ifdef CONFIG_PM_SLEEP
115         u32                     dbs[2];
116         u32                     dbt[2];
117         u32                     dcsr;
118 #endif
119 };
120
121 struct sa11x0_dma_dev {
122         struct dma_device       slave;
123         void __iomem            *base;
124         spinlock_t              lock;
125         struct tasklet_struct   task;
126         struct list_head        chan_pending;
127         struct sa11x0_dma_phy   phy[NR_PHY_CHAN];
128 };
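
/*
 * The driver exposes one virtual channel per serial device and
 * direction (see chan_desc[] below) and multiplexes them onto the
 * NR_PHY_CHAN physical channels.  Virtual channels waiting for a
 * physical channel sit on the chan_pending list; the tasklet hands
 * out idle physical channels and starts their first descriptor.
 */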
129
130 static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
131 {
132         return container_of(chan, struct sa11x0_dma_chan, vc.chan);
133 }
134
135 static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
136 {
137         return container_of(dmadev, struct sa11x0_dma_dev, slave);
138 }
139
140 static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
141 {
142         struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
143
144         return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
145 }
146
147 static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
148 {
149         kfree(container_of(vd, struct sa11x0_dma_desc, vd));
150 }
151
152 static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
153 {
154         list_del(&txd->vd.node);
155         p->txd_load = txd;
156         p->sg_load = 0;
157
158         dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
159                 p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
160 }
161
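/*
 * Load the next scatterlist entry of the current descriptor into
 * whichever of the two hardware buffers is free, chosen from the
 * BIU/STRTA/STRTB bits.  If the current descriptor is exhausted, chain
 * straight into the next queued descriptor with the same DDAR.  This
 * is called twice from sa11x0_dma_start_txd() to prime both buffers,
 * and again from the completion path to keep the hardware busy.
 */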
162 static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
163         struct sa11x0_dma_chan *c)
164 {
165         struct sa11x0_dma_desc *txd = p->txd_load;
166         struct sa11x0_dma_sg *sg;
167         void __iomem *base = p->base;
168         unsigned dbsx, dbtx;
169         u32 dcsr;
170
171         if (!txd)
172                 return;
173
174         dcsr = readl_relaxed(base + DMA_DCSR_R);
175
176         /* Don't try to load the next transfer if both buffers are started */
177         if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
178                 return;
179
180         if (p->sg_load == txd->sglen) {
181                 struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
182
183                 /*
184                  * We have reached the end of the current descriptor.
185                  * Peek at the next descriptor, and if compatible with
186                  * the current, start processing it.
187                  */
188                 if (txn && txn->ddar == txd->ddar) {
189                         txd = txn;
190                         sa11x0_dma_start_desc(p, txn);
191                 } else {
192                         p->txd_load = NULL;
193                         return;
194                 }
195         }
196
197         sg = &txd->sg[p->sg_load++];
198
199         /* Select buffer to load according to channel status */
200         if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
201             ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
202                 dbsx = DMA_DBSA;
203                 dbtx = DMA_DBTA;
204                 dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
205         } else {
206                 dbsx = DMA_DBSB;
207                 dbtx = DMA_DBTB;
208                 dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
209         }
210
211         writel_relaxed(sg->addr, base + dbsx);
212         writel_relaxed(sg->len, base + dbtx);
213         writel(dcsr, base + DMA_DCSR_S);
214
215         dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
216                 p->num, dcsr,
217                 'A' + (dbsx == DMA_DBSB), sg->addr,
218                 'A' + (dbtx == DMA_DBTB), sg->len);
219 }
220
221 static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
222         struct sa11x0_dma_chan *c)
223 {
224         struct sa11x0_dma_desc *txd = p->txd_done;
225
226         if (++p->sg_done == txd->sglen) {
227                 vchan_cookie_complete(&txd->vd);
228
229                 p->sg_done = 0;
230                 p->txd_done = p->txd_load;
231
232                 if (!p->txd_done)
233                         tasklet_schedule(&p->dev->task);
234         }
235
236         sa11x0_dma_start_sg(p, c);
237 }
238
239 static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
240 {
241         struct sa11x0_dma_phy *p = dev_id;
242         struct sa11x0_dma_dev *d = p->dev;
243         struct sa11x0_dma_chan *c;
244         u32 dcsr;
245
246         dcsr = readl_relaxed(p->base + DMA_DCSR_R);
247         if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
248                 return IRQ_NONE;
249
250         /* Clear reported status bits */
251         writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
252                 p->base + DMA_DCSR_C);
253
254         dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);
255
256         if (dcsr & DCSR_ERROR) {
257                 dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
258                         p->num, dcsr,
259                         readl_relaxed(p->base + DMA_DDAR),
260                         readl_relaxed(p->base + DMA_DBSA),
261                         readl_relaxed(p->base + DMA_DBTA),
262                         readl_relaxed(p->base + DMA_DBSB),
263                         readl_relaxed(p->base + DMA_DBTB));
264         }
265
266         c = p->vchan;
267         if (c) {
268                 unsigned long flags;
269
270                 spin_lock_irqsave(&c->vc.lock, flags);
271                 /*
272                  * Now that we're holding the lock, check that the vchan
273                  * really is associated with this pchan before touching the
274                  * hardware.  This should always succeed, because we won't
275                  * change p->vchan or c->phy while the channel is actively
276                  * transferring.
277                  */
278                 if (c->phy == p) {
279                         if (dcsr & DCSR_DONEA)
280                                 sa11x0_dma_complete(p, c);
281                         if (dcsr & DCSR_DONEB)
282                                 sa11x0_dma_complete(p, c);
283                 }
284                 spin_unlock_irqrestore(&c->vc.lock, flags);
285         }
286
287         return IRQ_HANDLED;
288 }
289
290 static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
291 {
292         struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);
293
294         /* If the issued list is empty, we have no further txds to process */
295         if (txd) {
296                 struct sa11x0_dma_phy *p = c->phy;
297
298                 sa11x0_dma_start_desc(p, txd);
299                 p->txd_done = txd;
300                 p->sg_done = 0;
301
302                 /* The channel should not have any transfers started */
303                 WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
304                                       (DCSR_STRTA | DCSR_STRTB));
305
306                 /* Clear the run and start bits before changing DDAR */
307                 writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
308                                p->base + DMA_DCSR_C);
309                 writel_relaxed(txd->ddar, p->base + DMA_DDAR);
310
311                 /* Try to start both buffers */
312                 sa11x0_dma_start_sg(p, c);
313                 sa11x0_dma_start_sg(p, c);
314         }
315 }
316
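/*
 * The tasklet makes two passes: first it releases physical channels
 * whose virtual channel has run out of work, then it assigns idle
 * physical channels to pending virtual channels and starts their next
 * descriptor.
 */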
317 static void sa11x0_dma_tasklet(unsigned long arg)
318 {
319         struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
320         struct sa11x0_dma_phy *p;
321         struct sa11x0_dma_chan *c;
322         unsigned pch, pch_alloc = 0;
323
324         dev_dbg(d->slave.dev, "tasklet enter\n");
325
326         list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
327                 spin_lock_irq(&c->vc.lock);
328                 p = c->phy;
329                 if (p && !p->txd_done) {
330                         sa11x0_dma_start_txd(c);
331                         if (!p->txd_done) {
332                                 /* No current txd associated with this channel */
333                                 dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
334
335                                 /* Mark this channel free */
336                                 c->phy = NULL;
337                                 p->vchan = NULL;
338                         }
339                 }
340                 spin_unlock_irq(&c->vc.lock);
341         }
342
343         spin_lock_irq(&d->lock);
344         for (pch = 0; pch < NR_PHY_CHAN; pch++) {
345                 p = &d->phy[pch];
346
347                 if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
348                         c = list_first_entry(&d->chan_pending,
349                                 struct sa11x0_dma_chan, node);
350                         list_del_init(&c->node);
351
352                         pch_alloc |= 1 << pch;
353
354                         /* Mark this channel allocated */
355                         p->vchan = c;
356
357                         dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
358                 }
359         }
360         spin_unlock_irq(&d->lock);
361
362         for (pch = 0; pch < NR_PHY_CHAN; pch++) {
363                 if (pch_alloc & (1 << pch)) {
364                         p = &d->phy[pch];
365                         c = p->vchan;
366
367                         spin_lock_irq(&c->vc.lock);
368                         c->phy = p;
369
370                         sa11x0_dma_start_txd(c);
371                         spin_unlock_irq(&c->vc.lock);
372                 }
373         }
374
375         dev_dbg(d->slave.dev, "tasklet exit\n");
376 }
377
378
379 static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
380 {
381         return 0;
382 }
383
384 static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
385 {
386         struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
387         struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
388         unsigned long flags;
389
390         spin_lock_irqsave(&d->lock, flags);
391         list_del_init(&c->node);
392         spin_unlock_irqrestore(&d->lock, flags);
393
394         vchan_free_chan_resources(&c->vc);
395 }
396
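/*
 * Read back the position of the current transfer from the buffer
 * address register the hardware is working from (updated by the
 * controller as the transfer progresses); sa11x0_dma_tx_status() uses
 * this to compute the residue.
 */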
397 static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
398 {
399         unsigned reg;
400         u32 dcsr;
401
402         dcsr = readl_relaxed(p->base + DMA_DCSR_R);
403
404         if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
405             (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
406                 reg = DMA_DBSA;
407         else
408                 reg = DMA_DBSB;
409
410         return readl_relaxed(p->base + reg);
411 }
412
413 static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
414         dma_cookie_t cookie, struct dma_tx_state *state)
415 {
416         struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
417         struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
418         struct sa11x0_dma_phy *p;
419         struct virt_dma_desc *vd;
420         unsigned long flags;
421         enum dma_status ret;
422
423         ret = dma_cookie_status(&c->vc.chan, cookie, state);
424         if (ret == DMA_SUCCESS)
425                 return ret;
426
427         if (!state)
428                 return c->status;
429
430         spin_lock_irqsave(&c->vc.lock, flags);
431         p = c->phy;
432
433         /*
434          * If the cookie is on our issue queue, then the residue is
435          * its total size.
436          */
437         vd = vchan_find_desc(&c->vc, cookie);
438         if (vd) {
439                 state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
440         } else if (!p) {
441                 state->residue = 0;
442         } else {
443                 struct sa11x0_dma_desc *txd;
444                 size_t bytes = 0;
445
446                 if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
447                         txd = p->txd_done;
448                 else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
449                         txd = p->txd_load;
450                 else
451                         txd = NULL;
452
453                 ret = c->status;
454                 if (txd) {
455                         dma_addr_t addr = sa11x0_dma_pos(p);
456                         unsigned i;
457
458                         dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
459
460                         for (i = 0; i < txd->sglen; i++) {
461                                 dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
462                                         i, txd->sg[i].addr, txd->sg[i].len);
463                                 if (addr >= txd->sg[i].addr &&
464                                     addr < txd->sg[i].addr + txd->sg[i].len) {
465                                         unsigned len;
466
467                                         len = txd->sg[i].len -
468                                                 (addr - txd->sg[i].addr);
469                                         dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
470                                                 i, len);
471                                         bytes += len;
472                                         i++;
473                                         break;
474                                 }
475                         }
476                         for (; i < txd->sglen; i++) {
477                                 dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
478                                         i, txd->sg[i].addr, txd->sg[i].len);
479                                 bytes += txd->sg[i].len;
480                         }
481                 }
482                 state->residue = bytes;
483         }
484         spin_unlock_irqrestore(&c->vc.lock, flags);
485
486         dev_vdbg(d->slave.dev, "tx_status: bytes 0x%x\n", state->residue);
487
488         return ret;
489 }
490
491 /*
492  * Move pending txds to the issued list, and re-init pending list.
493  * If not already pending, add this channel to the list of pending
494  * channels and trigger the tasklet to run.
495  */
496 static void sa11x0_dma_issue_pending(struct dma_chan *chan)
497 {
498         struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
499         struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
500         unsigned long flags;
501
502         spin_lock_irqsave(&c->vc.lock, flags);
503         if (vchan_issue_pending(&c->vc)) {
504                 if (!c->phy) {
505                         spin_lock(&d->lock);
506                         if (list_empty(&c->node)) {
507                                 list_add_tail(&c->node, &d->chan_pending);
508                                 tasklet_schedule(&d->task);
509                                 dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
510                         }
511                         spin_unlock(&d->lock);
512                 }
513         } else
514                 dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
515         spin_unlock_irqrestore(&c->vc.lock, flags);
516 }
517
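/*
 * Build a hardware scatterlist from the caller's scatterlist.  The
 * request must match the channel's native direction (DDAR_RW); entries
 * larger than DMA_MAX_SIZE are split into roughly equal, aligned
 * chunks, and buffer addresses with any DMA_ALIGN bits set are
 * rejected.
 */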
518 static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
519         struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
520         enum dma_transfer_direction dir, unsigned long flags, void *context)
521 {
522         struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
523         struct sa11x0_dma_desc *txd;
524         struct scatterlist *sgent;
525         unsigned i, j = sglen;
526         size_t size = 0;
527
528         /* SA11x0 channels can only operate in their native direction */
529         if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
530                 dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
531                         &c->vc, c->ddar, dir);
532                 return NULL;
533         }
534
535         /* Do not allow zero-sized txds */
536         if (sglen == 0)
537                 return NULL;
538
539         for_each_sg(sg, sgent, sglen, i) {
540                 dma_addr_t addr = sg_dma_address(sgent);
541                 unsigned int len = sg_dma_len(sgent);
542
543                 if (len > DMA_MAX_SIZE)
544                         j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
545                 if (addr & DMA_ALIGN) {
546                         dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
547                                 &c->vc, addr);
548                         return NULL;
549                 }
550         }
551
552         txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
553         if (!txd) {
554                 dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
555                 return NULL;
556         }
557
558         j = 0;
559         for_each_sg(sg, sgent, sglen, i) {
560                 dma_addr_t addr = sg_dma_address(sgent);
561                 unsigned len = sg_dma_len(sgent);
562
563                 size += len;
564
565                 do {
566                         unsigned tlen = len;
567
568                         /*
569                          * Check whether the transfer will fit.  If not, try
570                          * to split the transfer up such that we end up with
571                          * equal chunks - but make sure that we preserve the
572                          * alignment.  This avoids small segments.
573                          */
574                         if (tlen > DMA_MAX_SIZE) {
575                                 unsigned mult = DIV_ROUND_UP(tlen,
576                                         DMA_MAX_SIZE & ~DMA_ALIGN);
577
578                                 tlen = (tlen / mult) & ~DMA_ALIGN;
579                         }
580
581                         txd->sg[j].addr = addr;
582                         txd->sg[j].len = tlen;
583
584                         addr += tlen;
585                         len -= tlen;
586                         j++;
587                 } while (len);
588         }
589
590         txd->ddar = c->ddar;
591         txd->size = size;
592         txd->sglen = j;
593
594         dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
595                 &c->vc, &txd->vd, txd->size, txd->sglen);
596
597         return vchan_tx_prep(&c->vc, &txd->vd, flags);
598 }
599
600 static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
601 {
602         u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
603         dma_addr_t addr;
604         enum dma_slave_buswidth width;
605         u32 maxburst;
606
607         if (ddar & DDAR_RW) {
608                 addr = cfg->src_addr;
609                 width = cfg->src_addr_width;
610                 maxburst = cfg->src_maxburst;
611         } else {
612                 addr = cfg->dst_addr;
613                 width = cfg->dst_addr_width;
614                 maxburst = cfg->dst_maxburst;
615         }
616
617         if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
618              width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
619             (maxburst != 4 && maxburst != 8))
620                 return -EINVAL;
621
622         if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
623                 ddar |= DDAR_DW;
624         if (maxburst == 8)
625                 ddar |= DDAR_BS;
626
627         dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
628                 &c->vc, addr, width, maxburst);
629
630         c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
631
632         return 0;
633 }
634
635 static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
636         unsigned long arg)
637 {
638         struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
639         struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
640         struct sa11x0_dma_phy *p;
641         LIST_HEAD(head);
642         unsigned long flags;
643         int ret;
644
645         switch (cmd) {
646         case DMA_SLAVE_CONFIG:
647                 return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
648
649         case DMA_TERMINATE_ALL:
650                 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
651                 /* Clear the tx descriptor lists */
652                 spin_lock_irqsave(&c->vc.lock, flags);
653                 vchan_get_all_descriptors(&c->vc, &head);
654
655                 p = c->phy;
656                 if (p) {
657                         dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
658                         /* vchan is assigned to a pchan - stop the channel */
659                         writel(DCSR_RUN | DCSR_IE |
660                                 DCSR_STRTA | DCSR_DONEA |
661                                 DCSR_STRTB | DCSR_DONEB,
662                                 p->base + DMA_DCSR_C);
663
664                         if (p->txd_load) {
665                                 if (p->txd_load != p->txd_done)
666                                         list_add_tail(&p->txd_load->vd.node, &head);
667                                 p->txd_load = NULL;
668                         }
669                         if (p->txd_done) {
670                                 list_add_tail(&p->txd_done->vd.node, &head);
671                                 p->txd_done = NULL;
672                         }
673                         c->phy = NULL;
674                         spin_lock(&d->lock);
675                         p->vchan = NULL;
676                         spin_unlock(&d->lock);
677                         tasklet_schedule(&d->task);
678                 }
679                 spin_unlock_irqrestore(&c->vc.lock, flags);
680                 vchan_dma_desc_free_list(&c->vc, &head);
681                 ret = 0;
682                 break;
683
684         case DMA_PAUSE:
685                 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
686                 spin_lock_irqsave(&c->vc.lock, flags);
687                 if (c->status == DMA_IN_PROGRESS) {
688                         c->status = DMA_PAUSED;
689
690                         p = c->phy;
691                         if (p) {
692                                 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
693                         } else {
694                                 spin_lock(&d->lock);
695                                 list_del_init(&c->node);
696                                 spin_unlock(&d->lock);
697                         }
698                 }
699                 spin_unlock_irqrestore(&c->vc.lock, flags);
700                 ret = 0;
701                 break;
702
703         case DMA_RESUME:
704                 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
705                 spin_lock_irqsave(&c->vc.lock, flags);
706                 if (c->status == DMA_PAUSED) {
707                         c->status = DMA_IN_PROGRESS;
708
709                         p = c->phy;
710                         if (p) {
711                                 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
712                         } else if (!list_empty(&c->vc.desc_issued)) {
713                                 spin_lock(&d->lock);
714                                 list_add_tail(&c->node, &d->chan_pending);
715                                 spin_unlock(&d->lock);
716                         }
717                 }
718                 spin_unlock_irqrestore(&c->vc.lock, flags);
719                 ret = 0;
720                 break;
721
722         default:
723                 ret = -ENXIO;
724                 break;
725         }
726
727         return ret;
728 }
729
730 struct sa11x0_dma_channel_desc {
731         u32 ddar;
732         const char *name;
733 };
734
735 #define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
736 static const struct sa11x0_dma_channel_desc chan_desc[] = {
737         CD(Ser0UDCTr, 0),
738         CD(Ser0UDCRc, DDAR_RW),
739         CD(Ser1SDLCTr, 0),
740         CD(Ser1SDLCRc, DDAR_RW),
741         CD(Ser1UARTTr, 0),
742         CD(Ser1UARTRc, DDAR_RW),
743         CD(Ser2ICPTr, 0),
744         CD(Ser2ICPRc, DDAR_RW),
745         CD(Ser3UARTTr, 0),
746         CD(Ser3UARTRc, DDAR_RW),
747         CD(Ser4MCP0Tr, 0),
748         CD(Ser4MCP0Rc, DDAR_RW),
749         CD(Ser4MCP1Tr, 0),
750         CD(Ser4MCP1Rc, DDAR_RW),
751         CD(Ser4SSPTr, 0),
752         CD(Ser4SSPRc, DDAR_RW),
753 };
754
755 static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
756         struct device *dev)
757 {
758         unsigned i;
759
760         dmadev->chancnt = ARRAY_SIZE(chan_desc);
761         INIT_LIST_HEAD(&dmadev->channels);
762         dmadev->dev = dev;
763         dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
764         dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
765         dmadev->device_control = sa11x0_dma_control;
766         dmadev->device_tx_status = sa11x0_dma_tx_status;
767         dmadev->device_issue_pending = sa11x0_dma_issue_pending;
768
769         for (i = 0; i < dmadev->chancnt; i++) {
770                 struct sa11x0_dma_chan *c;
771
772                 c = kzalloc(sizeof(*c), GFP_KERNEL);
773                 if (!c) {
774                         dev_err(dev, "no memory for channel %u\n", i);
775                         return -ENOMEM;
776                 }
777
778                 c->status = DMA_IN_PROGRESS;
779                 c->ddar = chan_desc[i].ddar;
780                 c->name = chan_desc[i].name;
781                 INIT_LIST_HEAD(&c->node);
782
783                 c->vc.desc_free = sa11x0_dma_free_desc;
784                 vchan_init(&c->vc, dmadev);
785         }
786
787         return dma_async_device_register(dmadev);
788 }
789
790 static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
791         void *data)
792 {
793         int irq = platform_get_irq(pdev, nr);
794
795         if (irq <= 0)
796                 return -ENXIO;
797
798         return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
799 }
800
801 static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
802         void *data)
803 {
804         int irq = platform_get_irq(pdev, nr);
805         if (irq > 0)
806                 free_irq(irq, data);
807 }
808
809 static void sa11x0_dma_free_channels(struct dma_device *dmadev)
810 {
811         struct sa11x0_dma_chan *c, *cn;
812
813         list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
814                 list_del(&c->vc.chan.device_node);
815                 tasklet_kill(&c->vc.task);
816                 kfree(c);
817         }
818 }
819
820 static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
821 {
822         struct sa11x0_dma_dev *d;
823         struct resource *res;
824         unsigned i;
825         int ret;
826
827         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
828         if (!res)
829                 return -ENXIO;
830
831         d = kzalloc(sizeof(*d), GFP_KERNEL);
832         if (!d) {
833                 ret = -ENOMEM;
834                 goto err_alloc;
835         }
836
837         spin_lock_init(&d->lock);
838         INIT_LIST_HEAD(&d->chan_pending);
839
840         d->base = ioremap(res->start, resource_size(res));
841         if (!d->base) {
842                 ret = -ENOMEM;
843                 goto err_ioremap;
844         }
845
846         tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);
847
848         for (i = 0; i < NR_PHY_CHAN; i++) {
849                 struct sa11x0_dma_phy *p = &d->phy[i];
850
851                 p->dev = d;
852                 p->num = i;
853                 p->base = d->base + i * DMA_SIZE;
854                 writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
855                         DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
856                         p->base + DMA_DCSR_C);
857                 writel_relaxed(0, p->base + DMA_DDAR);
858
859                 ret = sa11x0_dma_request_irq(pdev, i, p);
860                 if (ret) {
861                         while (i) {
862                                 i--;
863                                 sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
864                         }
865                         goto err_irq;
866                 }
867         }
868
869         dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
870         d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
871         ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
872         if (ret) {
873                 dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
874                         ret);
875                 goto err_slave_reg;
876         }
877
878         platform_set_drvdata(pdev, d);
879         return 0;
880
881  err_slave_reg:
882         sa11x0_dma_free_channels(&d->slave);
883         for (i = 0; i < NR_PHY_CHAN; i++)
884                 sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
885  err_irq:
886         tasklet_kill(&d->task);
887         iounmap(d->base);
888  err_ioremap:
889         kfree(d);
890  err_alloc:
891         return ret;
892 }
893
894 static int __devexit sa11x0_dma_remove(struct platform_device *pdev)
895 {
896         struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
897         unsigned pch;
898
899         dma_async_device_unregister(&d->slave);
900
901         sa11x0_dma_free_channels(&d->slave);
902         for (pch = 0; pch < NR_PHY_CHAN; pch++)
903                 sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
904         tasklet_kill(&d->task);
905         iounmap(d->base);
906         kfree(d);
907
908         return 0;
909 }
910
911 #ifdef CONFIG_PM_SLEEP
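/*
 * On suspend each channel is stopped and its DDAR/DBSx/DBTx/DCSR state
 * saved.  The buffer registers are recorded (and the STRT bits swapped
 * when BIU is set) so that the buffer the hardware was working on is
 * restored as buffer A on resume.
 */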
912 static int sa11x0_dma_suspend(struct device *dev)
913 {
914         struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
915         unsigned pch;
916
917         for (pch = 0; pch < NR_PHY_CHAN; pch++) {
918                 struct sa11x0_dma_phy *p = &d->phy[pch];
919                 u32 dcsr, saved_dcsr;
920
921                 dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
922                 if (dcsr & DCSR_RUN) {
923                         writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
924                         dcsr = readl_relaxed(p->base + DMA_DCSR_R);
925                 }
926
927                 saved_dcsr &= DCSR_RUN | DCSR_IE;
928                 if (dcsr & DCSR_BIU) {
929                         p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
930                         p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
931                         p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
932                         p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
933                         saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
934                                       (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
935                 } else {
936                         p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
937                         p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
938                         p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
939                         p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
940                         saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
941                 }
942                 p->dcsr = saved_dcsr;
943
944                 writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
945         }
946
947         return 0;
948 }
949
950 static int sa11x0_dma_resume(struct device *dev)
951 {
952         struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
953         unsigned pch;
954
955         for (pch = 0; pch < NR_PHY_CHAN; pch++) {
956                 struct sa11x0_dma_phy *p = &d->phy[pch];
957                 struct sa11x0_dma_desc *txd = NULL;
958                 u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);
959
960                 WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));
961
962                 if (p->txd_done)
963                         txd = p->txd_done;
964                 else if (p->txd_load)
965                         txd = p->txd_load;
966
967                 if (!txd)
968                         continue;
969
970                 writel_relaxed(txd->ddar, p->base + DMA_DDAR);
971
972                 writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
973                 writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
974                 writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
975                 writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
976                 writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
977         }
978
979         return 0;
980 }
981 #endif
982
983 static const struct dev_pm_ops sa11x0_dma_pm_ops = {
#ifdef CONFIG_PM_SLEEP
984         .suspend_noirq = sa11x0_dma_suspend,
985         .resume_noirq = sa11x0_dma_resume,
986         .freeze_noirq = sa11x0_dma_suspend,
987         .thaw_noirq = sa11x0_dma_resume,
988         .poweroff_noirq = sa11x0_dma_suspend,
989         .restore_noirq = sa11x0_dma_resume,
#endif
990 };
991
992 static struct platform_driver sa11x0_dma_driver = {
993         .driver = {
994                 .name   = "sa11x0-dma",
995                 .owner  = THIS_MODULE,
996                 .pm     = &sa11x0_dma_pm_ops,
997         },
998         .probe          = sa11x0_dma_probe,
999         .remove         = __devexit_p(sa11x0_dma_remove),
1000 };
1001
1002 bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
1003 {
1004         if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
1005                 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
1006                 const char *p = param;
1007
1008                 return !strcmp(c->name, p);
1009         }
1010         return false;
1011 }
1012 EXPORT_SYMBOL(sa11x0_dma_filter_fn);
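
/*
 * Illustrative use only: a client driver would typically obtain one of
 * these channels by name via the filter function, for example:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sa11x0_dma_filter_fn, "Ser4SSPTr");
 *
 * where the name must match one of the entries in chan_desc[] above.
 */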
1013
1014 static int __init sa11x0_dma_init(void)
1015 {
1016         return platform_driver_register(&sa11x0_dma_driver);
1017 }
1018 subsys_initcall(sa11x0_dma_init);
1019
1020 static void __exit sa11x0_dma_exit(void)
1021 {
1022         platform_driver_unregister(&sa11x0_dma_driver);
1023 }
1024 module_exit(sa11x0_dma_exit);
1025
1026 MODULE_AUTHOR("Russell King");
1027 MODULE_DESCRIPTION("SA-11x0 DMA driver");
1028 MODULE_LICENSE("GPL v2");
1029 MODULE_ALIAS("platform:sa11x0-dma");