/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, PL081 has a single master.
 *
 * Memory to peripheral transfer may be visualized as
 *      Get data from memory to DMAC
 *      Until no data left
 *              On burst request from peripheral
 *                      Destination burst from DMAC to peripheral
 *                      Clear burst request
 *      Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to the documentation),
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry.
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/hardware/pl080.h>

#define DRIVER_NAME     "pl08xdmac"

static struct amba_driver pl08x_amba_driver;

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 */
struct vendor_data {
        u8 channels;
        bool dualmaster;
};

/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl.  Also note that these
 * are fixed 32-bit quantities.
 */
struct pl08x_lli {
        u32 src;
        u32 dst;
        u32 lli;
        u32 cctl;
};
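
/*
 * Illustration (not from the TRM): three LLIs at bus addresses B, B + 16
 * and B + 32 are chained as lli[0].lli = B + 16, lli[1].lli = B + 32 and
 * lli[2].lli = 0, where bit[0] of each pointer selects the AHB master
 * used to fetch the next entry - see pl08x_fill_lli_for_desc() below.
 */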

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lock: a spinlock for this struct
 */
struct pl08x_driver_data {
        struct dma_device slave;
        struct dma_device memcpy;
        void __iomem *base;
        struct amba_device *adev;
        const struct vendor_data *vd;
        struct pl08x_platform_data *pd;
        struct pl08x_phy_chan *phy_chans;
        struct dma_pool *pool;
        int pool_ctr;
        u8 lli_buses;
        u8 mem_buses;
        spinlock_t lock;
};

/*
 * PL08X specific defines
 */

/* Size (bytes) of each LLI buffer allocated for one transfer */
#define PL08X_LLI_TSFR_SIZE     0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define MAX_NUM_TSFR_LLIS       (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
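/* With the sizes above: 0x2000 / sizeof(struct pl08x_lli) = 8192 / 16 = 512 */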
#define PL08X_ALIGN             8

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
        return container_of(chan, struct pl08x_dma_chan, chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
        return container_of(tx, struct pl08x_txd, tx);
}

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
        unsigned int val;

        val = readl(ch->base + PL080_CH_CONFIG);
        return val & PL080_CONFIG_ACTIVE;
}

/*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
        struct pl08x_txd *txd)
{
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_phy_chan *phychan = plchan->phychan;
        struct pl08x_lli *lli = &txd->llis_va[0];
        u32 val;

        plchan->at = txd;

        /* Wait for channel inactive */
        while (pl08x_phy_channel_busy(phychan))
                cpu_relax();

        dev_vdbg(&pl08x->adev->dev,
                "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
                "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
                phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
                txd->ccfg);

        writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
        writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
        writel(lli->lli, phychan->base + PL080_CH_LLI);
        writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
        writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);

        /* Enable the DMA channel */
        /* Do not access config register until channel shows as disabled */
        while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
                cpu_relax();

        /* Do not access config register until channel shows as inactive */
        val = readl(phychan->base + PL080_CH_CONFIG);
        while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
                val = readl(phychan->base + PL080_CH_CONFIG);

        writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}

/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
        u32 val;
        int timeout;

        /* Set the HALT bit and wait for the FIFO to drain */
        val = readl(ch->base + PL080_CH_CONFIG);
        val |= PL080_CONFIG_HALT;
        writel(val, ch->base + PL080_CH_CONFIG);

        /* Wait for channel inactive */
        for (timeout = 1000; timeout; timeout--) {
                if (!pl08x_phy_channel_busy(ch))
                        break;
                udelay(1);
        }
        if (pl08x_phy_channel_busy(ch))
                pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
        u32 val;

        /* Clear the HALT bit */
        val = readl(ch->base + PL080_CH_CONFIG);
        val &= ~PL080_CONFIG_HALT;
        writel(val, ch->base + PL080_CH_CONFIG);
}

/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status.  This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (eg, when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
        struct pl08x_phy_chan *ch)
{
        u32 val = readl(ch->base + PL080_CH_CONFIG);

        val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
                 PL080_CONFIG_TC_IRQ_MASK);

        writel(val, ch->base + PL080_CH_CONFIG);

        writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
        writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}

static inline u32 get_bytes_in_cctl(u32 cctl)
{
        /* The source width defines the number of bytes */
        u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

        /* Mask before shifting so higher control bits can't leak in */
        switch ((cctl & PL080_CONTROL_SWIDTH_MASK) >>
                        PL080_CONTROL_SWIDTH_SHIFT) {
        case PL080_WIDTH_8BIT:
                break;
        case PL080_WIDTH_16BIT:
                bytes *= 2;
                break;
        case PL080_WIDTH_32BIT:
                bytes *= 4;
                break;
        }
        return bytes;
}
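
/*
 * Worked example (illustrative): a cctl encoding a 16-bit source width
 * and a transfer size of 0x100 describes 0x100 * 2 = 512 bytes.
 */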

/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
        struct pl08x_phy_chan *ch;
        struct pl08x_txd *txd;
        unsigned long flags;
        size_t bytes = 0;

        spin_lock_irqsave(&plchan->lock, flags);
        ch = plchan->phychan;
        txd = plchan->at;

        /*
         * Follow the LLIs to get the number of remaining
         * bytes in the currently active transaction.
         */
        if (ch && txd) {
                u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

                /* First get the remaining bytes in the active transfer */
                bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

                if (clli) {
                        struct pl08x_lli *llis_va = txd->llis_va;
                        dma_addr_t llis_bus = txd->llis_bus;
                        int index;

                        BUG_ON(clli < llis_bus || clli >= llis_bus +
                                sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

                        /*
                         * Locate the next LLI - as this is an array,
                         * it's simple maths to find.
                         */
                        index = (clli - llis_bus) / sizeof(struct pl08x_lli);

                        for (; index < MAX_NUM_TSFR_LLIS; index++) {
                                bytes += get_bytes_in_cctl(llis_va[index].cctl);

                                /*
                                 * A LLI pointer of 0 terminates the LLI list
                                 */
                                if (!llis_va[index].lli)
                                        break;
                        }
                }
        }

        /* Sum up all queued transactions */
        if (!list_empty(&plchan->pend_list)) {
                struct pl08x_txd *txdi;
                list_for_each_entry(txdi, &plchan->pend_list, node) {
                        struct pl08x_sg *dsg;
                        /* Walk each pending txd's own sg list */
                        list_for_each_entry(dsg, &txdi->dsg_list, node)
                                bytes += dsg->len;
                }
        }

        spin_unlock_irqrestore(&plchan->lock, flags);

        return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
                      struct pl08x_dma_chan *virt_chan)
{
        struct pl08x_phy_chan *ch = NULL;
        unsigned long flags;
        int i;

        for (i = 0; i < pl08x->vd->channels; i++) {
                ch = &pl08x->phy_chans[i];

                spin_lock_irqsave(&ch->lock, flags);

                if (!ch->serving) {
                        ch->serving = virt_chan;
                        ch->signal = -1;
                        spin_unlock_irqrestore(&ch->lock, flags);
                        break;
                }

                spin_unlock_irqrestore(&ch->lock, flags);
        }

        if (i == pl08x->vd->channels) {
                /* No physical channel available, cope with it */
                return NULL;
        }

        pm_runtime_get_sync(&pl08x->adev->dev);
        return ch;
}

static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
                                         struct pl08x_phy_chan *ch)
{
        unsigned long flags;

        spin_lock_irqsave(&ch->lock, flags);

        /* Stop the channel and clear its interrupts */
        pl08x_terminate_phy_chan(pl08x, ch);

        pm_runtime_put(&pl08x->adev->dev);

        /* Mark it as free */
        ch->serving = NULL;
        spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * LLI handling
 */

static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
        switch (coded) {
        case PL080_WIDTH_8BIT:
                return 1;
        case PL080_WIDTH_16BIT:
                return 2;
        case PL080_WIDTH_32BIT:
                return 4;
        default:
                break;
        }
        BUG();
        return 0;
}

static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
                                  size_t tsize)
{
        u32 retbits = cctl;

        /* Remove all src, dst and transfer size bits */
        retbits &= ~PL080_CONTROL_DWIDTH_MASK;
        retbits &= ~PL080_CONTROL_SWIDTH_MASK;
        retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

        /* Then set the bits according to the parameters */
        switch (srcwidth) {
        case 1:
                retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
                break;
        case 2:
                retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
                break;
        case 4:
                retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
                break;
        default:
                BUG();
                break;
        }

        switch (dstwidth) {
        case 1:
                retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
                break;
        case 2:
                retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
                break;
        case 4:
                retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
                break;
        default:
                BUG();
                break;
        }

        retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
        return retbits;
}
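
/*
 * For example (illustrative): pl08x_cctl_bits(cctl, 4, 2, 0x100) keeps
 * the other control bits of cctl but encodes a 32-bit source width, a
 * 16-bit destination width and a transfer size of 0x100 source-width
 * units, i.e. 1024 bytes.
 */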

struct pl08x_lli_build_data {
        struct pl08x_txd *txd;
        struct pl08x_bus_data srcbus;
        struct pl08x_bus_data dstbus;
        size_t remainder;
        u32 lli_bus;
};

/*
 * Autoselect a master bus to use for the transfer. The slave bus is
 * chosen as the victim if src & dst are not similarly aligned, i.e. if,
 * after aligning the master's address to the width requirements of the
 * transfer (by sending a few bytes byte by byte), the slave is still
 * not aligned, its width is reduced to BYTE.
 * - prefers the destination bus if both available
 * - prefers bus with fixed address (i.e. peripheral)
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
        struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
        if (!(cctl & PL080_CONTROL_DST_INCR)) {
                *mbus = &bd->dstbus;
                *sbus = &bd->srcbus;
        } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
                *mbus = &bd->srcbus;
                *sbus = &bd->dstbus;
        } else {
                if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
                        *mbus = &bd->dstbus;
                        *sbus = &bd->srcbus;
                } else {
                        *mbus = &bd->srcbus;
                        *sbus = &bd->dstbus;
                }
        }
}

/*
 * Fills in one LLI for a certain transfer descriptor and advances the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
        int num_llis, int len, u32 cctl)
{
        struct pl08x_lli *llis_va = bd->txd->llis_va;
        dma_addr_t llis_bus = bd->txd->llis_bus;

        BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

        llis_va[num_llis].cctl = cctl;
        llis_va[num_llis].src = bd->srcbus.addr;
        llis_va[num_llis].dst = bd->dstbus.addr;
        llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
                sizeof(struct pl08x_lli);
        llis_va[num_llis].lli |= bd->lli_bus;

        if (cctl & PL080_CONTROL_SRC_INCR)
                bd->srcbus.addr += len;
        if (cctl & PL080_CONTROL_DST_INCR)
                bd->dstbus.addr += len;

        BUG_ON(bd->remainder < len);

        bd->remainder -= len;
}

static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
                u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
{
        *cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
        pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
        (*total_bytes) += len;
}

/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
                              struct pl08x_txd *txd)
{
        struct pl08x_bus_data *mbus, *sbus;
        struct pl08x_lli_build_data bd;
        int num_llis = 0;
        u32 cctl, early_bytes = 0;
        size_t max_bytes_per_lli, total_bytes;
        struct pl08x_lli *llis_va;
        struct pl08x_sg *dsg;

        txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
        if (!txd->llis_va) {
                dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
                return 0;
        }

        pl08x->pool_ctr++;

        bd.txd = txd;
        bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
        cctl = txd->cctl;

        /* Find maximum width of the source bus */
        bd.srcbus.maxwidth =
                pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
                                       PL080_CONTROL_SWIDTH_SHIFT);

        /* Find maximum width of the destination bus */
        bd.dstbus.maxwidth =
                pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
                                       PL080_CONTROL_DWIDTH_SHIFT);

        list_for_each_entry(dsg, &txd->dsg_list, node) {
                total_bytes = 0;
                cctl = txd->cctl;

                bd.srcbus.addr = dsg->src_addr;
                bd.dstbus.addr = dsg->dst_addr;
                bd.remainder = dsg->len;
                bd.srcbus.buswidth = bd.srcbus.maxwidth;
                bd.dstbus.buswidth = bd.dstbus.maxwidth;

                pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

                dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
                        bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
                        bd.srcbus.buswidth,
                        bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
                        bd.dstbus.buswidth,
                        bd.remainder);
                dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
                        mbus == &bd.srcbus ? "src" : "dst",
                        sbus == &bd.srcbus ? "src" : "dst");

                /*
                 * Zero length is only allowed if all these requirements are
                 * met:
                 * - flow controller is peripheral.
                 * - src.addr is aligned to src.width
                 * - dst.addr is aligned to dst.width
                 *
                 * sg_len == 1 should be true, as there can be two cases here:
                 *
                 * - Memory addresses are contiguous and not scattered. The
                 *   client driver passes a single sg with a memory address
                 *   and zero length. We hand this to the controller, which
                 *   finishes the transfer once it receives the last burst
                 *   request from the peripheral.
                 *
                 * - Memory addresses are scattered and not contiguous. The
                 *   DMA controller cannot know when an LLI's transfer is
                 *   over, so it cannot load the next LLI. We must therefore
                 *   assume that only one LLI is supported, which rules out
                 *   scattered addresses.
                 */
                if (!bd.remainder) {
                        u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
                                PL080_CONFIG_FLOW_CONTROL_SHIFT;
                        if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
                                        (fc <= PL080_FLOW_SRC2DST_SRC))) {
                                dev_err(&pl08x->adev->dev, "%s sg len can't be zero\n",
                                        __func__);
                                return 0;
                        }

                        if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
                                        (bd.dstbus.addr % bd.dstbus.buswidth)) {
                                dev_err(&pl08x->adev->dev,
                                        "%s src & dst address must be aligned to src"
                                        " & dst width if peripheral is flow controller",
                                        __func__);
                                return 0;
                        }

                        cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
                                        bd.dstbus.buswidth, 0);
                        pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
                        break;
                }

                /*
                 * Send byte by byte for following cases
                 * - Less than a bus width available
                 * - until master bus is aligned
                 */
                if (bd.remainder < mbus->buswidth)
                        early_bytes = bd.remainder;
                else if ((mbus->addr) % (mbus->buswidth)) {
                        early_bytes = mbus->buswidth - (mbus->addr) %
                                (mbus->buswidth);
                        if ((bd.remainder - early_bytes) < mbus->buswidth)
                                early_bytes = bd.remainder;
                }

                if (early_bytes) {
                        dev_vdbg(&pl08x->adev->dev,
                                "%s byte width LLIs (remain 0x%08zx)\n",
                                __func__, bd.remainder);
                        prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
                                &total_bytes);
                }

                if (bd.remainder) {
                        /*
                         * Master now aligned
                         * - if slave is not then we must set its width down
                         */
                        if (sbus->addr % sbus->buswidth) {
                                dev_dbg(&pl08x->adev->dev,
                                        "%s set down bus width to one byte\n",
                                        __func__);

                                sbus->buswidth = 1;
                        }

                        /*
                         * Bytes transferred = tsize * src width, not
                         * MIN(buswidths)
                         */
                        max_bytes_per_lli = bd.srcbus.buswidth *
                                PL080_CONTROL_TRANSFER_SIZE_MASK;
                        dev_vdbg(&pl08x->adev->dev,
                                "%s max bytes per lli = %zu\n",
                                __func__, max_bytes_per_lli);

                        /*
                         * Make largest possible LLIs until less than one bus
                         * width left
                         */
                        while (bd.remainder > (mbus->buswidth - 1)) {
                                size_t lli_len, tsize, width;

                                /*
                                 * If enough left try to send max possible,
                                 * otherwise try to send the remainder
                                 */
                                lli_len = min(bd.remainder, max_bytes_per_lli);

                                /*
                                 * Check against maximum bus alignment:
                                 * calculate the actual transfer size in
                                 * relation to bus width and get a maximum
                                 * remainder of the highest bus width - 1
                                 */
                                width = max(mbus->buswidth, sbus->buswidth);
                                lli_len = (lli_len / width) * width;
                                tsize = lli_len / bd.srcbus.buswidth;

                                dev_vdbg(&pl08x->adev->dev,
                                        "%s fill lli with single lli chunk of "
                                        "size 0x%08zx (remainder 0x%08zx)\n",
                                        __func__, lli_len, bd.remainder);

                                cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
                                        bd.dstbus.buswidth, tsize);
                                pl08x_fill_lli_for_desc(&bd, num_llis++,
                                                lli_len, cctl);
                                total_bytes += lli_len;
                        }

                        /*
                         * Send any odd bytes
                         */
                        if (bd.remainder) {
                                dev_vdbg(&pl08x->adev->dev,
                                        "%s align with boundary, send odd bytes (remain %zu)\n",
                                        __func__, bd.remainder);
                                prep_byte_width_lli(&bd, &cctl, bd.remainder,
                                                num_llis++, &total_bytes);
                        }
                }

                if (total_bytes != dsg->len) {
                        dev_err(&pl08x->adev->dev,
                                "%s size of encoded LLIs doesn't match total txd, transferred 0x%08zx from size 0x%08zx\n",
                                __func__, total_bytes, dsg->len);
                        return 0;
                }

                if (num_llis >= MAX_NUM_TSFR_LLIS) {
                        dev_err(&pl08x->adev->dev,
                                "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
                                __func__, (u32) MAX_NUM_TSFR_LLIS);
                        return 0;
                }
        }

        llis_va = txd->llis_va;
        /* The final LLI terminates the LLI list. */
        llis_va[num_llis - 1].lli = 0;
        /* The final LLI element shall also fire an interrupt. */
        llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;

#ifdef VERBOSE_DEBUG
        {
                int i;

                dev_vdbg(&pl08x->adev->dev,
                         "%-3s %-9s  %-10s %-10s %-10s %s\n",
                         "lli", "", "csrc", "cdst", "clli", "cctl");
                for (i = 0; i < num_llis; i++) {
                        dev_vdbg(&pl08x->adev->dev,
                                 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                 i, &llis_va[i], llis_va[i].src,
                                 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
                                );
                }
        }
#endif

        return num_llis;
}
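
/*
 * Worked example for the splitting above (illustrative): a 4100-byte
 * transfer on 32-bit buses whose source and destination are both 2
 * bytes off 32-bit alignment is encoded as three LLIs: 2 bytes sent
 * byte by byte to align the master bus, one 4096-byte LLI (tsize
 * 1024), and a final byte-width LLI for the 2 odd bytes left over.
 */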

/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
                           struct pl08x_txd *txd)
{
        struct pl08x_sg *dsg, *_dsg;

        /* Free the LLI */
        if (txd->llis_va)
                dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

        pl08x->pool_ctr--;

        list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
                list_del(&dsg->node);
                kfree(dsg);
        }

        kfree(txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
                                struct pl08x_dma_chan *plchan)
{
        struct pl08x_txd *txdi = NULL;
        struct pl08x_txd *next;

        if (!list_empty(&plchan->pend_list)) {
                list_for_each_entry_safe(txdi,
                                         next, &plchan->pend_list, node) {
                        list_del(&txdi->node);
                        pl08x_free_txd(pl08x, txdi);
                }
        }
}

/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
        return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
}

/*
 * This should be called with the channel plchan->lock held
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
                            struct pl08x_txd *txd)
{
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_phy_chan *ch;
        int ret;

        /* Check if we already have a channel */
        if (plchan->phychan)
                return 0;

        ch = pl08x_get_phy_channel(pl08x, plchan);
        if (!ch) {
                /* No physical channel available, cope with it */
                dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
                return -EBUSY;
        }

        /*
         * OK we have a physical channel: for memcpy() this is all we
         * need, but for slaves the physical signals may be muxed!
         * Can the platform allow us to use this channel?
         */
        if (plchan->slave && pl08x->pd->get_signal) {
                ret = pl08x->pd->get_signal(plchan);
                if (ret < 0) {
                        dev_dbg(&pl08x->adev->dev,
                                "unable to use physical channel %d for transfer on %s due to platform restrictions\n",
                                ch->id, plchan->name);
                        /* Release physical channel & return */
                        pl08x_put_phy_channel(pl08x, ch);
                        return -EBUSY;
                }
                ch->signal = ret;

                /* Assign the flow control signal to this channel */
                if (txd->direction == DMA_TO_DEVICE)
                        txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
                else if (txd->direction == DMA_FROM_DEVICE)
                        txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
        }

        dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
                 ch->id,
                 ch->signal,
                 plchan->name);

        plchan->phychan_hold++;
        plchan->phychan = ch;

        return 0;
}

static void release_phy_channel(struct pl08x_dma_chan *plchan)
{
        struct pl08x_driver_data *pl08x = plchan->host;

        if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
                pl08x->pd->put_signal(plchan);
                plchan->phychan->signal = -1;
        }
        pl08x_put_phy_channel(pl08x, plchan->phychan);
        plchan->phychan = NULL;
}

static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
        struct pl08x_txd *txd = to_pl08x_txd(tx);
        unsigned long flags;

        spin_lock_irqsave(&plchan->lock, flags);

        plchan->chan.cookie += 1;
        if (plchan->chan.cookie < 0)
                plchan->chan.cookie = 1;
        tx->cookie = plchan->chan.cookie;

        /* Put this onto the pending list */
        list_add_tail(&txd->node, &plchan->pend_list);

        /*
         * If there was no physical channel available for this memcpy,
         * stack the request up and indicate that the channel is waiting
         * for a free physical channel.
         */
        if (!plchan->slave && !plchan->phychan) {
                /* Do this memcpy whenever there is a channel ready */
                plchan->state = PL08X_CHAN_WAITING;
                plchan->waiting = txd;
        } else {
                plchan->phychan_hold--;
        }

        spin_unlock_irqrestore(&plchan->lock, flags);

        return tx->cookie;
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
                struct dma_chan *chan, unsigned long flags)
{
        struct dma_async_tx_descriptor *retval = NULL;

        return retval;
}

/*
 * Code polling dma_async_is_complete() in a tight loop may cause
 * problems.  If slaves are relying on interrupts to signal completion
 * this function must not be called with interrupts disabled.
 */
static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
                dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;
        enum dma_status ret;
        u32 bytesleft = 0;

        last_used = plchan->chan.cookie;
        last_complete = plchan->lc;

        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret == DMA_SUCCESS) {
                dma_set_tx_state(txstate, last_complete, last_used, 0);
                return ret;
        }

        /*
         * This cookie not complete yet
         */
        last_used = plchan->chan.cookie;
        last_complete = plchan->lc;

        /* Get number of bytes left in the active transactions and queue */
        bytesleft = pl08x_getbytes_chan(plchan);

        dma_set_tx_state(txstate, last_complete, last_used,
                         bytesleft);

        if (plchan->state == PL08X_CHAN_PAUSED)
                return DMA_PAUSED;

        /* Whether waiting or running, we're in progress */
        return DMA_IN_PROGRESS;
}

/* PrimeCell DMA extension */
struct burst_table {
        u32 burstwords;
        u32 reg;
};

static const struct burst_table burst_sizes[] = {
        {
                .burstwords = 256,
                .reg = PL080_BSIZE_256,
        },
        {
                .burstwords = 128,
                .reg = PL080_BSIZE_128,
        },
        {
                .burstwords = 64,
                .reg = PL080_BSIZE_64,
        },
        {
                .burstwords = 32,
                .reg = PL080_BSIZE_32,
        },
        {
                .burstwords = 16,
                .reg = PL080_BSIZE_16,
        },
        {
                .burstwords = 8,
                .reg = PL080_BSIZE_8,
        },
        {
                .burstwords = 4,
                .reg = PL080_BSIZE_4,
        },
        {
                .burstwords = 0,
                .reg = PL080_BSIZE_1,
        },
};

/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port.  We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(u8 src, u8 dst)
{
        u32 cctl = 0;

        if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
                cctl |= PL080_CONTROL_DST_AHB2;
        if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
                cctl |= PL080_CONTROL_SRC_AHB2;

        return cctl;
}
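
/*
 * For example (illustrative): if both src and dst may use either master
 * (PL08X_AHB1 | PL08X_AHB2), the function above routes the source to
 * AHB1 and the destination to AHB2, keeping the two on separate ports.
 */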

static u32 pl08x_cctl(u32 cctl)
{
        cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
                  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
                  PL080_CONTROL_PROT_MASK);

        /* Access the cell in privileged mode, non-bufferable, non-cacheable */
        return cctl | PL080_CONTROL_PROT_SYS;
}

static u32 pl08x_width(enum dma_slave_buswidth width)
{
        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                return PL080_WIDTH_8BIT;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                return PL080_WIDTH_16BIT;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                return PL080_WIDTH_32BIT;
        default:
                return ~0;
        }
}

static u32 pl08x_burst(u32 maxburst)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
                if (burst_sizes[i].burstwords <= maxburst)
                        break;

        return burst_sizes[i].reg;
}
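
/*
 * For example (illustrative): pl08x_burst(20) walks the descending
 * burst_sizes[] table and returns PL080_BSIZE_16, the largest burst
 * that does not exceed the requested maxburst.
 */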

static int dma_set_runtime_config(struct dma_chan *chan,
                                  struct dma_slave_config *config)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        struct pl08x_driver_data *pl08x = plchan->host;
        enum dma_slave_buswidth addr_width;
        u32 width, burst, maxburst;
        u32 cctl = 0;

        if (!plchan->slave)
                return -EINVAL;

        /* Transfer direction */
        plchan->runtime_direction = config->direction;
        if (config->direction == DMA_TO_DEVICE) {
                addr_width = config->dst_addr_width;
                maxburst = config->dst_maxburst;
        } else if (config->direction == DMA_FROM_DEVICE) {
                addr_width = config->src_addr_width;
                maxburst = config->src_maxburst;
        } else {
                dev_err(&pl08x->adev->dev,
                        "bad runtime_config: alien transfer direction\n");
                return -EINVAL;
        }

        width = pl08x_width(addr_width);
        if (width == ~0) {
                dev_err(&pl08x->adev->dev,
                        "bad runtime_config: alien address width\n");
                return -EINVAL;
        }

        cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
        cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

        /*
         * If this channel will only request single transfers, set this
         * down to ONE element.  Also select one element if no maxburst
         * is specified.
         */
        if (plchan->cd->single)
                maxburst = 1;

        burst = pl08x_burst(maxburst);
        cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
        cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

        if (plchan->runtime_direction == DMA_FROM_DEVICE) {
                plchan->src_addr = config->src_addr;
                plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
                        pl08x_select_bus(plchan->cd->periph_buses,
                                         pl08x->mem_buses);
        } else {
                plchan->dst_addr = config->dst_addr;
                plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
                        pl08x_select_bus(pl08x->mem_buses,
                                         plchan->cd->periph_buses);
        }

        dev_dbg(&pl08x->adev->dev,
                "configured channel %s (%s) for %s, data width %d, "
                "maxburst %d words, LE, CCTL=0x%08x\n",
                dma_chan_name(chan), plchan->name,
                (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
                addr_width,
                maxburst,
                cctl);

        return 0;
}
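
/*
 * Illustrative sketch only, not part of the driver: a slave driver
 * would normally reach dma_set_runtime_config() above through the
 * generic dmaengine wrapper, roughly like this (the peripheral FIFO
 * address and burst size are hypothetical):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_FROM_DEVICE,
 *		.src_addr = periph_fifo_phys,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 16,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */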

/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&plchan->lock, flags);
        /* Something is already active, or we're waiting for a channel... */
        if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
                spin_unlock_irqrestore(&plchan->lock, flags);
                return;
        }

        /* Take the first element in the queue and execute it */
        if (!list_empty(&plchan->pend_list)) {
                struct pl08x_txd *next;

                next = list_first_entry(&plchan->pend_list,
                                        struct pl08x_txd,
                                        node);
                list_del(&next->node);
                plchan->state = PL08X_CHAN_RUNNING;

                pl08x_start_txd(plchan, next);
        }

        spin_unlock_irqrestore(&plchan->lock, flags);
}

static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
                                        struct pl08x_txd *txd)
{
        struct pl08x_driver_data *pl08x = plchan->host;
        unsigned long flags;
        int num_llis, ret;

        num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
        if (!num_llis) {
                spin_lock_irqsave(&plchan->lock, flags);
                pl08x_free_txd(pl08x, txd);
                spin_unlock_irqrestore(&plchan->lock, flags);
                return -EINVAL;
        }

        spin_lock_irqsave(&plchan->lock, flags);

        /*
         * See if we already have a physical channel allocated,
         * else this is the time to try to get one.
         */
        ret = prep_phy_channel(plchan, txd);
        if (ret) {
                /*
                 * No physical channel was available.
                 *
                 * memcpy transfers can be sorted out at submission time.
                 *
                 * Slave transfers may have been denied due to platform
                 * channel muxing restrictions.  Since there is no guarantee
                 * that this will ever be resolved, and the signal must be
                 * acquired AFTER acquiring the physical channel, we will let
                 * them be rejected with -EBUSY here. The drivers can retry
                 * the prep() call if they are eager on doing this using DMA.
                 */
                if (plchan->slave) {
                        pl08x_free_txd_list(pl08x, plchan);
                        pl08x_free_txd(pl08x, txd);
                        spin_unlock_irqrestore(&plchan->lock, flags);
                        return -EBUSY;
                }
        } else
                /*
                 * Else we're all set, paused and ready to roll, status
                 * will switch to PL08X_CHAN_RUNNING when we call
                 * issue_pending(). If there is something running on the
                 * channel already we don't change its state.
                 */
                if (plchan->state == PL08X_CHAN_IDLE)
                        plchan->state = PL08X_CHAN_PAUSED;

        spin_unlock_irqrestore(&plchan->lock, flags);

        return 0;
}

static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
        unsigned long flags)
{
        struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

        if (txd) {
                dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
                txd->tx.flags = flags;
                txd->tx.tx_submit = pl08x_tx_submit;
                INIT_LIST_HEAD(&txd->node);
                INIT_LIST_HEAD(&txd->dsg_list);

                /* Always enable error and terminal interrupts */
                txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
                            PL080_CONFIG_TC_IRQ_MASK;
        }
        return txd;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
                struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                size_t len, unsigned long flags)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_txd *txd;
        struct pl08x_sg *dsg;
        int ret;

        txd = pl08x_get_txd(plchan, flags);
        if (!txd) {
                dev_err(&pl08x->adev->dev,
                        "%s no memory for descriptor\n", __func__);
                return NULL;
        }

        dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
        if (!dsg) {
                pl08x_free_txd(pl08x, txd);
                dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
                                __func__);
                return NULL;
        }
        list_add_tail(&dsg->node, &txd->dsg_list);

        txd->direction = DMA_NONE;
        dsg->src_addr = src;
        dsg->dst_addr = dest;
        dsg->len = len;

        /* Set platform data for m2m */
        txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
        txd->cctl = pl08x->pd->memcpy_channel.cctl &
                        ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

        /* Both to be incremented or the code will break */
        txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

        if (pl08x->vd->dualmaster)
                txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
                                              pl08x->mem_buses);

        ret = pl08x_prep_channel_resources(plchan, txd);
        if (ret)
                return NULL;

        return &txd->tx;
}
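
/*
 * Illustrative sketch only: a client would typically drive the memcpy
 * path above through the generic dmaengine API, e.g. (error handling
 * omitted; "dst", "src" and "len" are hypothetical DMA-mapped values):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						  DMA_CTRL_ACK);
 *	tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 */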

static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_data_direction direction,
                unsigned long flags)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_txd *txd;
        struct pl08x_sg *dsg;
        struct scatterlist *sg;
        dma_addr_t slave_addr;
        int ret, tmp;

        dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
                        __func__, sgl->length, plchan->name);

        txd = pl08x_get_txd(plchan, flags);
        if (!txd) {
                dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
                return NULL;
        }

        if (direction != plchan->runtime_direction)
                dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
                        "the direction configured for the PrimeCell\n",
                        __func__);

        /*
         * Set up addresses, the PrimeCell configured address
         * will take precedence since this may configure the
         * channel target address dynamically at runtime.
         */
        txd->direction = direction;

        if (direction == DMA_TO_DEVICE) {
                txd->cctl = plchan->dst_cctl;
                slave_addr = plchan->dst_addr;
        } else if (direction == DMA_FROM_DEVICE) {
                txd->cctl = plchan->src_cctl;
                slave_addr = plchan->src_addr;
        } else {
                pl08x_free_txd(pl08x, txd);
                dev_err(&pl08x->adev->dev,
                        "%s direction unsupported\n", __func__);
                return NULL;
        }

        if (plchan->cd->device_fc)
                tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
                        PL080_FLOW_PER2MEM_PER;
        else
                tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
                        PL080_FLOW_PER2MEM;

        txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;

        for_each_sg(sgl, sg, sg_len, tmp) {
                dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
                if (!dsg) {
                        pl08x_free_txd(pl08x, txd);
                        dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
                                        __func__);
                        return NULL;
                }
                list_add_tail(&dsg->node, &txd->dsg_list);

                dsg->len = sg_dma_len(sg);
                if (direction == DMA_TO_DEVICE) {
                        dsg->src_addr = sg_phys(sg);
                        dsg->dst_addr = slave_addr;
                } else {
                        dsg->src_addr = slave_addr;
                        dsg->dst_addr = sg_phys(sg);
                }
        }

        ret = pl08x_prep_channel_resources(plchan, txd);
        if (ret)
                return NULL;

        return &txd->tx;
}
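
/*
 * Illustrative sketch only: the slave path above is normally reached
 * like this (hypothetical scatterlist "sgl" with "nents" mapped
 * entries, channel already configured via DMA_SLAVE_CONFIG):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_slave_sg(chan, sgl, nents,
 *						DMA_FROM_DEVICE,
 *						DMA_PREP_INTERRUPT);
 *	tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 */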

static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                         unsigned long arg)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        struct pl08x_driver_data *pl08x = plchan->host;
        unsigned long flags;
        int ret = 0;

        /* Controls applicable to inactive channels */
        if (cmd == DMA_SLAVE_CONFIG) {
                return dma_set_runtime_config(chan,
                                              (struct dma_slave_config *)arg);
        }

        /*
         * Anything succeeds on channels with no physical allocation and
         * no queued transfers.
         */
        spin_lock_irqsave(&plchan->lock, flags);
        if (!plchan->phychan && !plchan->at) {
                spin_unlock_irqrestore(&plchan->lock, flags);
                return 0;
        }

        switch (cmd) {
        case DMA_TERMINATE_ALL:
                plchan->state = PL08X_CHAN_IDLE;

                if (plchan->phychan) {
                        pl08x_terminate_phy_chan(pl08x, plchan->phychan);

                        /*
                         * Mark physical channel as free and free any slave
                         * signal
                         */
                        release_phy_channel(plchan);
                }
                /* Dequeue jobs and free LLIs */
                if (plchan->at) {
                        pl08x_free_txd(pl08x, plchan->at);
                        plchan->at = NULL;
                }
                /* Dequeue jobs not yet fired as well */
                pl08x_free_txd_list(pl08x, plchan);
                break;
        case DMA_PAUSE:
                pl08x_pause_phy_chan(plchan->phychan);
                plchan->state = PL08X_CHAN_PAUSED;
                break;
        case DMA_RESUME:
                pl08x_resume_phy_chan(plchan->phychan);
                plchan->state = PL08X_CHAN_RUNNING;
                break;
        default:
                /* Unknown command */
                ret = -ENXIO;
                break;
        }

        spin_unlock_irqrestore(&plchan->lock, flags);

        return ret;
}
1469
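/*
 * Channel filter for dma_request_channel().  Matches a virtual channel
 * by the name assigned in the platform data.  A client would typically
 * request a channel along these lines (the "uart0_tx" name is only a
 * made-up, board-specific example):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
 */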
1470 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1471 {
1472         struct pl08x_dma_chan *plchan;
1473         char *name = chan_id;
1474
1475         /* Reject channels for devices not bound to this driver */
1476         if (chan->device->dev->driver != &pl08x_amba_driver.drv)
1477                 return false;
1478
1479         plchan = to_pl08x_chan(chan);
1480
1481         /* Match on the channel name given in the platform data */
1482         if (!strcmp(plchan->name, name))
1483                 return true;
1484
1485         return false;
1486 }
1487
1488 /*
1489  * Just make sure that the device is there and active.
1490  * TODO: turn this bit on/off depending on the number of physical channels
1491  * actually in use; if none are in use, shut it off to save some power,
1492  * and gate the clock at the same time.
1493  */
1494 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1495 {
1496         writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
1497 }
1498
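/*
 * Undo the DMA-API mappings of a completed descriptor, honouring the
 * DMA_COMPL_SKIP_*_UNMAP and DMA_COMPL_*_UNMAP_SINGLE flags for every
 * entry in the descriptor's sg list.  Only used for memcpy transfers:
 * slave transfers leave the mappings to the client.
 */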
1499 static void pl08x_unmap_buffers(struct pl08x_txd *txd)
1500 {
1501         struct device *dev = txd->tx.chan->device->dev;
1502         struct pl08x_sg *dsg;
1503
1504         if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
1505                 if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) {
1506                         list_for_each_entry(dsg, &txd->dsg_list, node)
1507                                 dma_unmap_single(dev, dsg->src_addr, dsg->len,
1508                                                 DMA_TO_DEVICE);
1509                 } else {
1510                         list_for_each_entry(dsg, &txd->dsg_list, node)
1511                                 dma_unmap_page(dev, dsg->src_addr, dsg->len,
1512                                                 DMA_TO_DEVICE);
1513                 }
1514         }
1515         if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1516                 if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
1517                         list_for_each_entry(dsg, &txd->dsg_list, node)
1518                                 dma_unmap_single(dev, dsg->dst_addr, dsg->len,
1519                                                 DMA_FROM_DEVICE);
1520                 else
1521                         list_for_each_entry(dsg, &txd->dsg_list, node)
1522                                 dma_unmap_page(dev, dsg->dst_addr, dsg->len,
1523                                                 DMA_FROM_DEVICE);
1524         }
1525 }
1526
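/*
 * Per-channel completion tasklet.  Retires the descriptor that just
 * completed, starts the next pending descriptor or releases the
 * physical channel (handing it to any waiting memcpy channel), and
 * finally runs the completion callback with the channel lock dropped.
 */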
1527 static void pl08x_tasklet(unsigned long data)
1528 {
1529         struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
1530         struct pl08x_driver_data *pl08x = plchan->host;
1531         struct pl08x_txd *txd;
1532         unsigned long flags;
1533
1534         spin_lock_irqsave(&plchan->lock, flags);
1535
1536         txd = plchan->at;
1537         plchan->at = NULL;
1538
1539         if (txd) {
1540                 /* Update last completed */
1541                 plchan->lc = txd->tx.cookie;
1542         }
1543
1544         /* If a new descriptor is queued, set it up; plchan->at is NULL here */
1545         if (!list_empty(&plchan->pend_list)) {
1546                 struct pl08x_txd *next;
1547
1548                 next = list_first_entry(&plchan->pend_list,
1549                                         struct pl08x_txd,
1550                                         node);
1551                 list_del(&next->node);
1552
1553                 pl08x_start_txd(plchan, next);
1554         } else if (plchan->phychan_hold) {
1555                 /*
1556                  * This channel is still in use - we have a new txd being
1557                  * prepared and will soon be queued.  Don't give up the
1558                  * physical channel.
1559                  */
1560         } else {
1561                 struct pl08x_dma_chan *waiting = NULL;
1562
1563                 /*
1564                  * No more jobs, so free up the physical channel.
1565                  * Free any allocated signal on slave transfers too.
1566                  */
1567                 release_phy_channel(plchan);
1568                 plchan->state = PL08X_CHAN_IDLE;
1569
1570                 /*
1571                  * And NOW, before anyone else can grab that freed-up
1572                  * physical channel, see if there is some memcpy pending
1573                  * that seriously needs to start, having stacked up
1574                  * while we were choking the physical channels with data.
1575                  */
1576                 list_for_each_entry(waiting, &pl08x->memcpy.channels,
1577                                     chan.device_node) {
1578                         if (waiting->state == PL08X_CHAN_WAITING &&
1579                                 waiting->waiting != NULL) {
1580                                 int ret;
1581
1582                                 /* This should REALLY not fail now */
1583                                 ret = prep_phy_channel(waiting,
1584                                                        waiting->waiting);
1585                                 BUG_ON(ret);
1586                                 waiting->phychan_hold--;
1587                                 waiting->state = PL08X_CHAN_RUNNING;
1588                                 waiting->waiting = NULL;
1589                                 pl08x_issue_pending(&waiting->chan);
1590                                 break;
1591                         }
1592                 }
1593         }
1594
1595         spin_unlock_irqrestore(&plchan->lock, flags);
1596
1597         if (txd) {
1598                 dma_async_tx_callback callback = txd->tx.callback;
1599                 void *callback_param = txd->tx.callback_param;
1600
1601                 /* Don't try to unmap buffers on slave channels */
1602                 if (!plchan->slave)
1603                         pl08x_unmap_buffers(txd);
1604
1605                 /* Free the descriptor */
1606                 spin_lock_irqsave(&plchan->lock, flags);
1607                 pl08x_free_txd(pl08x, txd);
1608                 spin_unlock_irqrestore(&plchan->lock, flags);
1609
1610                 /* Callback to signal completion */
1611                 if (callback)
1612                         callback(callback_param);
1613         }
1614 }
1615
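/*
 * Interrupt handler: acknowledge any pending error and terminal count
 * status, then schedule the tasklet of each virtual channel currently
 * served by a signalling physical channel.
 */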
1616 static irqreturn_t pl08x_irq(int irq, void *dev)
1617 {
1618         struct pl08x_driver_data *pl08x = dev;
1619         u32 mask = 0, err, tc, i;
1620
1621         /* check & clear - ERR & TC interrupts */
1622         err = readl(pl08x->base + PL080_ERR_STATUS);
1623         if (err) {
1624                 dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
1625                         __func__, err);
1626                 writel(err, pl08x->base + PL080_ERR_CLEAR);
1627         }
1628         tc = readl(pl08x->base + PL080_INT_STATUS);
1629         if (tc)
1630                 writel(tc, pl08x->base + PL080_TC_CLEAR);
1631
1632         if (!err && !tc)
1633                 return IRQ_NONE;
1634
1635         for (i = 0; i < pl08x->vd->channels; i++) {
1636                 if ((1 << i) & (err | tc)) {
1637                         /* Locate physical channel */
1638                         struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
1639                         struct pl08x_dma_chan *plchan = phychan->serving;
1640
1641                         if (!plchan) {
1642                                 dev_err(&pl08x->adev->dev,
1643                                         "%s error or TC interrupt on unused channel %d\n",
1644                                         __func__, i);
1645                                 continue;
1646                         }
1647
1648                         /* Schedule tasklet on this channel */
1649                         tasklet_schedule(&plchan->tasklet);
1650                         mask |= (1 << i);
1651                 }
1652         }
1653
1654         return mask ? IRQ_HANDLED : IRQ_NONE;
1655 }
1656
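/*
 * Initialise a slave channel from its platform data: name it after the
 * bus_id and derive the src/dst control words from the platform cctl,
 * adding the memory-side address increment and the AHB master
 * selection for each direction.
 */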
1657 static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
1658 {
1659         u32 cctl = pl08x_cctl(chan->cd->cctl);
1660
1661         chan->slave = true;
1662         chan->name = chan->cd->bus_id;
1663         chan->src_addr = chan->cd->addr;
1664         chan->dst_addr = chan->cd->addr;
1665         chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
1666                 pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
1667         chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
1668                 pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
1669 }
1670
1671 /*
1672  * Initialise the DMAC memcpy/slave channels.
1673  * Make a local wrapper to hold required data
1674  */
1675 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1676                 struct dma_device *dmadev, unsigned int channels, bool slave)
1677 {
1678         struct pl08x_dma_chan *chan;
1679         int i;
1680
1681         INIT_LIST_HEAD(&dmadev->channels);
1682
1683         /*
1684          * Register as many memcpy channels as we have physical channels;
1685          * we won't always be able to use them all, but the code will have
1686          * to cope with that situation.
1687          */
1688         for (i = 0; i < channels; i++) {
1689                 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1690                 if (!chan) {
1691                         dev_err(&pl08x->adev->dev,
1692                                 "%s no memory for channel\n", __func__);
1693                         return -ENOMEM;
1694                 }
1695
1696                 chan->host = pl08x;
1697                 chan->state = PL08X_CHAN_IDLE;
1698
1699                 if (slave) {
1700                         chan->cd = &pl08x->pd->slave_channels[i];
1701                         pl08x_dma_slave_init(chan);
1702                 } else {
1703                         chan->cd = &pl08x->pd->memcpy_channel;
1704                         chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
1705                         if (!chan->name) {
1706                                 kfree(chan);
1707                                 return -ENOMEM;
1708                         }
1709                 }
1710                 if (chan->cd->circular_buffer) {
1711                         dev_err(&pl08x->adev->dev,
1712                                 "channel %s: circular buffers not supported\n",
1713                                 chan->name);
                        if (!slave)
                                kfree(chan->name);
1714                         kfree(chan);
1715                         continue;
1716                 }
1717                 dev_dbg(&pl08x->adev->dev,
1718                          "initialize virtual channel \"%s\"\n",
1719                          chan->name);
1720
1721                 chan->chan.device = dmadev;
1722                 chan->chan.cookie = 0;
1723                 chan->lc = 0;
1724
1725                 spin_lock_init(&chan->lock);
1726                 INIT_LIST_HEAD(&chan->pend_list);
1727                 tasklet_init(&chan->tasklet, pl08x_tasklet,
1728                              (unsigned long) chan);
1729
1730                 list_add_tail(&chan->chan.device_node, &dmadev->channels);
1731         }
1732         dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
1733                  i, slave ? "slave" : "memcpy");
1734         return i;
1735 }
1736
1737 static void pl08x_free_virtual_channels(struct dma_device *dmadev)
1738 {
1739         struct pl08x_dma_chan *chan = NULL;
1740         struct pl08x_dma_chan *next;
1741
1742         list_for_each_entry_safe(chan,
1743                                  next, &dmadev->channels, chan.device_node) {
1744                 list_del(&chan->chan.device_node);
1745                 kfree(chan);
1746         }
1747 }
1748
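/*
 * Debugfs support: expose one file per DMAC showing which virtual
 * channel is served by each physical channel, plus the state of every
 * virtual memcpy and slave channel.
 */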
1749 #ifdef CONFIG_DEBUG_FS
1750 static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
1751 {
1752         switch (state) {
1753         case PL08X_CHAN_IDLE:
1754                 return "idle";
1755         case PL08X_CHAN_RUNNING:
1756                 return "running";
1757         case PL08X_CHAN_PAUSED:
1758                 return "paused";
1759         case PL08X_CHAN_WAITING:
1760                 return "waiting";
1761         default:
1762                 break;
1763         }
1764         return "UNKNOWN STATE";
1765 }
1766
1767 static int pl08x_debugfs_show(struct seq_file *s, void *data)
1768 {
1769         struct pl08x_driver_data *pl08x = s->private;
1770         struct pl08x_dma_chan *chan;
1771         struct pl08x_phy_chan *ch;
1772         unsigned long flags;
1773         int i;
1774
1775         seq_printf(s, "PL08x physical channels:\n");
1776         seq_printf(s, "CHANNEL:\tUSER:\n");
1777         seq_printf(s, "--------\t-----\n");
1778         for (i = 0; i < pl08x->vd->channels; i++) {
1779                 struct pl08x_dma_chan *virt_chan;
1780
1781                 ch = &pl08x->phy_chans[i];
1782
1783                 spin_lock_irqsave(&ch->lock, flags);
1784                 virt_chan = ch->serving;
1785
1786                 seq_printf(s, "%d\t\t%s\n",
1787                            ch->id, virt_chan ? virt_chan->name : "(none)");
1788
1789                 spin_unlock_irqrestore(&ch->lock, flags);
1790         }
1791
1792         seq_printf(s, "\nPL08x virtual memcpy channels:\n");
1793         seq_printf(s, "CHANNEL:\tSTATE:\n");
1794         seq_printf(s, "--------\t------\n");
1795         list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
1796                 seq_printf(s, "%s\t\t%s\n", chan->name,
1797                            pl08x_state_str(chan->state));
1798         }
1799
1800         seq_printf(s, "\nPL08x virtual slave channels:\n");
1801         seq_printf(s, "CHANNEL:\tSTATE:\n");
1802         seq_printf(s, "--------\t------\n");
1803         list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
1804                 seq_printf(s, "%s\t\t%s\n", chan->name,
1805                            pl08x_state_str(chan->state));
1806         }
1807
1808         return 0;
1809 }
1810
1811 static int pl08x_debugfs_open(struct inode *inode, struct file *file)
1812 {
1813         return single_open(file, pl08x_debugfs_show, inode->i_private);
1814 }
1815
1816 static const struct file_operations pl08x_debugfs_operations = {
1817         .open           = pl08x_debugfs_open,
1818         .read           = seq_read,
1819         .llseek         = seq_lseek,
1820         .release        = single_release,
1821 };
1822
1823 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1824 {
1825         /* Expose a simple debugfs interface to view the channel state */
1826         (void) debugfs_create_file(dev_name(&pl08x->adev->dev),
1827                         S_IFREG | S_IRUGO, NULL, pl08x,
1828                         &pl08x_debugfs_operations);
1829 }
1830
1831 #else
1832 static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1833 {
1834 }
1835 #endif
1836
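/*
 * Probe: set up the memcpy and slave dmaengine devices, map the
 * registers, grab the IRQ, initialise the physical and virtual
 * channels, and register both DMA devices with the dmaengine core.
 *
 * The board must supply a struct pl08x_platform_data.  A minimal
 * sketch, using only the fields this driver references (the slave
 * channel name and address are made-up, board-specific examples):
 *
 *	static struct pl08x_channel_data board_slave_channels[] = {
 *		{
 *			.bus_id = "uart0_tx",
 *			.addr = 0x80120000,	(peripheral FIFO address)
 *			.periph_buses = PL08X_AHB2,
 *		},
 *	};
 *
 *	static struct pl08x_platform_data board_pl08x_pd = {
 *		.slave_channels = board_slave_channels,
 *		.num_slave_channels = ARRAY_SIZE(board_slave_channels),
 *		.memcpy_channel = { .bus_id = "memcpy" },
 *		.lli_buses = PL08X_AHB1,
 *		.mem_buses = PL08X_AHB1,
 *	};
 */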
1837 static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1838 {
1839         struct pl08x_driver_data *pl08x;
1840         const struct vendor_data *vd = id->data;
1841         int ret = 0;
1842         int i;
1843
1844         ret = amba_request_regions(adev, NULL);
1845         if (ret)
1846                 return ret;
1847
1848         /* Create the driver state holder */
1849         pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
1850         if (!pl08x) {
1851                 ret = -ENOMEM;
1852                 goto out_no_pl08x;
1853         }
1854
1855         pm_runtime_set_active(&adev->dev);
1856         pm_runtime_enable(&adev->dev);
1857
1858         /* Initialize memcpy engine */
1859         dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
1860         pl08x->memcpy.dev = &adev->dev;
1861         pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
1862         pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
1863         pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
1864         pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
1865         pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
1866         pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
1867         pl08x->memcpy.device_control = pl08x_control;
1868
1869         /* Initialize slave engine */
1870         dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
1871         pl08x->slave.dev = &adev->dev;
1872         pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
1873         pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
1874         pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
1875         pl08x->slave.device_tx_status = pl08x_dma_tx_status;
1876         pl08x->slave.device_issue_pending = pl08x_issue_pending;
1877         pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
1878         pl08x->slave.device_control = pl08x_control;
1879
1880         /* Get the platform data */
1881         pl08x->pd = dev_get_platdata(&adev->dev);
1882         if (!pl08x->pd) {
1883                 dev_err(&adev->dev, "no platform data supplied\n");
                     ret = -EINVAL;
1884                 goto out_no_platdata;
1885         }
1886
1887         /* Assign useful pointers to the driver state */
1888         pl08x->adev = adev;
1889         pl08x->vd = vd;
1890
1891         /* By default use AHB1 only; if dualmaster, take buses from platform data */
1892         pl08x->lli_buses = PL08X_AHB1;
1893         pl08x->mem_buses = PL08X_AHB1;
1894         if (pl08x->vd->dualmaster) {
1895                 pl08x->lli_buses = pl08x->pd->lli_buses;
1896                 pl08x->mem_buses = pl08x->pd->mem_buses;
1897         }
1898
1899         /* A DMA memory pool for LLIs, aligned to PL08X_ALIGN bytes */
1900         pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
1901                         PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
1902         if (!pl08x->pool) {
1903                 ret = -ENOMEM;
1904                 goto out_no_lli_pool;
1905         }
1906
1907         spin_lock_init(&pl08x->lock);
1908
1909         pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
1910         if (!pl08x->base) {
1911                 ret = -ENOMEM;
1912                 goto out_no_ioremap;
1913         }
1914
1915         /* Turn on the PL08x */
1916         pl08x_ensure_on(pl08x);
1917
1918         /* Attach the interrupt handler */
1919         writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
1920         writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
1921
1922         ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
1923                           DRIVER_NAME, pl08x);
1924         if (ret) {
1925                 dev_err(&adev->dev, "%s failed to request interrupt %d\n",
1926                         __func__, adev->irq[0]);
1927                 goto out_no_irq;
1928         }
1929
1930         /* Initialize physical channels */
1931         pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
1932                         GFP_KERNEL);
1933         if (!pl08x->phy_chans) {
1934                 dev_err(&adev->dev,
1935                         "%s failed to allocate physical channel holders\n",
1936                         __func__);
                     ret = -ENOMEM;
1937                 goto out_no_phychans;
1938         }
1939
1940         for (i = 0; i < vd->channels; i++) {
1941                 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];
1942
1943                 ch->id = i;
1944                 ch->base = pl08x->base + PL080_Cx_BASE(i);
1945                 spin_lock_init(&ch->lock);
1946                 ch->serving = NULL;
1947                 ch->signal = -1;
1948                 dev_dbg(&adev->dev, "physical channel %d is %s\n",
1949                         i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
1950         }
1951
1952         /* Register as many memcpy channels as there are physical channels */
1953         ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
1954                                               pl08x->vd->channels, false);
1955         if (ret <= 0) {
1956                 dev_warn(&pl08x->adev->dev,
1957                          "%s failed to enumerate memcpy channels - %d\n",
1958                          __func__, ret);
1959                 goto out_no_memcpy;
1960         }
1961         pl08x->memcpy.chancnt = ret;
1962
1963         /* Register slave channels */
1964         ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
1965                         pl08x->pd->num_slave_channels, true);
1966         if (ret <= 0) {
1967                 dev_warn(&pl08x->adev->dev,
1968                         "%s failed to enumerate slave channels - %d\n",
1969                         __func__, ret);
1970                 goto out_no_slave;
1971         }
1972         pl08x->slave.chancnt = ret;
1973
1974         ret = dma_async_device_register(&pl08x->memcpy);
1975         if (ret) {
1976                 dev_warn(&pl08x->adev->dev,
1977                         "%s failed to register memcpy as an async device - %d\n",
1978                         __func__, ret);
1979                 goto out_no_memcpy_reg;
1980         }
1981
1982         ret = dma_async_device_register(&pl08x->slave);
1983         if (ret) {
1984                 dev_warn(&pl08x->adev->dev,
1985                         "%s failed to register slave as an async device - %d\n",
1986                         __func__, ret);
1987                 goto out_no_slave_reg;
1988         }
1989
1990         amba_set_drvdata(adev, pl08x);
1991         init_pl08x_debugfs(pl08x);
1992         dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
1993                  amba_part(adev), amba_rev(adev),
1994                  (unsigned long long)adev->res.start, adev->irq[0]);
1995
1996         pm_runtime_put(&adev->dev);
1997         return 0;
1998
1999 out_no_slave_reg:
2000         dma_async_device_unregister(&pl08x->memcpy);
2001 out_no_memcpy_reg:
2002         pl08x_free_virtual_channels(&pl08x->slave);
2003 out_no_slave:
2004         pl08x_free_virtual_channels(&pl08x->memcpy);
2005 out_no_memcpy:
2006         kfree(pl08x->phy_chans);
2007 out_no_phychans:
2008         free_irq(adev->irq[0], pl08x);
2009 out_no_irq:
2010         iounmap(pl08x->base);
2011 out_no_ioremap:
2012         dma_pool_destroy(pl08x->pool);
2013 out_no_lli_pool:
2014 out_no_platdata:
2015         pm_runtime_put(&adev->dev);
2016         pm_runtime_disable(&adev->dev);
2017
2018         kfree(pl08x);
2019 out_no_pl08x:
2020         amba_release_regions(adev);
2021         return ret;
2022 }
2023
2024 /* The PL080 has 8 channels and the PL081 has just 2 */
2025 static struct vendor_data vendor_pl080 = {
2026         .channels = 8,
2027         .dualmaster = true,
2028 };
2029
2030 static struct vendor_data vendor_pl081 = {
2031         .channels = 2,
2032         .dualmaster = false,
2033 };
2034
2035 static struct amba_id pl08x_ids[] = {
2036         /* PL080 */
2037         {
2038                 .id     = 0x00041080,
2039                 .mask   = 0x000fffff,
2040                 .data   = &vendor_pl080,
2041         },
2042         /* PL081 */
2043         {
2044                 .id     = 0x00041081,
2045                 .mask   = 0x000fffff,
2046                 .data   = &vendor_pl081,
2047         },
2048         /* Nomadik 8815 PL080 variant */
2049         {
2050                 .id     = 0x00280880,
2051                 .mask   = 0x00ffffff,
2052                 .data   = &vendor_pl080,
2053         },
2054         { 0, 0 },
2055 };
2056
2057 static struct amba_driver pl08x_amba_driver = {
2058         .drv.name       = DRIVER_NAME,
2059         .id_table       = pl08x_ids,
2060         .probe          = pl08x_probe,
2061 };
2062
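/*
 * Register with the AMBA bus at subsys_initcall() time, so that the
 * DMA channels are available by the time client drivers start probing.
 */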
2063 static int __init pl08x_init(void)
2064 {
2065         int retval;
2066         retval = amba_driver_register(&pl08x_amba_driver);
2067         if (retval)
2068                 printk(KERN_WARNING DRIVER_NAME
2069                        ": failed to register as an AMBA device (%d)\n",
2070                        retval);
2071         return retval;
2072 }
2073 subsys_initcall(pl08x_init);