dma/imx-sdma: use num_events to validate event_id0
[pandora-kernel.git] drivers/dma/imx-sdma.c
1 /*
2  * drivers/dma/imx-sdma.c
3  *
4  * This file contains a driver for the Freescale Smart DMA engine
5  *
6  * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
7  *
8  * Based on code from Freescale:
9  *
10  * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
11  *
12  * The code contained herein is licensed under the GNU General Public
13  * License. You may obtain a copy of the GNU General Public License
14  * Version 2 or later at the following locations:
15  *
16  * http://www.opensource.org/licenses/gpl-license.html
17  * http://www.gnu.org/copyleft/gpl.html
18  */
19
20 #include <linux/init.h>
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/mm.h>
24 #include <linux/interrupt.h>
25 #include <linux/clk.h>
26 #include <linux/wait.h>
27 #include <linux/sched.h>
28 #include <linux/semaphore.h>
29 #include <linux/spinlock.h>
30 #include <linux/device.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/firmware.h>
33 #include <linux/slab.h>
34 #include <linux/platform_device.h>
35 #include <linux/dmaengine.h>
36 #include <linux/of.h>
37 #include <linux/of_device.h>
39
40 #include <asm/irq.h>
41 #include <mach/sdma.h>
42 #include <mach/dma.h>
43 #include <mach/hardware.h>
44
45 /* SDMA registers */
46 #define SDMA_H_C0PTR            0x000
47 #define SDMA_H_INTR             0x004
48 #define SDMA_H_STATSTOP         0x008
49 #define SDMA_H_START            0x00c
50 #define SDMA_H_EVTOVR           0x010
51 #define SDMA_H_DSPOVR           0x014
52 #define SDMA_H_HOSTOVR          0x018
53 #define SDMA_H_EVTPEND          0x01c
54 #define SDMA_H_DSPENBL          0x020
55 #define SDMA_H_RESET            0x024
56 #define SDMA_H_EVTERR           0x028
57 #define SDMA_H_INTRMSK          0x02c
58 #define SDMA_H_PSW              0x030
59 #define SDMA_H_EVTERRDBG        0x034
60 #define SDMA_H_CONFIG           0x038
61 #define SDMA_ONCE_ENB           0x040
62 #define SDMA_ONCE_DATA          0x044
63 #define SDMA_ONCE_INSTR         0x048
64 #define SDMA_ONCE_STAT          0x04c
65 #define SDMA_ONCE_CMD           0x050
66 #define SDMA_EVT_MIRROR         0x054
67 #define SDMA_ILLINSTADDR        0x058
68 #define SDMA_CHN0ADDR           0x05c
69 #define SDMA_ONCE_RTB           0x060
70 #define SDMA_XTRIG_CONF1        0x070
71 #define SDMA_XTRIG_CONF2        0x074
72 #define SDMA_CHNENBL0_IMX35     0x200
73 #define SDMA_CHNENBL0_IMX31     0x080
74 #define SDMA_CHNPRI_0           0x100
75
76 /*
77  * Buffer descriptor status values.
78  */
79 #define BD_DONE  0x01
80 #define BD_WRAP  0x02
81 #define BD_CONT  0x04
82 #define BD_INTR  0x08
83 #define BD_RROR  0x10
84 #define BD_LAST  0x20
85 #define BD_EXTD  0x80
86
87 /*
88  * Data Node descriptor status values.
89  */
90 #define DND_END_OF_FRAME  0x80
91 #define DND_END_OF_XFER   0x40
92 #define DND_DONE          0x20
93 #define DND_UNUSED        0x01
94
95 /*
96  * IPCV2 descriptor status values.
97  */
98 #define BD_IPCV2_END_OF_FRAME  0x40
99
100 #define IPCV2_MAX_NODES        50
101 /*
102  * Error bit set in the CCB status field by the SDMA,
103  * in setbd routine, in case of a transfer error
104  */
105 #define DATA_ERROR  0x10000000
106
107 /*
108  * Buffer descriptor commands.
109  */
110 #define C0_ADDR             0x01
111 #define C0_LOAD             0x02
112 #define C0_DUMP             0x03
113 #define C0_SETCTX           0x07
114 #define C0_GETCTX           0x03
115 #define C0_SETDM            0x01
116 #define C0_SETPM            0x04
117 #define C0_GETDM            0x02
118 #define C0_GETPM            0x08
119 /*
120  * Change endianness indicator in the BD command field
121  */
122 #define CHANGE_ENDIANNESS   0x80
123
124 /*
125  * Mode/Count of data node descriptors - IPCv2
126  */
127 struct sdma_mode_count {
128         u32 count   : 16; /* size of the buffer pointed to by this BD */
129         u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
130         u32 command :  8; /* command, mostly used for channel 0 */
131 };
132
133 /*
134  * Buffer descriptor
135  */
136 struct sdma_buffer_descriptor {
137         struct sdma_mode_count  mode;
138         u32 buffer_addr;        /* address of the buffer described */
139         u32 ext_buffer_addr;    /* extended buffer address */
140 } __attribute__ ((packed));
141
142 /**
143  * struct sdma_channel_control - Channel control Block
144  *
145  * @current_bd_ptr      current buffer descriptor processed
146  * @base_bd_ptr         first element of buffer descriptor array
147  * @unused              padding. The SDMA engine expects an array of 16-byte
148  *                      control blocks
149  */
150 struct sdma_channel_control {
151         u32 current_bd_ptr;
152         u32 base_bd_ptr;
153         u32 unused[2];
154 } __attribute__ ((packed));
155
156 /**
157  * struct sdma_state_registers - SDMA context for a channel
158  *
159  * @pc:         program counter
160  * @t:          test bit: status of arithmetic & test instruction
161  * @rpc:        return program counter
162  * @sf:         source fault while loading data
163  * @spc:        loop start program counter
164  * @df:         destination fault while storing data
165  * @epc:        loop end program counter
166  * @lm:         loop mode
167  */
168 struct sdma_state_registers {
169         u32 pc     :14;
170         u32 unused1: 1;
171         u32 t      : 1;
172         u32 rpc    :14;
173         u32 unused0: 1;
174         u32 sf     : 1;
175         u32 spc    :14;
176         u32 unused2: 1;
177         u32 df     : 1;
178         u32 epc    :14;
179         u32 lm     : 2;
180 } __attribute__ ((packed));
181
182 /**
183  * struct sdma_context_data - sdma context specific to a channel
184  *
185  * @channel_state:      channel state bits
186  * @gReg:               general registers
187  * @mda:                burst dma destination address register
188  * @msa:                burst dma source address register
189  * @ms:                 burst dma status register
190  * @md:                 burst dma data register
191  * @pda:                peripheral dma destination address register
192  * @psa:                peripheral dma source address register
193  * @ps:                 peripheral dma status register
194  * @pd:                 peripheral dma data register
195  * @ca:                 CRC polynomial register
196  * @cs:                 CRC accumulator register
197  * @dda:                dedicated core destination address register
198  * @dsa:                dedicated core source address register
199  * @ds:                 dedicated core status register
200  * @dd:                 dedicated core data register
201  */
202 struct sdma_context_data {
203         struct sdma_state_registers  channel_state;
204         u32  gReg[8];
205         u32  mda;
206         u32  msa;
207         u32  ms;
208         u32  md;
209         u32  pda;
210         u32  psa;
211         u32  ps;
212         u32  pd;
213         u32  ca;
214         u32  cs;
215         u32  dda;
216         u32  dsa;
217         u32  ds;
218         u32  dd;
219         u32  scratch0;
220         u32  scratch1;
221         u32  scratch2;
222         u32  scratch3;
223         u32  scratch4;
224         u32  scratch5;
225         u32  scratch6;
226         u32  scratch7;
227 } __attribute__ ((packed));
228
229 #define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
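/*
 * Editor's note: with the packed 12-byte sdma_buffer_descriptor above
 * (one 32-bit mode word plus two 32-bit addresses) and the 4 KiB
 * PAGE_SIZE usual on these i.MX parts, NUM_BD evaluates to
 * 4096 / 12 = 341 descriptors per channel.
 */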
230
231 struct sdma_engine;
232
233 /**
234  * struct sdma_channel - housekeeping for a SDMA channel
235  *
236  * @sdma                pointer to the SDMA engine for this channel
237  * @channel             the channel number, matches dmaengine chan_id + 1
238  * @direction           transfer type. Needed for setting SDMA script
239  * @peripheral_type     Peripheral type. Needed for setting SDMA script
240  * @event_id0           aka dma request line
241  * @event_id1           for channels that use 2 events
242  * @word_size           peripheral access size
243  * @buf_tail            ID of the buffer that was processed
244  * @done                channel completion
245  * @num_bd              max NUM_BD. number of descriptors currently being handled
246  */
247 struct sdma_channel {
248         struct sdma_engine              *sdma;
249         unsigned int                    channel;
250         enum dma_transfer_direction             direction;
251         enum sdma_peripheral_type       peripheral_type;
252         unsigned int                    event_id0;
253         unsigned int                    event_id1;
254         enum dma_slave_buswidth         word_size;
255         unsigned int                    buf_tail;
256         struct completion               done;
257         unsigned int                    num_bd;
258         struct sdma_buffer_descriptor   *bd;
259         dma_addr_t                      bd_phys;
260         unsigned int                    pc_from_device, pc_to_device;
261         unsigned long                   flags;
262         dma_addr_t                      per_address;
263         u32                             event_mask0, event_mask1;
264         u32                             watermark_level;
265         u32                             shp_addr, per_addr;
266         struct dma_chan                 chan;
267         spinlock_t                      lock;
268         struct dma_async_tx_descriptor  desc;
269         dma_cookie_t                    last_completed;
270         enum dma_status                 status;
271         unsigned int                    chn_count;
272         unsigned int                    chn_real_count;
273 };
274
275 #define IMX_DMA_SG_LOOP         (1 << 0)
276
277 #define MAX_DMA_CHANNELS 32
278 #define MXC_SDMA_DEFAULT_PRIORITY 1
279 #define MXC_SDMA_MIN_PRIORITY 1
280 #define MXC_SDMA_MAX_PRIORITY 7
281
282 #define SDMA_FIRMWARE_MAGIC 0x414d4453
283
284 /**
285  * struct sdma_firmware_header - Layout of the firmware image
286  *
287  * @magic               "SDMA"
288  * @version_major       increased whenever layout of struct sdma_script_start_addrs
289  *                      changes.
290  * @version_minor       firmware minor version (for binary compatible changes)
291  * @script_addrs_start  offset of struct sdma_script_start_addrs in this image
292  * @num_script_addrs    Number of script addresses in this image
293  * @ram_code_start      offset of SDMA ram image in this firmware image
294  * @ram_code_size       size of SDMA ram image
295  * @script_addrs        Stores the start address of the SDMA scripts
296  *                      (in SDMA memory space)
297  */
298 struct sdma_firmware_header {
299         u32     magic;
300         u32     version_major;
301         u32     version_minor;
302         u32     script_addrs_start;
303         u32     num_script_addrs;
304         u32     ram_code_start;
305         u32     ram_code_size;
306 };
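/*
 * Editor's note: SDMA_FIRMWARE_MAGIC above is just the ASCII string
 * "SDMA" read as a little-endian u32 ('S' = 0x53 in the lowest byte,
 * then 'D' = 0x44, 'M' = 0x4d, 'A' = 0x41), which is why
 * sdma_load_firmware() below can compare it against the raw header. A
 * minimal stand-alone validity check might look like this sketch (the
 * helper name is hypothetical):
 *
 *	static bool sdma_fw_header_ok(const void *data, size_t size)
 *	{
 *		const struct sdma_firmware_header *h = data;
 *
 *		if (size < sizeof(*h))
 *			return false;
 *		return h->magic == SDMA_FIRMWARE_MAGIC &&
 *		       h->ram_code_start + h->ram_code_size <= size;
 *	}
 */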
307
308 enum sdma_devtype {
309         IMX31_SDMA,     /* runs on i.mx31 */
310         IMX35_SDMA,     /* runs on i.mx35 and later */
311 };
312
313 struct sdma_engine {
314         struct device                   *dev;
315         struct device_dma_parameters    dma_parms;
316         struct sdma_channel             channel[MAX_DMA_CHANNELS];
317         struct sdma_channel_control     *channel_control;
318         void __iomem                    *regs;
319         enum sdma_devtype               devtype;
320         unsigned int                    num_events;
321         struct sdma_context_data        *context;
322         dma_addr_t                      context_phys;
323         struct dma_device               dma_device;
324         struct clk                      *clk;
325         struct mutex                    channel_0_lock;
326         struct sdma_script_start_addrs  *script_addrs;
327 };
328
329 static struct platform_device_id sdma_devtypes[] = {
330         {
331                 .name = "imx31-sdma",
332                 .driver_data = IMX31_SDMA,
333         }, {
334                 .name = "imx35-sdma",
335                 .driver_data = IMX35_SDMA,
336         }, {
337                 /* sentinel */
338         }
339 };
340 MODULE_DEVICE_TABLE(platform, sdma_devtypes);
341
342 static const struct of_device_id sdma_dt_ids[] = {
343         { .compatible = "fsl,imx31-sdma", .data = &sdma_devtypes[IMX31_SDMA], },
344         { .compatible = "fsl,imx35-sdma", .data = &sdma_devtypes[IMX35_SDMA], },
345         { /* sentinel */ }
346 };
347 MODULE_DEVICE_TABLE(of, sdma_dt_ids);
348
349 #define SDMA_H_CONFIG_DSPDMA    (1 << 12) /* indicates if the DSPDMA is used */
350 #define SDMA_H_CONFIG_RTD_PINS  (1 << 11) /* indicates if Real-Time Debug pins are enabled */
351 #define SDMA_H_CONFIG_ACR       (1 << 4)  /* indicates if AHB freq /core freq = 2 or 1 */
352 #define SDMA_H_CONFIG_CSM       (3)       /* indicates which context switch mode is selected */
353
354 static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
355 {
356         u32 chnenbl0 = (sdma->devtype == IMX31_SDMA ? SDMA_CHNENBL0_IMX31 :
357                                                       SDMA_CHNENBL0_IMX35);
358         return chnenbl0 + event * 4;
359 }
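/*
 * Editor's note: every DMA request event owns one 32-bit
 * channel-enable register, laid out as an array from SDMA_CHNENBL0_*.
 * Event 7 on i.MX35, for example, maps to 0x200 + 7 * 4 = 0x21c, and
 * bit n of that register routes the event to SDMA channel n (see
 * sdma_event_enable() below).
 */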
360
361 static int sdma_config_ownership(struct sdma_channel *sdmac,
362                 bool event_override, bool mcu_override, bool dsp_override)
363 {
364         struct sdma_engine *sdma = sdmac->sdma;
365         int channel = sdmac->channel;
366         u32 evt, mcu, dsp;
367
368         if (event_override && mcu_override && dsp_override)
369                 return -EINVAL;
370
371         evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
372         mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
373         dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
374
375         if (dsp_override)
376                 dsp &= ~(1 << channel);
377         else
378                 dsp |= (1 << channel);
379
380         if (event_override)
381                 evt &= ~(1 << channel);
382         else
383                 evt |= (1 << channel);
384
385         if (mcu_override)
386                 mcu &= ~(1 << channel);
387         else
388                 mcu |= (1 << channel);
389
390         writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
391         writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
392         writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
393
394         return 0;
395 }
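/*
 * Editor's note: ownership is expressed through three per-channel bit
 * masks (EVTOVR, HOSTOVR and DSPOVR). Passing true for an override
 * clears the channel's bit in the matching register, false sets it.
 * sdma_config_channel() below uses (true, true, false) for ordinary
 * peripheral channels and (false, true, false) for memory transfers.
 */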
396
397 static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
398 {
399         writel(1 << channel, sdma->regs + SDMA_H_START);
400 }
401
402 /*
403  * sdma_run_channel - run a channel and wait till it's done
404  */
405 static int sdma_run_channel(struct sdma_channel *sdmac)
406 {
407         struct sdma_engine *sdma = sdmac->sdma;
408         int channel = sdmac->channel;
409         int ret;
410
411         init_completion(&sdmac->done);
412
413         sdma_enable_channel(sdma, channel);
414
415         ret = wait_for_completion_timeout(&sdmac->done, HZ);
416
417         return ret ? 0 : -ETIMEDOUT;
418 }
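/*
 * Editor's note: the HZ timeout above is one second of jiffies.
 * Channel 0 scripts (context and firmware loads) normally finish well
 * inside that budget, so hitting the timeout means the engine never
 * raised its interrupt and the operation is reported as -ETIMEDOUT.
 */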
419
420 static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
421                 u32 address)
422 {
423         struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
424         void *buf_virt;
425         dma_addr_t buf_phys;
426         int ret;
427
428         mutex_lock(&sdma->channel_0_lock);
429
430         buf_virt = dma_alloc_coherent(NULL,
431                         size,
432                         &buf_phys, GFP_KERNEL);
433         if (!buf_virt) {
434                 ret = -ENOMEM;
435                 goto err_out;
436         }
437
438         bd0->mode.command = C0_SETPM;
439         bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
440         bd0->mode.count = size / 2;
441         bd0->buffer_addr = buf_phys;
442         bd0->ext_buffer_addr = address;
443
444         memcpy(buf_virt, buf, size);
445
446         ret = sdma_run_channel(&sdma->channel[0]);
447
448         dma_free_coherent(NULL, size, buf_virt, buf_phys);
449
450 err_out:
451         mutex_unlock(&sdma->channel_0_lock);
452
453         return ret;
454 }
455
456 static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
457 {
458         struct sdma_engine *sdma = sdmac->sdma;
459         int channel = sdmac->channel;
460         u32 val;
461         u32 chnenbl = chnenbl_ofs(sdma, event);
462
463         val = readl_relaxed(sdma->regs + chnenbl);
464         val |= (1 << channel);
465         writel_relaxed(val, sdma->regs + chnenbl);
466 }
467
468 static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
469 {
470         struct sdma_engine *sdma = sdmac->sdma;
471         int channel = sdmac->channel;
472         u32 chnenbl = chnenbl_ofs(sdma, event);
473         u32 val;
474
475         val = readl_relaxed(sdma->regs + chnenbl);
476         val &= ~(1 << channel);
477         writel_relaxed(val, sdma->regs + chnenbl);
478 }
479
480 static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
481 {
482         struct sdma_buffer_descriptor *bd;
483
484         /*
485          * loop mode. Iterate over descriptors, re-arm them and
486          * call the callback function.
487          */
488         while (1) {
489                 bd = &sdmac->bd[sdmac->buf_tail];
490
491                 if (bd->mode.status & BD_DONE)
492                         break;
493
494                 if (bd->mode.status & BD_RROR)
495                         sdmac->status = DMA_ERROR;
496                 else
497                         sdmac->status = DMA_IN_PROGRESS;
498
499                 bd->mode.status |= BD_DONE;
500                 sdmac->buf_tail++;
501                 sdmac->buf_tail %= sdmac->num_bd;
502
503                 if (sdmac->desc.callback)
504                         sdmac->desc.callback(sdmac->desc.callback_param);
505         }
506 }
507
508 static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
509 {
510         struct sdma_buffer_descriptor *bd;
511         int i, error = 0;
512
513         sdmac->chn_real_count = 0;
514         /*
515          * non loop mode. Iterate over all descriptors, collect
516          * errors and call the callback function
517          */
518         for (i = 0; i < sdmac->num_bd; i++) {
519                 bd = &sdmac->bd[i];
520
521                 if (bd->mode.status & (BD_DONE | BD_RROR))
522                         error = -EIO;
523                 sdmac->chn_real_count += bd->mode.count;
524         }
525
526         if (error)
527                 sdmac->status = DMA_ERROR;
528         else
529                 sdmac->status = DMA_SUCCESS;
530
531         sdmac->last_completed = sdmac->desc.cookie;
532         if (sdmac->desc.callback)
533                 sdmac->desc.callback(sdmac->desc.callback_param);
534 }
535
536 static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
537 {
538         complete(&sdmac->done);
539
540         /* not interested in channel 0 interrupts */
541         if (sdmac->channel == 0)
542                 return;
543
544         if (sdmac->flags & IMX_DMA_SG_LOOP)
545                 sdma_handle_channel_loop(sdmac);
546         else
547                 mxc_sdma_handle_channel_normal(sdmac);
548 }
549
550 static irqreturn_t sdma_int_handler(int irq, void *dev_id)
551 {
552         struct sdma_engine *sdma = dev_id;
553         u32 stat;
554
555         stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
556         writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
557
558         while (stat) {
559                 int channel = fls(stat) - 1;
560                 struct sdma_channel *sdmac = &sdma->channel[channel];
561
562                 mxc_sdma_handle_channel(sdmac);
563
564                 stat &= ~(1 << channel);
565         }
566
567         return IRQ_HANDLED;
568 }
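/*
 * Editor's note: the loop walks the interrupt status word from the
 * most significant set bit downwards. With stat = 0x0000000a
 * (channels 1 and 3 pending), fls(stat) - 1 first yields 3, channel 3
 * is handled and its bit cleared, then channel 1 follows.
 */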
569
570 /*
571  * sets the pc of SDMA script according to the peripheral type
572  */
573 static void sdma_get_pc(struct sdma_channel *sdmac,
574                 enum sdma_peripheral_type peripheral_type)
575 {
576         struct sdma_engine *sdma = sdmac->sdma;
577         int per_2_emi = 0, emi_2_per = 0;
578         /*
579          * These are needed once we start to support transfers between
580          * two peripherals or memory-to-memory transfers
581          */
582         int per_2_per = 0, emi_2_emi = 0;
583
584         sdmac->pc_from_device = 0;
585         sdmac->pc_to_device = 0;
586
587         switch (peripheral_type) {
588         case IMX_DMATYPE_MEMORY:
589                 emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
590                 break;
591         case IMX_DMATYPE_DSP:
592                 emi_2_per = sdma->script_addrs->bp_2_ap_addr;
593                 per_2_emi = sdma->script_addrs->ap_2_bp_addr;
594                 break;
595         case IMX_DMATYPE_FIRI:
596                 per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
597                 emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
598                 break;
599         case IMX_DMATYPE_UART:
600                 per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
601                 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
602                 break;
603         case IMX_DMATYPE_UART_SP:
604                 per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
605                 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
606                 break;
607         case IMX_DMATYPE_ATA:
608                 per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
609                 emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
610                 break;
611         case IMX_DMATYPE_CSPI:
612         case IMX_DMATYPE_EXT:
613         case IMX_DMATYPE_SSI:
614                 per_2_emi = sdma->script_addrs->app_2_mcu_addr;
615                 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
616                 break;
617         case IMX_DMATYPE_SSI_SP:
618         case IMX_DMATYPE_MMC:
619         case IMX_DMATYPE_SDHC:
620         case IMX_DMATYPE_CSPI_SP:
621         case IMX_DMATYPE_ESAI:
622         case IMX_DMATYPE_MSHC_SP:
623                 per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
624                 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
625                 break;
626         case IMX_DMATYPE_ASRC:
627                 per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
628                 emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
629                 per_2_per = sdma->script_addrs->per_2_per_addr;
630                 break;
631         case IMX_DMATYPE_MSHC:
632                 per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
633                 emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
634                 break;
635         case IMX_DMATYPE_CCM:
636                 per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
637                 break;
638         case IMX_DMATYPE_SPDIF:
639                 per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
640                 emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
641                 break;
642         case IMX_DMATYPE_IPU_MEMORY:
643                 emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
644                 break;
645         default:
646                 break;
647         }
648
649         sdmac->pc_from_device = per_2_emi;
650         sdmac->pc_to_device = emi_2_per;
651 }
652
653 static int sdma_load_context(struct sdma_channel *sdmac)
654 {
655         struct sdma_engine *sdma = sdmac->sdma;
656         int channel = sdmac->channel;
657         int load_address;
658         struct sdma_context_data *context = sdma->context;
659         struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
660         int ret;
661
662         if (sdmac->direction == DMA_DEV_TO_MEM) {
663                 load_address = sdmac->pc_from_device;
664         } else {
665                 load_address = sdmac->pc_to_device;
666         }
667
668         if (load_address < 0)
669                 return load_address;
670
671         dev_dbg(sdma->dev, "load_address = %d\n", load_address);
672         dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level);
673         dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
674         dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
675         dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
676         dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
677
678         mutex_lock(&sdma->channel_0_lock);
679
680         memset(context, 0, sizeof(*context));
681         context->channel_state.pc = load_address;
682
683         /* Pass the event mask, peripheral base address and watermark
684          * level to the script via the channel context.
685          */
686         context->gReg[0] = sdmac->event_mask1;
687         context->gReg[1] = sdmac->event_mask0;
688         context->gReg[2] = sdmac->per_addr;
689         context->gReg[6] = sdmac->shp_addr;
690         context->gReg[7] = sdmac->watermark_level;
691
692         bd0->mode.command = C0_SETDM;
693         bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
694         bd0->mode.count = sizeof(*context) / 4;
695         bd0->buffer_addr = sdma->context_phys;
696         bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
697
698         ret = sdma_run_channel(&sdma->channel[0]);
699
700         mutex_unlock(&sdma->channel_0_lock);
701
702         return ret;
703 }
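/*
 * Editor's note: struct sdma_context_data is exactly 32 words (64 bits
 * of state registers plus 30 u32s, 128 bytes in total), so the
 * C0_SETDM destination above, 2048 + 32 * channel, gives every channel
 * its own 32-word context slot in SDMA data RAM starting at word
 * address 2048.
 */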
704
705 static void sdma_disable_channel(struct sdma_channel *sdmac)
706 {
707         struct sdma_engine *sdma = sdmac->sdma;
708         int channel = sdmac->channel;
709
710         writel_relaxed(1 << channel, sdma->regs + SDMA_H_STATSTOP);
711         sdmac->status = DMA_ERROR;
712 }
713
714 static int sdma_config_channel(struct sdma_channel *sdmac)
715 {
716         int ret;
717
718         sdma_disable_channel(sdmac);
719
720         sdmac->event_mask0 = 0;
721         sdmac->event_mask1 = 0;
722         sdmac->shp_addr = 0;
723         sdmac->per_addr = 0;
724
725         if (sdmac->event_id0) {
726                 if (sdmac->event_id0 >= sdmac->sdma->num_events)
727                         return -EINVAL;
728                 sdma_event_enable(sdmac, sdmac->event_id0);
729         }
730
731         switch (sdmac->peripheral_type) {
732         case IMX_DMATYPE_DSP:
733                 sdma_config_ownership(sdmac, false, true, true);
734                 break;
735         case IMX_DMATYPE_MEMORY:
736                 sdma_config_ownership(sdmac, false, true, false);
737                 break;
738         default:
739                 sdma_config_ownership(sdmac, true, true, false);
740                 break;
741         }
742
743         sdma_get_pc(sdmac, sdmac->peripheral_type);
744
745         if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
746                         (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
747                 /* Handle multiple event channels differently */
748                 if (sdmac->event_id1) {
749                         sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32);
750                         if (sdmac->event_id1 > 31)
751                                 sdmac->watermark_level |= 1 << 31;
752                         sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32);
753                         if (sdmac->event_id0 > 31)
754                                 sdmac->watermark_level |= 1 << 30;
755                 } else {
756                         sdmac->event_mask0 = sdmac->event_id0 < 32 ? 1 << sdmac->event_id0 : 0;
757                         sdmac->event_mask1 = sdmac->event_id0 < 32 ? 0 : 1 << (sdmac->event_id0 - 32);
758                 }
759                 /* Watermark Level: already set via DMA_SLAVE_CONFIG; the OR below is a no-op */
760                 sdmac->watermark_level |= sdmac->watermark_level;
761                 /* Address */
762                 sdmac->shp_addr = sdmac->per_address;
763         } else {
764                 sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
765         }
766
767         ret = sdma_load_context(sdmac);
768
769         return ret;
770 }
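/*
 * Editor's note, a worked example of the dual-event masks above: with
 * event_id0 = 40 and event_id1 = 7, event_mask0 becomes
 * 1 << (40 % 32) = 1 << 8, event_mask1 becomes 1 << 7, and bit 30 of
 * watermark_level is set to tell the script that event 0 lives in the
 * upper event bank (events 32..47 exist only on i.MX35, which has 48
 * events).
 */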
771
772 static int sdma_set_channel_priority(struct sdma_channel *sdmac,
773                 unsigned int priority)
774 {
775         struct sdma_engine *sdma = sdmac->sdma;
776         int channel = sdmac->channel;
777
778         if (priority < MXC_SDMA_MIN_PRIORITY
779             || priority > MXC_SDMA_MAX_PRIORITY) {
780                 return -EINVAL;
781         }
782
783         writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
784
785         return 0;
786 }
787
788 static int sdma_request_channel(struct sdma_channel *sdmac)
789 {
790         struct sdma_engine *sdma = sdmac->sdma;
791         int channel = sdmac->channel;
792         int ret = -EBUSY;
793
794         sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
795         if (!sdmac->bd) {
796                 ret = -ENOMEM;
797                 goto out;
798         }
799
800         memset(sdmac->bd, 0, PAGE_SIZE);
801
802         sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
803         sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
804
805         sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
806
807         init_completion(&sdmac->done);
808
809         sdmac->buf_tail = 0;
810
811         return 0;
812 out:
813
814         return ret;
815 }
816
817 static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
818 {
819         dma_cookie_t cookie = sdmac->chan.cookie;
820
821         if (++cookie < 0)
822                 cookie = 1;
823
824         sdmac->chan.cookie = cookie;
825         sdmac->desc.cookie = cookie;
826
827         return cookie;
828 }
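/*
 * Editor's note: dma_cookie_t is a signed 32-bit value and only
 * positive cookies are valid, so the increment above wraps from
 * INT_MAX back to 1; zero and negative values are reserved (negatives
 * double as error codes in the dmaengine API).
 */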
829
830 static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
831 {
832         return container_of(chan, struct sdma_channel, chan);
833 }
834
835 static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
836 {
837         unsigned long flags;
838         struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
839         dma_cookie_t cookie;
840
841         spin_lock_irqsave(&sdmac->lock, flags);
842
843         cookie = sdma_assign_cookie(sdmac);
844
845         spin_unlock_irqrestore(&sdmac->lock, flags);
846
847         return cookie;
848 }
849
850 static int sdma_alloc_chan_resources(struct dma_chan *chan)
851 {
852         struct sdma_channel *sdmac = to_sdma_chan(chan);
853         struct imx_dma_data *data = chan->private;
854         int prio, ret;
855
856         if (!data)
857                 return -EINVAL;
858
859         switch (data->priority) {
860         case DMA_PRIO_HIGH:
861                 prio = 3;
862                 break;
863         case DMA_PRIO_MEDIUM:
864                 prio = 2;
865                 break;
866         case DMA_PRIO_LOW:
867         default:
868                 prio = 1;
869                 break;
870         }
871
872         sdmac->peripheral_type = data->peripheral_type;
873         sdmac->event_id0 = data->dma_request;
874
875         clk_enable(sdmac->sdma->clk);
876
877         ret = sdma_request_channel(sdmac);
878         if (ret)
879                 return ret;
880
881         ret = sdma_set_channel_priority(sdmac, prio);
882         if (ret)
883                 return ret;
884
885         dma_async_tx_descriptor_init(&sdmac->desc, chan);
886         sdmac->desc.tx_submit = sdma_tx_submit;
887         /* txd.flags will be overwritten in prep funcs */
888         sdmac->desc.flags = DMA_CTRL_ACK;
889
890         return 0;
891 }
892
893 static void sdma_free_chan_resources(struct dma_chan *chan)
894 {
895         struct sdma_channel *sdmac = to_sdma_chan(chan);
896         struct sdma_engine *sdma = sdmac->sdma;
897
898         sdma_disable_channel(sdmac);
899
900         if (sdmac->event_id0)
901                 sdma_event_disable(sdmac, sdmac->event_id0);
902         if (sdmac->event_id1)
903                 sdma_event_disable(sdmac, sdmac->event_id1);
904
905         sdmac->event_id0 = 0;
906         sdmac->event_id1 = 0;
907
908         sdma_set_channel_priority(sdmac, 0);
909
910         dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
911
912         clk_disable(sdma->clk);
913 }
914
915 static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
916                 struct dma_chan *chan, struct scatterlist *sgl,
917                 unsigned int sg_len, enum dma_transfer_direction direction,
918                 unsigned long flags)
919 {
920         struct sdma_channel *sdmac = to_sdma_chan(chan);
921         struct sdma_engine *sdma = sdmac->sdma;
922         int ret, i, count;
923         int channel = sdmac->channel;
924         struct scatterlist *sg;
925
926         if (sdmac->status == DMA_IN_PROGRESS)
927                 return NULL;
928         sdmac->status = DMA_IN_PROGRESS;
929
930         sdmac->flags = 0;
931
932         dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
933                         sg_len, channel);
934
935         sdmac->direction = direction;
936         ret = sdma_load_context(sdmac);
937         if (ret)
938                 goto err_out;
939
940         if (sg_len > NUM_BD) {
941                 dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
942                                 channel, sg_len, NUM_BD);
943                 ret = -EINVAL;
944                 goto err_out;
945         }
946
947         sdmac->chn_count = 0;
948         for_each_sg(sgl, sg, sg_len, i) {
949                 struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
950                 int param;
951
952                 bd->buffer_addr = sg->dma_address;
953
954                 count = sg->length;
955
956                 if (count > 0xffff) {
957                         dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
958                                         channel, count, 0xffff);
959                         ret = -EINVAL;
960                         goto err_out;
961                 }
962
963                 bd->mode.count = count;
964                 sdmac->chn_count += count;
965
966                 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
967                         ret =  -EINVAL;
968                         goto err_out;
969                 }
970
971                 switch (sdmac->word_size) {
972                 case DMA_SLAVE_BUSWIDTH_4_BYTES:
973                         bd->mode.command = 0;
974                         if (count & 3 || sg->dma_address & 3)
975                                 goto err_out;
976                         break;
977                 case DMA_SLAVE_BUSWIDTH_2_BYTES:
978                         bd->mode.command = 2;
979                         if (count & 1 || sg->dma_address & 1)
980                                 goto err_out;
981                         break;
982                 case DMA_SLAVE_BUSWIDTH_1_BYTE:
983                         bd->mode.command = 1;
984                         break;
985                 default:
986                         goto err_out;
987                 }
988
989                 param = BD_DONE | BD_EXTD | BD_CONT;
990
991                 if (i + 1 == sg_len) {
992                         param |= BD_INTR;
993                         param |= BD_LAST;
994                         param &= ~BD_CONT;
995                 }
996
997                 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
998                                 i, count, sg->dma_address,
999                                 param & BD_WRAP ? "wrap" : "",
1000                                 param & BD_INTR ? " intr" : "");
1001
1002                 bd->mode.status = param;
1003         }
1004
1005         sdmac->num_bd = sg_len;
1006         sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1007
1008         return &sdmac->desc;
1009 err_out:
1010         sdmac->status = DMA_ERROR;
1011         return NULL;
1012 }
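/*
 * Editor's note: the mode.command values chosen above encode the
 * peripheral access width for the SDMA script: 0 selects 32-bit
 * accesses, 2 selects 16-bit and 1 selects 8-bit. That is also why the
 * 32- and 16-bit cases insist on matching length and address alignment
 * before accepting a scatterlist entry.
 */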
1013
1014 static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1015                 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
1016                 size_t period_len, enum dma_transfer_direction direction)
1017 {
1018         struct sdma_channel *sdmac = to_sdma_chan(chan);
1019         struct sdma_engine *sdma = sdmac->sdma;
1020         int num_periods = buf_len / period_len;
1021         int channel = sdmac->channel;
1022         int ret, i = 0, buf = 0;
1023
1024         dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
1025
1026         if (sdmac->status == DMA_IN_PROGRESS)
1027                 return NULL;
1028
1029         sdmac->status = DMA_IN_PROGRESS;
1030
1031         sdmac->flags |= IMX_DMA_SG_LOOP;
1032         sdmac->direction = direction;
1033         ret = sdma_load_context(sdmac);
1034         if (ret)
1035                 goto err_out;
1036
1037         if (num_periods > NUM_BD) {
1038                 dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
1039                                 channel, num_periods, NUM_BD);
1040                 goto err_out;
1041         }
1042
1043         if (period_len > 0xffff) {
1044                 dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
1045                                 channel, period_len, 0xffff);
1046                 goto err_out;
1047         }
1048
1049         while (buf < buf_len) {
1050                 struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
1051                 int param;
1052
1053                 bd->buffer_addr = dma_addr;
1054
1055                 bd->mode.count = period_len;
1056
1057                 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1058                         goto err_out;
1059                 if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
1060                         bd->mode.command = 0;
1061                 else
1062                         bd->mode.command = sdmac->word_size;
1063
1064                 param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
1065                 if (i + 1 == num_periods)
1066                         param |= BD_WRAP;
1067
1068                 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
1069                                 i, period_len, dma_addr,
1070                                 param & BD_WRAP ? "wrap" : "",
1071                                 param & BD_INTR ? " intr" : "");
1072
1073                 bd->mode.status = param;
1074
1075                 dma_addr += period_len;
1076                 buf += period_len;
1077
1078                 i++;
1079         }
1080
1081         sdmac->num_bd = num_periods;
1082         sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1083
1084         return &sdmac->desc;
1085 err_out:
1086         sdmac->status = DMA_ERROR;
1087         return NULL;
1088 }
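/*
 * Editor's note: a minimal sketch of how an audio-style client would
 * drive this path through the dmaengine API once it holds a channel
 * (the buffer variables are hypothetical placeholders):
 *
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = chan->device->device_prep_dma_cyclic(chan, buf_phys,
 *			periods * period_bytes, period_bytes,
 *			DMA_MEM_TO_DEV);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */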
1089
1090 static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1091                 unsigned long arg)
1092 {
1093         struct sdma_channel *sdmac = to_sdma_chan(chan);
1094         struct dma_slave_config *dmaengine_cfg = (void *)arg;
1095
1096         switch (cmd) {
1097         case DMA_TERMINATE_ALL:
1098                 sdma_disable_channel(sdmac);
1099                 return 0;
1100         case DMA_SLAVE_CONFIG:
1101                 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
1102                         sdmac->per_address = dmaengine_cfg->src_addr;
1103                         sdmac->watermark_level = dmaengine_cfg->src_maxburst;
1104                         sdmac->word_size = dmaengine_cfg->src_addr_width;
1105                 } else {
1106                         sdmac->per_address = dmaengine_cfg->dst_addr;
1107                         sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
1108                         sdmac->word_size = dmaengine_cfg->dst_addr_width;
1109                 }
1110                 sdmac->direction = dmaengine_cfg->direction;
1111                 return sdma_config_channel(sdmac);
1112         default:
1113                 return -ENOSYS;
1114         }
1115
1116         return -EINVAL;
1117 }
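/*
 * Editor's note: a hedged sketch of the DMA_SLAVE_CONFIG path above as
 * a client would exercise it; the FIFO address and burst size are
 * hypothetical placeholders. dst_maxburst ends up in watermark_level
 * and dst_addr in per_address/shp_addr:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.dst_maxburst	= 4,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */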
1118
1119 static enum dma_status sdma_tx_status(struct dma_chan *chan,
1120                                             dma_cookie_t cookie,
1121                                             struct dma_tx_state *txstate)
1122 {
1123         struct sdma_channel *sdmac = to_sdma_chan(chan);
1124         dma_cookie_t last_used;
1125
1126         last_used = chan->cookie;
1127
1128         dma_set_tx_state(txstate, sdmac->last_completed, last_used,
1129                         sdmac->chn_count - sdmac->chn_real_count);
1130
1131         return sdmac->status;
1132 }
1133
1134 static void sdma_issue_pending(struct dma_chan *chan)
1135 {
1136         struct sdma_channel *sdmac = to_sdma_chan(chan);
1137         struct sdma_engine *sdma = sdmac->sdma;
1138
1139         if (sdmac->status == DMA_IN_PROGRESS)
1140                 sdma_enable_channel(sdma, sdmac->channel);
1141 }
1142
1143 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
1144
1145 static void sdma_add_scripts(struct sdma_engine *sdma,
1146                 const struct sdma_script_start_addrs *addr)
1147 {
1148         s32 *addr_arr = (s32 *)addr;
1149         s32 *saddr_arr = (s32 *)sdma->script_addrs;
1150         int i;
1151
1152         for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
1153                 if (addr_arr[i] > 0)
1154                         saddr_arr[i] = addr_arr[i];
1155 }
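/*
 * Editor's note: struct sdma_script_start_addrs is deliberately
 * treated as a flat s32 array here. Probe pre-fills every slot with
 * -EINVAL, so only entries that a ROM table or the firmware actually
 * provided turn positive, and sdma_load_context() can reject
 * peripheral types whose script address is still negative.
 */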
1156
1157 static void sdma_load_firmware(const struct firmware *fw, void *context)
1158 {
1159         struct sdma_engine *sdma = context;
1160         const struct sdma_firmware_header *header;
1161         const struct sdma_script_start_addrs *addr;
1162         unsigned short *ram_code;
1163
1164         if (!fw) {
1165                 dev_err(sdma->dev, "firmware not found\n");
1166                 return;
1167         }
1168
1169         if (fw->size < sizeof(*header))
1170                 goto err_firmware;
1171
1172         header = (struct sdma_firmware_header *)fw->data;
1173
1174         if (header->magic != SDMA_FIRMWARE_MAGIC)
1175                 goto err_firmware;
1176         if (header->ram_code_start + header->ram_code_size > fw->size)
1177                 goto err_firmware;
1178
1179         addr = (void *)header + header->script_addrs_start;
1180         ram_code = (void *)header + header->ram_code_start;
1181
1182         clk_enable(sdma->clk);
1183         /* download the RAM image for SDMA */
1184         sdma_load_script(sdma, ram_code,
1185                         header->ram_code_size,
1186                         addr->ram_code_start_addr);
1187         clk_disable(sdma->clk);
1188
1189         sdma_add_scripts(sdma, addr);
1190
1191         dev_info(sdma->dev, "loaded firmware %d.%d\n",
1192                         header->version_major,
1193                         header->version_minor);
1194
1195 err_firmware:
1196         release_firmware(fw);
1197 }
1198
1199 static int __init sdma_get_firmware(struct sdma_engine *sdma,
1200                 const char *fw_name)
1201 {
1202         int ret;
1203
1204         ret = request_firmware_nowait(THIS_MODULE,
1205                         FW_ACTION_HOTPLUG, fw_name, sdma->dev,
1206                         GFP_KERNEL, sdma, sdma_load_firmware);
1207
1208         return ret;
1209 }
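/*
 * Editor's note: request_firmware_nowait() makes the firmware load
 * fully asynchronous; probe can complete before the image shows up,
 * and sdma_load_firmware() above runs later, once userspace has
 * supplied the file. Until then only the ROM scripts passed in via
 * platform data are usable.
 */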
1210
1211 static int __init sdma_init(struct sdma_engine *sdma)
1212 {
1213         int i, ret;
1214         dma_addr_t ccb_phys;
1215
1216         switch (sdma->devtype) {
1217         case IMX31_SDMA:
1218                 sdma->num_events = 32;
1219                 break;
1220         case IMX35_SDMA:
1221                 sdma->num_events = 48;
1222                 break;
1223         default:
1224                 dev_err(sdma->dev, "Unknown sdma type %d. aborting\n",
1225                         sdma->devtype);
1226                 return -ENODEV;
1227         }
1228
1229         clk_enable(sdma->clk);
1230
1231         /* Be sure SDMA has not started yet */
1232         writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
1233
1234         sdma->channel_control = dma_alloc_coherent(NULL,
1235                         MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
1236                         sizeof(struct sdma_context_data),
1237                         &ccb_phys, GFP_KERNEL);
1238
1239         if (!sdma->channel_control) {
1240                 ret = -ENOMEM;
1241                 goto err_dma_alloc;
1242         }
1243
1244         sdma->context = (void *)sdma->channel_control +
1245                 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1246         sdma->context_phys = ccb_phys +
1247                 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1248
1249         /* Zero-out the CCB structures array just allocated */
1250         memset(sdma->channel_control, 0,
1251                         MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
1252
1253         /* disable all channels */
1254         for (i = 0; i < sdma->num_events; i++)
1255                 writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
1256
1257         /* All channels have priority 0 */
1258         for (i = 0; i < MAX_DMA_CHANNELS; i++)
1259                 writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
1260
1261         ret = sdma_request_channel(&sdma->channel[0]);
1262         if (ret)
1263                 goto err_dma_alloc;
1264
1265         sdma_config_ownership(&sdma->channel[0], false, true, false);
1266
1267         /* Set Command Channel (Channel Zero) */
1268         writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
1269
1270         /* Set bits of CONFIG register but with static context switching */
1271         /* FIXME: Check whether to set ACR bit depending on clock ratios */
1272         writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
1273
1274         writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1275
1276         /* Set bits of CONFIG register with given context switching mode */
1277         writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
1278
1279         /* Give channel 0 (the command channel) the highest priority */
1280         sdma_set_channel_priority(&sdma->channel[0], 7);
1281
1282         clk_disable(sdma->clk);
1283
1284         return 0;
1285
1286 err_dma_alloc:
1287         clk_disable(sdma->clk);
1288         dev_err(sdma->dev, "initialisation failed with %d\n", ret);
1289         return ret;
1290 }
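/*
 * Editor's note on the coherent allocation above: 32 channel control
 * blocks of 16 bytes each (512 bytes) are followed in the same buffer
 * by the single 128-byte context scratch area that channel 0 uses to
 * load per-channel contexts; ccb_phys is what SDMA_H_C0PTR points the
 * engine at.
 */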
1291
1292 static int __init sdma_probe(struct platform_device *pdev)
1293 {
1294         const struct of_device_id *of_id =
1295                         of_match_device(sdma_dt_ids, &pdev->dev);
1296         struct device_node *np = pdev->dev.of_node;
1297         const char *fw_name;
1298         int ret;
1299         int irq;
1300         struct resource *iores;
1301         struct sdma_platform_data *pdata = pdev->dev.platform_data;
1302         int i;
1303         struct sdma_engine *sdma;
1304         s32 *saddr_arr;
1305
1306         sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
1307         if (!sdma)
1308                 return -ENOMEM;
1309
1310         mutex_init(&sdma->channel_0_lock);
1311
1312         sdma->dev = &pdev->dev;
1313
1314         iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1315         irq = platform_get_irq(pdev, 0);
1316         if (!iores || irq < 0) {
1317                 ret = -EINVAL;
1318                 goto err_irq;
1319         }
1320
1321         if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
1322                 ret = -EBUSY;
1323                 goto err_request_region;
1324         }
1325
1326         sdma->clk = clk_get(&pdev->dev, NULL);
1327         if (IS_ERR(sdma->clk)) {
1328                 ret = PTR_ERR(sdma->clk);
1329                 goto err_clk;
1330         }
1331
1332         sdma->regs = ioremap(iores->start, resource_size(iores));
1333         if (!sdma->regs) {
1334                 ret = -ENOMEM;
1335                 goto err_ioremap;
1336         }
1337
1338         ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
1339         if (ret)
1340                 goto err_request_irq;
1341
1342         sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
1343         if (!sdma->script_addrs) {
1344                 ret = -ENOMEM;
1345                 goto err_alloc;
1346         }
1347
1348         /* initially no scripts available */
1349         saddr_arr = (s32 *)sdma->script_addrs;
1350         for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
1351                 saddr_arr[i] = -EINVAL;
1352
1353         if (of_id)
1354                 pdev->id_entry = of_id->data;
1355         sdma->devtype = pdev->id_entry->driver_data;
1356
1357         dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
1358         dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
1359
1360         INIT_LIST_HEAD(&sdma->dma_device.channels);
1361         /* Initialize channel parameters */
1362         for (i = 0; i < MAX_DMA_CHANNELS; i++) {
1363                 struct sdma_channel *sdmac = &sdma->channel[i];
1364
1365                 sdmac->sdma = sdma;
1366                 spin_lock_init(&sdmac->lock);
1367
1368                 sdmac->chan.device = &sdma->dma_device;
1369                 sdmac->channel = i;
1370
1371                 /*
1372                  * Add the channel to the DMAC list. Do not add channel 0 though
1373                  * because we need it internally in the SDMA driver. This also means
1374                  * that channel 0 in dmaengine counting matches sdma channel 1.
1375                  */
1376                 if (i)
1377                         list_add_tail(&sdmac->chan.device_node,
1378                                         &sdma->dma_device.channels);
1379         }
1380
1381         ret = sdma_init(sdma);
1382         if (ret)
1383                 goto err_init;
1384
1385         if (pdata && pdata->script_addrs)
1386                 sdma_add_scripts(sdma, pdata->script_addrs);
1387
1388         if (pdata) {
1389                 sdma_get_firmware(sdma, pdata->fw_name);
1390         } else {
1391                 /*
1392                  * Because the device tree does not encode the ROM script
1393                  * addresses, the RAM script in the firmware is mandatory
1394                  * for device tree probe; otherwise probing fails.
1395                  */
1396                 ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
1397                                               &fw_name);
1398                 if (ret) {
1399                         dev_err(&pdev->dev, "failed to get firmware name\n");
1400                         goto err_init;
1401                 }
1402
1403                 ret = sdma_get_firmware(sdma, fw_name);
1404                 if (ret) {
1405                         dev_err(&pdev->dev, "failed to get firmware\n");
1406                         goto err_init;
1407                 }
1408         }
1409
1410         sdma->dma_device.dev = &pdev->dev;
1411
1412         sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
1413         sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
1414         sdma->dma_device.device_tx_status = sdma_tx_status;
1415         sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
1416         sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
1417         sdma->dma_device.device_control = sdma_control;
1418         sdma->dma_device.device_issue_pending = sdma_issue_pending;
1419         sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
1420         dma_set_max_seg_size(sdma->dma_device.dev, 65535);
1421
1422         ret = dma_async_device_register(&sdma->dma_device);
1423         if (ret) {
1424                 dev_err(&pdev->dev, "unable to register\n");
1425                 goto err_init;
1426         }
1427
1428         dev_info(sdma->dev, "initialized\n");
1429
1430         return 0;
1431
1432 err_init:
1433         kfree(sdma->script_addrs);
1434 err_alloc:
1435         free_irq(irq, sdma);
1436 err_request_irq:
1437         iounmap(sdma->regs);
1438 err_ioremap:
1439         clk_put(sdma->clk);
1440 err_clk:
1441         release_mem_region(iores->start, resource_size(iores));
1442 err_request_region:
1443 err_irq:
1444         kfree(sdma);
1445         return ret;
1446 }
1447
1448 static int __exit sdma_remove(struct platform_device *pdev)
1449 {
1450         return -EBUSY;
1451 }
1452
1453 static struct platform_driver sdma_driver = {
1454         .driver         = {
1455                 .name   = "imx-sdma",
1456                 .of_match_table = sdma_dt_ids,
1457         },
1458         .id_table       = sdma_devtypes,
1459         .remove         = __exit_p(sdma_remove),
1460 };
1461
1462 static int __init sdma_module_init(void)
1463 {
1464         return platform_driver_probe(&sdma_driver, sdma_probe);
1465 }
1466 module_init(sdma_module_init);
1467
1468 MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
1469 MODULE_DESCRIPTION("i.MX SDMA driver");
1470 MODULE_LICENSE("GPL");