Merge branch 'master' into upstream
[pandora-kernel.git] / arch / arm / plat-omap / dma.c
1 /*
2  * linux/arch/arm/plat-omap/dma.c
3  *
4  * Copyright (C) 2003 - 2008 Nokia Corporation
5  * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6  * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
7  * Graphics DMA and LCD DMA graphics tranformations
8  * by Imre Deak <imre.deak@nokia.com>
9  * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
10  * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
11  * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
12  *
13  * Copyright (C) 2009 Texas Instruments
14  * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
15  *
16  * Support functions for the OMAP internal DMA channels.
17  *
18  * This program is free software; you can redistribute it and/or modify
19  * it under the terms of the GNU General Public License version 2 as
20  * published by the Free Software Foundation.
21  *
22  */
23
24 #include <linux/module.h>
25 #include <linux/init.h>
26 #include <linux/sched.h>
27 #include <linux/spinlock.h>
28 #include <linux/errno.h>
29 #include <linux/interrupt.h>
30 #include <linux/irq.h>
31 #include <linux/io.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34
35 #include <asm/system.h>
36 #include <mach/hardware.h>
37 #include <plat/dma.h>
38
39 #include <plat/tc.h>
40
41 #undef DEBUG
42
43 #ifndef CONFIG_ARCH_OMAP1
44 enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED,
45         DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED
46 };
47
48 enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
49 #endif
50
51 #define OMAP_DMA_ACTIVE                 0x01
52 #define OMAP2_DMA_CSR_CLEAR_MASK        0xffe
53
54 #define OMAP_FUNC_MUX_ARM_BASE          (0xfffe1000 + 0xec)
55
56 static int enable_1510_mode;
57
/*
 * Snapshot of global DMA controller registers — presumably saved and
 * restored around power transitions; the save/restore users are not
 * visible in this chunk (TODO confirm against the rest of the file).
 */
static struct omap_dma_global_context_registers {
	u32 dma_irqenable_l0;	/* IRQENABLE_L0: line-0 interrupt mask */
	u32 dma_ocp_sysconfig;	/* OCP_SYSCONFIG */
	u32 dma_gcr;		/* GCR: fifo depth / arbitration (see omap_dma_set_global_params) */
} omap_dma_global_context;
63
/* Per-logical-channel bookkeeping, one entry per channel in dma_chan[] */
struct omap_dma_lch {
	int next_lch;		/* next channel in a hardware link, or -1 */
	int dev_id;		/* requesting device id; -1 means channel is free */
	u16 saved_csr;
	u16 enabled_irqs;	/* CICR mask written by omap_enable_channel_irq() */
	const char *dev_name;	/* owner name, for diagnostics */
	void (*callback)(int lch, u16 ch_status, void *data);
	void *data;		/* opaque cookie handed back to callback */

#ifndef CONFIG_ARCH_OMAP1
	/* required for Dynamic chaining */
	int prev_linked_ch;	/* previous channel in the chain, or -1 */
	int next_linked_ch;	/* next channel in the chain, or -1 */
	int state;		/* DMA_CH_* state (see enum above) */
	int chain_id;		/* owning chain, or -1 when unchained */

	int status;
#endif
	long flags;		/* OMAP_DMA_ACTIVE etc. */
};
84
/* Bookkeeping for one DMA chain (dynamic chaining, non-OMAP1) */
struct dma_link_info {
	int *linked_dmach_q;	/* array of logical channels in this chain */
	int no_of_lchs_linked;	/* number of entries in linked_dmach_q */

	int q_count;		/* channels currently queued for transfer */
	int q_tail;		/* circular-queue tail index */
	int q_head;		/* circular-queue head index */

	int chain_state;	/* DMA_CHAIN_STARTED / DMA_CHAIN_NOTSTARTED */
	int chain_mode;

};
97
98 static struct dma_link_info *dma_linked_lch;
99
100 #ifndef CONFIG_ARCH_OMAP1
101
102 /* Chain handling macros */
103 #define OMAP_DMA_CHAIN_QINIT(chain_id)                                  \
104         do {                                                            \
105                 dma_linked_lch[chain_id].q_head =                       \
106                 dma_linked_lch[chain_id].q_tail =                       \
107                 dma_linked_lch[chain_id].q_count = 0;                   \
108         } while (0)
109 #define OMAP_DMA_CHAIN_QFULL(chain_id)                                  \
110                 (dma_linked_lch[chain_id].no_of_lchs_linked ==          \
111                 dma_linked_lch[chain_id].q_count)
112 #define OMAP_DMA_CHAIN_QLAST(chain_id)                                  \
113         do {                                                            \
114                 ((dma_linked_lch[chain_id].no_of_lchs_linked-1) ==      \
115                 dma_linked_lch[chain_id].q_count)                       \
116         } while (0)
117 #define OMAP_DMA_CHAIN_QEMPTY(chain_id)                                 \
118                 (0 == dma_linked_lch[chain_id].q_count)
119 #define __OMAP_DMA_CHAIN_INCQ(end)                                      \
120         ((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked)
121 #define OMAP_DMA_CHAIN_INCQHEAD(chain_id)                               \
122         do {                                                            \
123                 __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head); \
124                 dma_linked_lch[chain_id].q_count--;                     \
125         } while (0)
126
127 #define OMAP_DMA_CHAIN_INCQTAIL(chain_id)                               \
128         do {                                                            \
129                 __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail); \
130                 dma_linked_lch[chain_id].q_count++; \
131         } while (0)
132 #endif
133
134 static int dma_lch_count;
135 static int dma_chan_count;
136 static int omap_dma_reserve_channels;
137
138 static spinlock_t dma_chan_lock;
139 static struct omap_dma_lch *dma_chan;
140 static void __iomem *omap_dma_base;
141
/* IRQ line for each OMAP1 logical DMA channel, indexed by channel number */
static const u8 omap1_dma_irq[OMAP1_LOGICAL_DMA_CH_COUNT] = {
	INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
	INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
	INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
	INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
	INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
};
149
150 static inline void disable_lnk(int lch);
151 static void omap_disable_channel_irq(int lch);
152 static inline void omap_enable_channel_irq(int lch);
153
/*
 * Warn about an OMAP1-only code path reached on 24xx.  No trailing
 * semicolon in the expansion: the old one made the call site expand to
 * a double semicolon, which breaks unbraced if/else around the macro.
 */
#define REVISIT_24XX()		printk(KERN_ERR "FIXME: no %s on 24xx\n", \
						__func__)
156
/*
 * Register accessors: OMAP1 DMA registers are 16-bit, OMAP2+ are 32-bit.
 * The reg argument is token-pasted onto the per-SoC register name macro
 * (OMAP1_DMA_* or OMAP_DMA4_*), which yields the register offset.
 */
#define dma_read(reg)							\
({									\
	u32 __val;							\
	if (cpu_class_is_omap1())					\
		__val = __raw_readw(omap_dma_base + OMAP1_DMA_##reg);	\
	else								\
		__val = __raw_readl(omap_dma_base + OMAP_DMA4_##reg);	\
	__val;								\
})

#define dma_write(val, reg)						\
({									\
	if (cpu_class_is_omap1())					\
		__raw_writew((u16)(val), omap_dma_base + OMAP1_DMA_##reg); \
	else								\
		__raw_writel((val), omap_dma_base + OMAP_DMA4_##reg);	\
})
174
#ifdef CONFIG_ARCH_OMAP15XX
/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
int omap_dma_in_1510_mode(void)
{
	/* enable_1510_mode is set up by init code outside this chunk */
	return enable_1510_mode;
}
#else
/* Without 15xx support the 1510 compatibility mode can never be active */
#define omap_dma_in_1510_mode()		0
#endif
184
185 #ifdef CONFIG_ARCH_OMAP1
186 static inline int get_gdma_dev(int req)
187 {
188         u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
189         int shift = ((req - 1) % 5) * 6;
190
191         return ((omap_readl(reg) >> shift) & 0x3f) + 1;
192 }
193
194 static inline void set_gdma_dev(int req, int dev)
195 {
196         u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
197         int shift = ((req - 1) % 5) * 6;
198         u32 l;
199
200         l = omap_readl(reg);
201         l &= ~(0x3f << shift);
202         l |= (dev - 1) << shift;
203         omap_writel(l, reg);
204 }
205 #else
206 #define set_gdma_dev(req, dev)  do {} while (0)
207 #endif
208
209 /* Omap1 only */
210 static void clear_lch_regs(int lch)
211 {
212         int i;
213         void __iomem *lch_base = omap_dma_base + OMAP1_DMA_CH_BASE(lch);
214
215         for (i = 0; i < 0x2c; i += 2)
216                 __raw_writew(0, lch_base + i);
217 }
218
/*
 * omap_set_dma_priority - set DMA priority
 * @lch: logical channel (consulted on OMAP2+ only)
 * @dst_port: destination traffic-controller port (consulted on OMAP1 only)
 * @priority: OMAP1: 4-bit priority value; OMAP2+: non-zero sets the
 *	channel's priority bit (CCR bit 6), zero clears it
 */
void omap_set_dma_priority(int lch, int dst_port, int priority)
{
	unsigned long reg;
	u32 l;

	if (cpu_class_is_omap1()) {
		/* OMAP1: priority lives in the TC port priority registers */
		switch (dst_port) {
		case OMAP_DMA_PORT_OCP_T1:	/* FFFECC00 */
			reg = OMAP_TC_OCPT1_PRIOR;
			break;
		case OMAP_DMA_PORT_OCP_T2:	/* FFFECCD0 */
			reg = OMAP_TC_OCPT2_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFF:	/* FFFECC08 */
			reg = OMAP_TC_EMIFF_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFS:	/* FFFECC04 */
			reg = OMAP_TC_EMIFS_PRIOR;
			break;
		default:
			BUG();
			return;
		}
		/* 4-bit priority field at bits 11:8 */
		l = omap_readl(reg);
		l &= ~(0xf << 8);
		l |= (priority & 0xf) << 8;
		omap_writel(l, reg);
	}

	if (cpu_class_is_omap2()) {
		u32 ccr;

		ccr = dma_read(CCR(lch));
		if (priority)
			ccr |= (1 << 6);
		else
			ccr &= ~(1 << 6);
		dma_write(ccr, CCR(lch));
	}
}
EXPORT_SYMBOL(omap_set_dma_priority);
260
/*
 * omap_set_dma_transfer_params - program element/frame counts and sync mode
 * @lch: logical channel
 * @data_type: element width, written to CSDP bits 1:0
 * @elem_count: elements per frame (CEN)
 * @frame_count: frames per block (CFN)
 * @sync_mode: OMAP_DMA_SYNC_ELEMENT / _FRAME / _BLOCK
 * @dma_trigger: OMAP2+ hardware request line; zero leaves the OMAP2+
 *	trigger/sync configuration untouched
 * @src_or_dst_synch: zero = dest sync, non-zero = source sync,
 *	OMAP_DMA_DST_SYNC_PREFETCH = dest sync with prefetch (OMAP2+)
 */
void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
				  int frame_count, int sync_mode,
				  int dma_trigger, int src_or_dst_synch)
{
	u32 l;

	l = dma_read(CSDP(lch));
	l &= ~0x03;
	l |= data_type;
	dma_write(l, CSDP(lch));

	if (cpu_class_is_omap1()) {
		u16 ccr;

		/* frame-sync bit in CCR */
		ccr = dma_read(CCR(lch));
		ccr &= ~(1 << 5);
		if (sync_mode == OMAP_DMA_SYNC_FRAME)
			ccr |= 1 << 5;
		dma_write(ccr, CCR(lch));

		/* block-sync bit in CCR2 */
		ccr = dma_read(CCR2(lch));
		ccr &= ~(1 << 2);
		if (sync_mode == OMAP_DMA_SYNC_BLOCK)
			ccr |= 1 << 2;
		dma_write(ccr, CCR2(lch));
	}

	if (cpu_class_is_omap2() && dma_trigger) {
		u32 val;

		val = dma_read(CCR(lch));

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~((1 << 23) | (3 << 19) | 0x1f);
		val |= (dma_trigger & ~0x1f) << 14;
		val |= dma_trigger & 0x1f;

		if (sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		else
			val &= ~(1 << 5);

		if (sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;
		else
			val &= ~(1 << 18);

		if (src_or_dst_synch == OMAP_DMA_DST_SYNC_PREFETCH) {
			val &= ~(1 << 24);	/* dest synch */
			val |= (1 << 23);	/* Prefetch */
		} else if (src_or_dst_synch) {
			val |= 1 << 24;		/* source synch */
		} else {
			val &= ~(1 << 24);	/* dest synch */
		}
		dma_write(val, CCR(lch));
	}

	dma_write(elem_count, CEN(lch));
	dma_write(frame_count, CFN(lch));
}
EXPORT_SYMBOL(omap_set_dma_transfer_params);
323
/*
 * omap_set_dma_color_mode - configure constant-fill / transparent-copy
 * @lch: logical channel
 * @mode: OMAP_DMA_COLOR_DIS, OMAP_DMA_CONSTANT_FILL or
 *	OMAP_DMA_TRANSPARENT_COPY (anything else is a BUG)
 * @color: color value; masked to 24 bits on OMAP2+, split across the
 *	COLOR_L/COLOR_U register pair on OMAP1
 *
 * Not available in 1510 compatibility mode.
 */
void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
{
	BUG_ON(omap_dma_in_1510_mode());

	if (cpu_class_is_omap1()) {
		u16 w;

		/* color mode is in CCR2 bits 1:0 on OMAP1 */
		w = dma_read(CCR2(lch));
		w &= ~0x03;

		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			w |= 0x01;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			w |= 0x02;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		dma_write(w, CCR2(lch));

		w = dma_read(LCH_CTRL(lch));
		w &= ~0x0f;
		/* Default is channel type 2D */
		if (mode) {
			dma_write((u16)color, COLOR_L(lch));
			dma_write((u16)(color >> 16), COLOR_U(lch));
			w |= 1;		/* Channel type G */
		}
		dma_write(w, LCH_CTRL(lch));
	}

	if (cpu_class_is_omap2()) {
		u32 val;

		/* color mode is in CCR bits 17:16 on OMAP2+ */
		val = dma_read(CCR(lch));
		val &= ~((1 << 17) | (1 << 16));

		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			val |= 1 << 16;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			val |= 1 << 17;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		dma_write(val, CCR(lch));

		color &= 0xffffff;
		dma_write(color, COLOR(lch));
	}
}
EXPORT_SYMBOL(omap_set_dma_color_mode);
384
385 void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
386 {
387         if (cpu_class_is_omap2()) {
388                 u32 csdp;
389
390                 csdp = dma_read(CSDP(lch));
391                 csdp &= ~(0x3 << 16);
392                 csdp |= (mode << 16);
393                 dma_write(csdp, CSDP(lch));
394         }
395 }
396 EXPORT_SYMBOL(omap_set_dma_write_mode);
397
398 void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
399 {
400         if (cpu_class_is_omap1() && !cpu_is_omap15xx()) {
401                 u32 l;
402
403                 l = dma_read(LCH_CTRL(lch));
404                 l &= ~0x7;
405                 l |= mode;
406                 dma_write(l, LCH_CTRL(lch));
407         }
408 }
409 EXPORT_SYMBOL(omap_set_dma_channel_mode);
410
/*
 * omap_set_dma_src_params - program the transfer source
 * @lch: logical channel
 * @src_port: source TC port — consulted on OMAP1 only
 * @src_amode: addressing mode, written to CCR bits 13:12
 * @src_start: source start address
 * @src_ei: source element index (CSEI)
 * @src_fi: source frame index (CSFI)
 */
void omap_set_dma_src_params(int lch, int src_port, int src_amode,
			     unsigned long src_start,
			     int src_ei, int src_fi)
{
	u32 l;

	if (cpu_class_is_omap1()) {
		u16 w;

		/* source port field in CSDP bits 6:2 */
		w = dma_read(CSDP(lch));
		w &= ~(0x1f << 2);
		w |= src_port << 2;
		dma_write(w, CSDP(lch));
	}

	l = dma_read(CCR(lch));
	l &= ~(0x03 << 12);
	l |= src_amode << 12;
	dma_write(l, CCR(lch));

	if (cpu_class_is_omap1()) {
		/* OMAP1 splits the 32-bit address over two 16-bit registers */
		dma_write(src_start >> 16, CSSA_U(lch));
		dma_write((u16)src_start, CSSA_L(lch));
	}

	if (cpu_class_is_omap2())
		dma_write(src_start, CSSA(lch));

	dma_write(src_ei, CSEI(lch));
	dma_write(src_fi, CSFI(lch));
}
EXPORT_SYMBOL(omap_set_dma_src_params);
444
/*
 * omap_set_dma_params - program a channel from one parameter block
 * @lch: logical channel
 * @params: aggregated transfer, source, destination and priority settings
 *
 * Convenience wrapper around the individual omap_set_dma_* helpers.
 * Channel priorities are only touched when at least one of read_prio or
 * write_prio is non-zero.
 */
void omap_set_dma_params(int lch, struct omap_dma_channel_params *params)
{
	omap_set_dma_transfer_params(lch, params->data_type,
				     params->elem_count, params->frame_count,
				     params->sync_mode, params->trigger,
				     params->src_or_dst_synch);
	omap_set_dma_src_params(lch, params->src_port,
				params->src_amode, params->src_start,
				params->src_ei, params->src_fi);

	omap_set_dma_dest_params(lch, params->dst_port,
				 params->dst_amode, params->dst_start,
				 params->dst_ei, params->dst_fi);
	if (params->read_prio || params->write_prio)
		omap_dma_set_prio_lch(lch, params->read_prio,
				      params->write_prio);
}
EXPORT_SYMBOL(omap_set_dma_params);
463
/*
 * Set the source element/frame indexes for channel @lch.
 * A no-op on OMAP2+, where omap_set_dma_src_params() covers this.
 */
void omap_set_dma_src_index(int lch, int eidx, int fidx)
{
	if (!cpu_class_is_omap2()) {
		dma_write(eidx, CSEI(lch));
		dma_write(fidx, CSFI(lch));
	}
}
EXPORT_SYMBOL(omap_set_dma_src_index);
473
474 void omap_set_dma_src_data_pack(int lch, int enable)
475 {
476         u32 l;
477
478         l = dma_read(CSDP(lch));
479         l &= ~(1 << 6);
480         if (enable)
481                 l |= (1 << 6);
482         dma_write(l, CSDP(lch));
483 }
484 EXPORT_SYMBOL(omap_set_dma_src_data_pack);
485
/*
 * omap_set_dma_src_burst_mode - set the source burst size (CSDP bits 8:7)
 * @lch: logical channel
 * @burst_mode: OMAP_DMA_DATA_BURST_DIS/4/8/16
 *
 * Burst sizes the running SoC class cannot do (8 and 16 on OMAP1) fall
 * through to BUG().
 */
void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = dma_read(CSDP(lch));
	l &= ~(0x03 << 7);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		/* same burst length, different encoding per SoC class */
		if (cpu_class_is_omap2())
			burst = 0x1;
		else
			burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		if (cpu_class_is_omap2()) {
			burst = 0x2;
			break;
		}
		/*
		 * not supported by current hardware on OMAP1
		 * w |= (0x03 << 7);
		 * fall through
		 */
	case OMAP_DMA_DATA_BURST_16:
		if (cpu_class_is_omap2()) {
			burst = 0x3;
			break;
		}
		/*
		 * OMAP1 don't support burst 16
		 * fall through
		 */
	default:
		BUG();
	}

	l |= (burst << 7);
	dma_write(l, CSDP(lch));
}
EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
530
/*
 * omap_set_dma_dest_params - program the transfer destination
 * @lch: logical channel
 * @dest_port: destination TC port — consulted on OMAP1 only
 * @dest_amode: addressing mode, written to CCR bits 15:14
 * @dest_start: destination start address
 * @dst_ei: destination element index (CDEI)
 * @dst_fi: destination frame index (CDFI)
 */
void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
			      unsigned long dest_start,
			      int dst_ei, int dst_fi)
{
	u32 l;

	if (cpu_class_is_omap1()) {
		/* destination port field in CSDP bits 13:9 */
		l = dma_read(CSDP(lch));
		l &= ~(0x1f << 9);
		l |= dest_port << 9;
		dma_write(l, CSDP(lch));
	}

	l = dma_read(CCR(lch));
	l &= ~(0x03 << 14);
	l |= dest_amode << 14;
	dma_write(l, CCR(lch));

	if (cpu_class_is_omap1()) {
		/* OMAP1 splits the 32-bit address over two 16-bit registers */
		dma_write(dest_start >> 16, CDSA_U(lch));
		dma_write(dest_start, CDSA_L(lch));
	}

	if (cpu_class_is_omap2())
		dma_write(dest_start, CDSA(lch));

	dma_write(dst_ei, CDEI(lch));
	dma_write(dst_fi, CDFI(lch));
}
EXPORT_SYMBOL(omap_set_dma_dest_params);
562
/*
 * Set the destination element/frame indexes for channel @lch.
 * A no-op on OMAP2+, where omap_set_dma_dest_params() covers this.
 */
void omap_set_dma_dest_index(int lch, int eidx, int fidx)
{
	if (!cpu_class_is_omap2()) {
		dma_write(eidx, CDEI(lch));
		dma_write(fidx, CDFI(lch));
	}
}
EXPORT_SYMBOL(omap_set_dma_dest_index);
572
573 void omap_set_dma_dest_data_pack(int lch, int enable)
574 {
575         u32 l;
576
577         l = dma_read(CSDP(lch));
578         l &= ~(1 << 13);
579         if (enable)
580                 l |= 1 << 13;
581         dma_write(l, CSDP(lch));
582 }
583 EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
584
/*
 * omap_set_dma_dest_burst_mode - set the destination burst size
 *	(CSDP bits 15:14)
 * @lch: logical channel
 * @burst_mode: OMAP_DMA_DATA_BURST_DIS/4/8/16
 *
 * Burst 16 is not available on OMAP1 and falls through to BUG().
 */
void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = dma_read(CSDP(lch));
	l &= ~(0x03 << 14);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		/* same burst length, different encoding per SoC class */
		if (cpu_class_is_omap2())
			burst = 0x1;
		else
			burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		if (cpu_class_is_omap2())
			burst = 0x2;
		else
			burst = 0x3;
		break;
	case OMAP_DMA_DATA_BURST_16:
		if (cpu_class_is_omap2()) {
			burst = 0x3;
			break;
		}
		/*
		 * OMAP1 don't support burst 16
		 * fall through
		 */
	default:
		printk(KERN_ERR "Invalid DMA burst mode\n");
		BUG();
		return;
	}
	l |= (burst << 14);
	dma_write(l, CSDP(lch));
}
EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
626
627 static inline void omap_enable_channel_irq(int lch)
628 {
629         u32 status;
630
631         /* Clear CSR */
632         if (cpu_class_is_omap1())
633                 status = dma_read(CSR(lch));
634         else if (cpu_class_is_omap2())
635                 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
636
637         /* Enable some nice interrupts. */
638         dma_write(dma_chan[lch].enabled_irqs, CICR(lch));
639 }
640
/* Mask all channel interrupts on OMAP2+; nothing to do on OMAP1 here */
static void omap_disable_channel_irq(int lch)
{
	if (!cpu_class_is_omap2())
		return;

	dma_write(0, CICR(lch));
}
646
647 void omap_enable_dma_irq(int lch, u16 bits)
648 {
649         dma_chan[lch].enabled_irqs |= bits;
650 }
651 EXPORT_SYMBOL(omap_enable_dma_irq);
652
653 void omap_disable_dma_irq(int lch, u16 bits)
654 {
655         dma_chan[lch].enabled_irqs &= ~bits;
656 }
657 EXPORT_SYMBOL(omap_disable_dma_irq);
658
/*
 * enable_lnk - hardware-link channel @lch to its successor
 *
 * Writes the next channel number with the ENABLE_LNK bit (bit 15) into
 * CLNK_CTRL so the controller chains into the next transfer on its own.
 * On OMAP1 the STOP_LNK bit (bit 14) is cleared first.
 */
static inline void enable_lnk(int lch)
{
	u32 l;

	l = dma_read(CLNK_CTRL(lch));

	if (cpu_class_is_omap1())
		l &= ~(1 << 14);

	/* Set the ENABLE_LNK bits */
	if (dma_chan[lch].next_lch != -1)
		l = dma_chan[lch].next_lch | (1 << 15);

#ifndef CONFIG_ARCH_OMAP1
	/* chained channels (next_linked_ch) take precedence on OMAP2+ */
	if (cpu_class_is_omap2())
		if (dma_chan[lch].next_linked_ch != -1)
			l = dma_chan[lch].next_linked_ch | (1 << 15);
#endif

	dma_write(l, CLNK_CTRL(lch));
}
680
/*
 * disable_lnk - break the hardware link out of channel @lch
 *
 * Masks the channel's interrupts, then stops linking: OMAP1 sets the
 * STOP_LNK bit, OMAP2+ clears ENABLE_LNK.  Also drops the channel's
 * OMAP_DMA_ACTIVE flag.
 */
static inline void disable_lnk(int lch)
{
	u32 l;

	l = dma_read(CLNK_CTRL(lch));

	/* Disable interrupts */
	if (cpu_class_is_omap1()) {
		dma_write(0, CICR(lch));
		/* Set the STOP_LNK bit */
		l |= 1 << 14;
	}

	if (cpu_class_is_omap2()) {
		omap_disable_channel_irq(lch);
		/* Clear the ENABLE_LNK bit */
		l &= ~(1 << 15);
	}

	dma_write(l, CLNK_CTRL(lch));
	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
703
704 static inline void omap2_enable_irq_lch(int lch)
705 {
706         u32 val;
707         unsigned long flags;
708
709         if (!cpu_class_is_omap2())
710                 return;
711
712         spin_lock_irqsave(&dma_chan_lock, flags);
713         val = dma_read(IRQENABLE_L0);
714         val |= 1 << lch;
715         dma_write(val, IRQENABLE_L0);
716         spin_unlock_irqrestore(&dma_chan_lock, flags);
717 }
718
719 static inline void omap2_disable_irq_lch(int lch)
720 {
721         u32 val;
722         unsigned long flags;
723
724         if (!cpu_class_is_omap2())
725                 return;
726
727         spin_lock_irqsave(&dma_chan_lock, flags);
728         val = dma_read(IRQENABLE_L0);
729         val &= ~(1 << lch);
730         dma_write(val, IRQENABLE_L0);
731         spin_unlock_irqrestore(&dma_chan_lock, flags);
732 }
733
/*
 * omap_request_dma - allocate a logical DMA channel
 * @dev_id: hardware request line (0 = no hardware sync device)
 * @dev_name: owner name, kept for diagnostics
 * @callback: status handler invoked with @data as its last argument
 * @data: opaque cookie for @callback
 * @dma_ch_out: on success, receives the allocated channel number
 *
 * Returns 0 on success or -EBUSY when no channel is free.
 */
int omap_request_dma(int dev_id, const char *dev_name,
		     void (*callback)(int lch, u16 ch_status, void *data),
		     void *data, int *dma_ch_out)
{
	int ch, free_ch = -1;
	unsigned long flags;
	struct omap_dma_lch *chan;

	/* Find the first free channel and claim it under the lock */
	spin_lock_irqsave(&dma_chan_lock, flags);
	for (ch = 0; ch < dma_chan_count; ch++) {
		if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
			free_ch = ch;
			if (dev_id == 0)
				break;
		}
	}
	if (free_ch == -1) {
		spin_unlock_irqrestore(&dma_chan_lock, flags);
		return -EBUSY;
	}
	chan = dma_chan + free_ch;
	chan->dev_id = dev_id;

	/* Reset the channel's registers to a known state */
	if (cpu_class_is_omap1())
		clear_lch_regs(free_ch);

	if (cpu_class_is_omap2())
		omap_clear_dma(free_ch);

	spin_unlock_irqrestore(&dma_chan_lock, flags);

	chan->dev_name = dev_name;
	chan->callback = callback;
	chan->data = data;
	chan->flags = 0;

#ifndef CONFIG_ARCH_OMAP1
	if (cpu_class_is_omap2()) {
		chan->chain_id = -1;
		chan->next_linked_ch = -1;
	}
#endif

	/* Default interrupt set; error interrupts differ per SoC class */
	chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;

	if (cpu_class_is_omap1())
		chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
	else if (cpu_class_is_omap2())
		chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
			OMAP2_DMA_TRANS_ERR_IRQ;

	if (cpu_is_omap16xx()) {
		/* If the sync device is set, configure it dynamically. */
		if (dev_id != 0) {
			set_gdma_dev(free_ch + 1, dev_id);
			dev_id = free_ch + 1;
		}
		/*
		 * Disable the 1510 compatibility mode and set the sync device
		 * id.
		 */
		dma_write(dev_id | (1 << 10), CCR(free_ch));
	} else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
		dma_write(dev_id, CCR(free_ch));
	}

	if (cpu_class_is_omap2()) {
		omap2_enable_irq_lch(free_ch);
		omap_enable_channel_irq(free_ch);
		/* Clear the CSR register and IRQ status register */
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(free_ch));
		dma_write(1 << free_ch, IRQSTATUS_L0);
	}

	*dma_ch_out = free_ch;

	return 0;
}
EXPORT_SYMBOL(omap_request_dma);
813
/*
 * omap_free_dma - release a channel obtained with omap_request_dma()
 * @lch: logical channel to free
 *
 * Masks and clears the channel's interrupts, stops any transfer in
 * progress and marks the channel available again.  Freeing a channel
 * that was never allocated only logs an error.
 */
void omap_free_dma(int lch)
{
	unsigned long flags;

	if (dma_chan[lch].dev_id == -1) {
		pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
		       lch);
		return;
	}

	if (cpu_class_is_omap1()) {
		/* Disable all DMA interrupts for the channel. */
		dma_write(0, CICR(lch));
		/* Make sure the DMA transfer is stopped. */
		dma_write(0, CCR(lch));
	}

	if (cpu_class_is_omap2()) {
		omap2_disable_irq_lch(lch);

		/* Clear the CSR register and IRQ status register */
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
		dma_write(1 << lch, IRQSTATUS_L0);

		/* Disable all DMA interrupts for the channel. */
		dma_write(0, CICR(lch));

		/* Make sure the DMA transfer is stopped. */
		dma_write(0, CCR(lch));
		omap_clear_dma(lch);
	}

	/* Mark the channel free under the allocation lock */
	spin_lock_irqsave(&dma_chan_lock, flags);
	dma_chan[lch].dev_id = -1;
	dma_chan[lch].next_lch = -1;
	dma_chan[lch].callback = NULL;
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
EXPORT_SYMBOL(omap_free_dma);
853
854 /**
855  * @brief omap_dma_set_global_params : Set global priority settings for dma
856  *
857  * @param arb_rate
858  * @param max_fifo_depth
859  * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
860  *                                                 DMA_THREAD_RESERVE_ONET
861  *                                                 DMA_THREAD_RESERVE_TWOT
862  *                                                 DMA_THREAD_RESERVE_THREET
863  */
864 void
865 omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
866 {
867         u32 reg;
868
869         if (!cpu_class_is_omap2()) {
870                 printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__);
871                 return;
872         }
873
874         if (max_fifo_depth == 0)
875                 max_fifo_depth = 1;
876         if (arb_rate == 0)
877                 arb_rate = 1;
878
879         reg = 0xff & max_fifo_depth;
880         reg |= (0x3 & tparams) << 12;
881         reg |= (arb_rate & 0xff) << 16;
882
883         dma_write(reg, GCR);
884 }
885 EXPORT_SYMBOL(omap_dma_set_global_params);
886
887 /**
888  * @brief omap_dma_set_prio_lch : Set channel wise priority settings
889  *
890  * @param lch
891  * @param read_prio - Read priority
892  * @param write_prio - Write priority
893  * Both of the above can be set with one of the following values :
894  *      DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
895  */
896 int
897 omap_dma_set_prio_lch(int lch, unsigned char read_prio,
898                       unsigned char write_prio)
899 {
900         u32 l;
901
902         if (unlikely((lch < 0 || lch >= dma_lch_count))) {
903                 printk(KERN_ERR "Invalid channel id\n");
904                 return -EINVAL;
905         }
906         l = dma_read(CCR(lch));
907         l &= ~((1 << 6) | (1 << 26));
908         if (cpu_is_omap2430() || cpu_is_omap34xx() ||  cpu_is_omap44xx())
909                 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
910         else
911                 l |= ((read_prio & 0x1) << 6);
912
913         dma_write(l, CCR(lch));
914
915         return 0;
916 }
917 EXPORT_SYMBOL(omap_dma_set_prio_lch);
918
919 /*
920  * Clears any DMA state so the DMA engine is ready to restart with new buffers
921  * through omap_start_dma(). Any buffers in flight are discarded.
922  */
void omap_clear_dma(int lch)
{
	unsigned long flags;

	local_irq_save(flags);

	if (cpu_class_is_omap1()) {
		u32 l;

		/* Drop the enable bit to stop any transfer in flight */
		l = dma_read(CCR(lch));
		l &= ~OMAP_DMA_CCR_EN;
		dma_write(l, CCR(lch));

		/* Clear pending interrupts */
		l = dma_read(CSR(lch));
	}

	if (cpu_class_is_omap2()) {
		int i;
		void __iomem *lch_base = omap_dma_base + OMAP_DMA4_CH_BASE(lch);
		/* Zero the whole per-channel register window (0x44 bytes) */
		for (i = 0; i < 0x44; i += 4)
			__raw_writel(0, lch_base + i);
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL(omap_clear_dma);
950
951 void omap_start_dma(int lch)
952 {
953         u32 l;
954
955         /*
956          * The CPC/CDAC register needs to be initialized to zero
957          * before starting dma transfer.
958          */
959         if (cpu_is_omap15xx())
960                 dma_write(0, CPC(lch));
961         else
962                 dma_write(0, CDAC(lch));
963
964         if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
965                 int next_lch, cur_lch;
966                 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];
967
968                 dma_chan_link_map[lch] = 1;
969                 /* Set the link register of the first channel */
970                 enable_lnk(lch);
971
972                 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
973                 cur_lch = dma_chan[lch].next_lch;
974                 do {
975                         next_lch = dma_chan[cur_lch].next_lch;
976
977                         /* The loop case: we've been here already */
978                         if (dma_chan_link_map[cur_lch])
979                                 break;
980                         /* Mark the current channel */
981                         dma_chan_link_map[cur_lch] = 1;
982
983                         enable_lnk(cur_lch);
984                         omap_enable_channel_irq(cur_lch);
985
986                         cur_lch = next_lch;
987                 } while (next_lch != -1);
988         } else if (cpu_is_omap242x() ||
989                 (cpu_is_omap243x() &&  omap_type() <= OMAP2430_REV_ES1_0)) {
990
991                 /* Errata: Need to write lch even if not using chaining */
992                 dma_write(lch, CLNK_CTRL(lch));
993         }
994
995         omap_enable_channel_irq(lch);
996
997         l = dma_read(CCR(lch));
998
999         /*
1000          * Errata: Inter Frame DMA buffering issue (All OMAP2420 and
1001          * OMAP2430ES1.0): DMA will wrongly buffer elements if packing and
1002          * bursting is enabled. This might result in data gets stalled in
1003          * FIFO at the end of the block.
1004          * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
1005          * guarantee no data will stay in the DMA FIFO in case inter frame
1006          * buffering occurs.
1007          */
1008         if (cpu_is_omap2420() ||
1009             (cpu_is_omap2430() && (omap_type() == OMAP2430_REV_ES1_0)))
1010                 l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
1011
1012         l |= OMAP_DMA_CCR_EN;
1013         dma_write(l, CCR(lch));
1014
1015         dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1016 }
1017 EXPORT_SYMBOL(omap_start_dma);
1018
/*
 * Stops the given DMA channel. On OMAP3 with source/destination
 * synchronization an errata workaround drains the sDMA FIFO before the
 * channel is considered stopped; on other parts the enable bit is simply
 * cleared. Linked channels have their link registers disabled as well.
 */
void omap_stop_dma(int lch)
{
	u32 l;

	/* Disable all interrupts on the channel */
	if (cpu_class_is_omap1())
		dma_write(0, CICR(lch));

	l = dma_read(CCR(lch));
	/* OMAP3 Errata i541: sDMA FIFO draining does not finish */
	if (cpu_is_omap34xx() && (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
		int i = 0;
		u32 sys_cf;

		/* Configure No-Standby */
		l = dma_read(OCP_SYSCONFIG);
		sys_cf = l;	/* keep original value to restore below */
		l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		dma_write(l , OCP_SYSCONFIG);

		l = dma_read(CCR(lch));
		l &= ~OMAP_DMA_CCR_EN;
		dma_write(l, CCR(lch));

		/* Wait for sDMA FIFO drain */
		l = dma_read(CCR(lch));
		/* Poll up to 100 * 5us = 500us for RD/WR activity to cease */
		while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
					OMAP_DMA_CCR_WR_ACTIVE))) {
			udelay(5);
			i++;
			l = dma_read(CCR(lch));
		}
		if (i >= 100)
			printk(KERN_ERR "DMA drain did not complete on "
					"lch %d\n", lch);
		/* Restore OCP_SYSCONFIG */
		dma_write(sys_cf, OCP_SYSCONFIG);
	} else {
		l &= ~OMAP_DMA_CCR_EN;
		dma_write(l, CCR(lch));
	}

	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
		int next_lch, cur_lch = lch;
		char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];

		/* Walk the link chain, disabling each link exactly once */
		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
		do {
			/* The loop case: we've been here already */
			if (dma_chan_link_map[cur_lch])
				break;
			/* Mark the current channel */
			dma_chan_link_map[cur_lch] = 1;

			disable_lnk(cur_lch);

			next_lch = dma_chan[cur_lch].next_lch;
			cur_lch = next_lch;
		} while (next_lch != -1);
	}

	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
EXPORT_SYMBOL(omap_stop_dma);
1084
1085 /*
1086  * Allows changing the DMA callback function or data. This may be needed if
1087  * the driver shares a single DMA channel for multiple dma triggers.
1088  */
1089 int omap_set_dma_callback(int lch,
1090                           void (*callback)(int lch, u16 ch_status, void *data),
1091                           void *data)
1092 {
1093         unsigned long flags;
1094
1095         if (lch < 0)
1096                 return -ENODEV;
1097
1098         spin_lock_irqsave(&dma_chan_lock, flags);
1099         if (dma_chan[lch].dev_id == -1) {
1100                 printk(KERN_ERR "DMA callback for not set for free channel\n");
1101                 spin_unlock_irqrestore(&dma_chan_lock, flags);
1102                 return -EINVAL;
1103         }
1104         dma_chan[lch].callback = callback;
1105         dma_chan[lch].data = data;
1106         spin_unlock_irqrestore(&dma_chan_lock, flags);
1107
1108         return 0;
1109 }
1110 EXPORT_SYMBOL(omap_set_dma_callback);
1111
1112 /*
1113  * Returns current physical source address for the given DMA channel.
1114  * If the channel is running the caller must disable interrupts prior calling
1115  * this function and process the returned value before re-enabling interrupt to
1116  * prevent races with the interrupt handler. Note that in continuous mode there
1117  * is a chance for CSSA_L register overflow inbetween the two reads resulting
1118  * in incorrect return value.
1119  */
1120 dma_addr_t omap_get_dma_src_pos(int lch)
1121 {
1122         dma_addr_t offset = 0;
1123
1124         if (cpu_is_omap15xx())
1125                 offset = dma_read(CPC(lch));
1126         else
1127                 offset = dma_read(CSAC(lch));
1128
1129         /*
1130          * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1131          * read before the DMA controller finished disabling the channel.
1132          */
1133         if (!cpu_is_omap15xx() && offset == 0)
1134                 offset = dma_read(CSAC(lch));
1135
1136         if (cpu_class_is_omap1())
1137                 offset |= (dma_read(CSSA_U(lch)) << 16);
1138
1139         return offset;
1140 }
1141 EXPORT_SYMBOL(omap_get_dma_src_pos);
1142
1143 /*
1144  * Returns current physical destination address for the given DMA channel.
1145  * If the channel is running the caller must disable interrupts prior calling
1146  * this function and process the returned value before re-enabling interrupt to
1147  * prevent races with the interrupt handler. Note that in continuous mode there
1148  * is a chance for CDSA_L register overflow inbetween the two reads resulting
1149  * in incorrect return value.
1150  */
1151 dma_addr_t omap_get_dma_dst_pos(int lch)
1152 {
1153         dma_addr_t offset = 0;
1154
1155         if (cpu_is_omap15xx())
1156                 offset = dma_read(CPC(lch));
1157         else
1158                 offset = dma_read(CDAC(lch));
1159
1160         /*
1161          * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1162          * read before the DMA controller finished disabling the channel.
1163          */
1164         if (!cpu_is_omap15xx() && offset == 0)
1165                 offset = dma_read(CDAC(lch));
1166
1167         if (cpu_class_is_omap1())
1168                 offset |= (dma_read(CDSA_U(lch)) << 16);
1169
1170         return offset;
1171 }
1172 EXPORT_SYMBOL(omap_get_dma_dst_pos);
1173
1174 int omap_get_dma_active_status(int lch)
1175 {
1176         return (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN) != 0;
1177 }
1178 EXPORT_SYMBOL(omap_get_dma_active_status);
1179
1180 int omap_dma_running(void)
1181 {
1182         int lch;
1183
1184         if (cpu_class_is_omap1())
1185                 if (omap_lcd_dma_running())
1186                         return 1;
1187
1188         for (lch = 0; lch < dma_chan_count; lch++)
1189                 if (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN)
1190                         return 1;
1191
1192         return 0;
1193 }
1194
1195 /*
1196  * lch_queue DMA will start right after lch_head one is finished.
1197  * For this DMA link to start, you still need to start (see omap_start_dma)
1198  * the first one. That will fire up the entire queue.
1199  */
1200 void omap_dma_link_lch(int lch_head, int lch_queue)
1201 {
1202         if (omap_dma_in_1510_mode()) {
1203                 if (lch_head == lch_queue) {
1204                         dma_write(dma_read(CCR(lch_head)) | (3 << 8),
1205                                                                 CCR(lch_head));
1206                         return;
1207                 }
1208                 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1209                 BUG();
1210                 return;
1211         }
1212
1213         if ((dma_chan[lch_head].dev_id == -1) ||
1214             (dma_chan[lch_queue].dev_id == -1)) {
1215                 printk(KERN_ERR "omap_dma: trying to link "
1216                        "non requested channels\n");
1217                 dump_stack();
1218         }
1219
1220         dma_chan[lch_head].next_lch = lch_queue;
1221 }
1222 EXPORT_SYMBOL(omap_dma_link_lch);
1223
1224 /*
1225  * Once the DMA queue is stopped, we can destroy it.
1226  */
1227 void omap_dma_unlink_lch(int lch_head, int lch_queue)
1228 {
1229         if (omap_dma_in_1510_mode()) {
1230                 if (lch_head == lch_queue) {
1231                         dma_write(dma_read(CCR(lch_head)) & ~(3 << 8),
1232                                                                 CCR(lch_head));
1233                         return;
1234                 }
1235                 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1236                 BUG();
1237                 return;
1238         }
1239
1240         if (dma_chan[lch_head].next_lch != lch_queue ||
1241             dma_chan[lch_head].next_lch == -1) {
1242                 printk(KERN_ERR "omap_dma: trying to unlink "
1243                        "non linked channels\n");
1244                 dump_stack();
1245         }
1246
1247         if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
1248             (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
1249                 printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
1250                        "before unlinking\n");
1251                 dump_stack();
1252         }
1253
1254         dma_chan[lch_head].next_lch = -1;
1255 }
1256 EXPORT_SYMBOL(omap_dma_unlink_lch);
1257
1258 /*----------------------------------------------------------------------------*/
1259
1260 #ifndef CONFIG_ARCH_OMAP1
/* Create chain of DMA channels */
/*
 * Insert lch_queue into the circular doubly-linked chain after lch_head,
 * and program both channels' link registers (CLNK_CTRL low 5 bits hold the
 * next logical channel number).
 */
static void create_dma_lch_chain(int lch_head, int lch_queue)
{
	u32 l;

	/* Check if this is the first link in chain */
	if (dma_chan[lch_head].next_linked_ch == -1) {
		/* Two-element circular list: each points at the other */
		dma_chan[lch_head].next_linked_ch = lch_queue;
		dma_chan[lch_head].prev_linked_ch = lch_queue;
		dma_chan[lch_queue].next_linked_ch = lch_head;
		dma_chan[lch_queue].prev_linked_ch = lch_head;
	}

	/* a link exists, link the new channel in circular chain */
	else {
		dma_chan[lch_queue].next_linked_ch =
					dma_chan[lch_head].next_linked_ch;
		dma_chan[lch_queue].prev_linked_ch = lch_head;
		dma_chan[lch_head].next_linked_ch = lch_queue;
		dma_chan[dma_chan[lch_queue].next_linked_ch].prev_linked_ch =
					lch_queue;
	}

	/* Point the head's hardware link register at the new channel */
	l = dma_read(CLNK_CTRL(lch_head));
	l &= ~(0x1f);
	l |= lch_queue;
	dma_write(l, CLNK_CTRL(lch_head));

	/* Point the new channel's link register at its successor */
	l = dma_read(CLNK_CTRL(lch_queue));
	l &= ~(0x1f);
	l |= (dma_chan[lch_queue].next_linked_ch);
	dma_write(l, CLNK_CTRL(lch_queue));
}
1294
1295 /**
1296  * @brief omap_request_dma_chain : Request a chain of DMA channels
1297  *
1298  * @param dev_id - Device id using the dma channel
1299  * @param dev_name - Device name
1300  * @param callback - Call back function
1301  * @chain_id -
1302  * @no_of_chans - Number of channels requested
1303  * @chain_mode - Dynamic or static chaining : OMAP_DMA_STATIC_CHAIN
1304  *                                            OMAP_DMA_DYNAMIC_CHAIN
1305  * @params - Channel parameters
1306  *
1307  * @return - Success : 0
1308  *           Failure: -EINVAL/-ENOMEM
1309  */
1310 int omap_request_dma_chain(int dev_id, const char *dev_name,
1311                            void (*callback) (int lch, u16 ch_status,
1312                                              void *data),
1313                            int *chain_id, int no_of_chans, int chain_mode,
1314                            struct omap_dma_channel_params params)
1315 {
1316         int *channels;
1317         int i, err;
1318
1319         /* Is the chain mode valid ? */
1320         if (chain_mode != OMAP_DMA_STATIC_CHAIN
1321                         && chain_mode != OMAP_DMA_DYNAMIC_CHAIN) {
1322                 printk(KERN_ERR "Invalid chain mode requested\n");
1323                 return -EINVAL;
1324         }
1325
1326         if (unlikely((no_of_chans < 1
1327                         || no_of_chans > dma_lch_count))) {
1328                 printk(KERN_ERR "Invalid Number of channels requested\n");
1329                 return -EINVAL;
1330         }
1331
1332         /*
1333          * Allocate a queue to maintain the status of the channels
1334          * in the chain
1335          */
1336         channels = kmalloc(sizeof(*channels) * no_of_chans, GFP_KERNEL);
1337         if (channels == NULL) {
1338                 printk(KERN_ERR "omap_dma: No memory for channel queue\n");
1339                 return -ENOMEM;
1340         }
1341
1342         /* request and reserve DMA channels for the chain */
1343         for (i = 0; i < no_of_chans; i++) {
1344                 err = omap_request_dma(dev_id, dev_name,
1345                                         callback, NULL, &channels[i]);
1346                 if (err < 0) {
1347                         int j;
1348                         for (j = 0; j < i; j++)
1349                                 omap_free_dma(channels[j]);
1350                         kfree(channels);
1351                         printk(KERN_ERR "omap_dma: Request failed %d\n", err);
1352                         return err;
1353                 }
1354                 dma_chan[channels[i]].prev_linked_ch = -1;
1355                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1356
1357                 /*
1358                  * Allowing client drivers to set common parameters now,
1359                  * so that later only relevant (src_start, dest_start
1360                  * and element count) can be set
1361                  */
1362                 omap_set_dma_params(channels[i], &params);
1363         }
1364
1365         *chain_id = channels[0];
1366         dma_linked_lch[*chain_id].linked_dmach_q = channels;
1367         dma_linked_lch[*chain_id].chain_mode = chain_mode;
1368         dma_linked_lch[*chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1369         dma_linked_lch[*chain_id].no_of_lchs_linked = no_of_chans;
1370
1371         for (i = 0; i < no_of_chans; i++)
1372                 dma_chan[channels[i]].chain_id = *chain_id;
1373
1374         /* Reset the Queue pointers */
1375         OMAP_DMA_CHAIN_QINIT(*chain_id);
1376
1377         /* Set up the chain */
1378         if (no_of_chans == 1)
1379                 create_dma_lch_chain(channels[0], channels[0]);
1380         else {
1381                 for (i = 0; i < (no_of_chans - 1); i++)
1382                         create_dma_lch_chain(channels[i], channels[i + 1]);
1383         }
1384
1385         return 0;
1386 }
1387 EXPORT_SYMBOL(omap_request_dma_chain);
1388
1389 /**
1390  * @brief omap_modify_dma_chain_param : Modify the chain's params - Modify the
1391  * params after setting it. Dont do this while dma is running!!
1392  *
1393  * @param chain_id - Chained logical channel id.
1394  * @param params
1395  *
1396  * @return - Success : 0
1397  *           Failure : -EINVAL
1398  */
1399 int omap_modify_dma_chain_params(int chain_id,
1400                                 struct omap_dma_channel_params params)
1401 {
1402         int *channels;
1403         u32 i;
1404
1405         /* Check for input params */
1406         if (unlikely((chain_id < 0
1407                         || chain_id >= dma_lch_count))) {
1408                 printk(KERN_ERR "Invalid chain id\n");
1409                 return -EINVAL;
1410         }
1411
1412         /* Check if the chain exists */
1413         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1414                 printk(KERN_ERR "Chain doesn't exists\n");
1415                 return -EINVAL;
1416         }
1417         channels = dma_linked_lch[chain_id].linked_dmach_q;
1418
1419         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1420                 /*
1421                  * Allowing client drivers to set common parameters now,
1422                  * so that later only relevant (src_start, dest_start
1423                  * and element count) can be set
1424                  */
1425                 omap_set_dma_params(channels[i], &params);
1426         }
1427
1428         return 0;
1429 }
1430 EXPORT_SYMBOL(omap_modify_dma_chain_params);
1431
1432 /**
1433  * @brief omap_free_dma_chain - Free all the logical channels in a chain.
1434  *
1435  * @param chain_id
1436  *
1437  * @return - Success : 0
1438  *           Failure : -EINVAL
1439  */
1440 int omap_free_dma_chain(int chain_id)
1441 {
1442         int *channels;
1443         u32 i;
1444
1445         /* Check for input params */
1446         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1447                 printk(KERN_ERR "Invalid chain id\n");
1448                 return -EINVAL;
1449         }
1450
1451         /* Check if the chain exists */
1452         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1453                 printk(KERN_ERR "Chain doesn't exists\n");
1454                 return -EINVAL;
1455         }
1456
1457         channels = dma_linked_lch[chain_id].linked_dmach_q;
1458         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1459                 dma_chan[channels[i]].next_linked_ch = -1;
1460                 dma_chan[channels[i]].prev_linked_ch = -1;
1461                 dma_chan[channels[i]].chain_id = -1;
1462                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1463                 omap_free_dma(channels[i]);
1464         }
1465
1466         kfree(channels);
1467
1468         dma_linked_lch[chain_id].linked_dmach_q = NULL;
1469         dma_linked_lch[chain_id].chain_mode = -1;
1470         dma_linked_lch[chain_id].chain_state = -1;
1471
1472         return (0);
1473 }
1474 EXPORT_SYMBOL(omap_free_dma_chain);
1475
1476 /**
1477  * @brief omap_dma_chain_status - Check if the chain is in
1478  * active / inactive state.
1479  * @param chain_id
1480  *
1481  * @return - Success : OMAP_DMA_CHAIN_ACTIVE/OMAP_DMA_CHAIN_INACTIVE
1482  *           Failure : -EINVAL
1483  */
1484 int omap_dma_chain_status(int chain_id)
1485 {
1486         /* Check for input params */
1487         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1488                 printk(KERN_ERR "Invalid chain id\n");
1489                 return -EINVAL;
1490         }
1491
1492         /* Check if the chain exists */
1493         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1494                 printk(KERN_ERR "Chain doesn't exists\n");
1495                 return -EINVAL;
1496         }
1497         pr_debug("CHAINID=%d, qcnt=%d\n", chain_id,
1498                         dma_linked_lch[chain_id].q_count);
1499
1500         if (OMAP_DMA_CHAIN_QEMPTY(chain_id))
1501                 return OMAP_DMA_CHAIN_INACTIVE;
1502
1503         return OMAP_DMA_CHAIN_ACTIVE;
1504 }
1505 EXPORT_SYMBOL(omap_dma_chain_status);
1506
1507 /**
1508  * @brief omap_dma_chain_a_transfer - Get a free channel from a chain,
1509  * set the params and start the transfer.
1510  *
1511  * @param chain_id
1512  * @param src_start - buffer start address
1513  * @param dest_start - Dest address
1514  * @param elem_count
1515  * @param frame_count
1516  * @param callbk_data - channel callback parameter data.
1517  *
1518  * @return  - Success : 0
1519  *            Failure: -EINVAL/-EBUSY
1520  */
1521 int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1522                         int elem_count, int frame_count, void *callbk_data)
1523 {
1524         int *channels;
1525         u32 l, lch;
1526         int start_dma = 0;
1527
1528         /*
1529          * if buffer size is less than 1 then there is
1530          * no use of starting the chain
1531          */
1532         if (elem_count < 1) {
1533                 printk(KERN_ERR "Invalid buffer size\n");
1534                 return -EINVAL;
1535         }
1536
1537         /* Check for input params */
1538         if (unlikely((chain_id < 0
1539                         || chain_id >= dma_lch_count))) {
1540                 printk(KERN_ERR "Invalid chain id\n");
1541                 return -EINVAL;
1542         }
1543
1544         /* Check if the chain exists */
1545         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1546                 printk(KERN_ERR "Chain doesn't exist\n");
1547                 return -EINVAL;
1548         }
1549
1550         /* Check if all the channels in chain are in use */
1551         if (OMAP_DMA_CHAIN_QFULL(chain_id))
1552                 return -EBUSY;
1553
1554         /* Frame count may be negative in case of indexed transfers */
1555         channels = dma_linked_lch[chain_id].linked_dmach_q;
1556
1557         /* Get a free channel */
1558         lch = channels[dma_linked_lch[chain_id].q_tail];
1559
1560         /* Store the callback data */
1561         dma_chan[lch].data = callbk_data;
1562
1563         /* Increment the q_tail */
1564         OMAP_DMA_CHAIN_INCQTAIL(chain_id);
1565
1566         /* Set the params to the free channel */
1567         if (src_start != 0)
1568                 dma_write(src_start, CSSA(lch));
1569         if (dest_start != 0)
1570                 dma_write(dest_start, CDSA(lch));
1571
1572         /* Write the buffer size */
1573         dma_write(elem_count, CEN(lch));
1574         dma_write(frame_count, CFN(lch));
1575
1576         /*
1577          * If the chain is dynamically linked,
1578          * then we may have to start the chain if its not active
1579          */
1580         if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_DYNAMIC_CHAIN) {
1581
1582                 /*
1583                  * In Dynamic chain, if the chain is not started,
1584                  * queue the channel
1585                  */
1586                 if (dma_linked_lch[chain_id].chain_state ==
1587                                                 DMA_CHAIN_NOTSTARTED) {
1588                         /* Enable the link in previous channel */
1589                         if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1590                                                                 DMA_CH_QUEUED)
1591                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1592                         dma_chan[lch].state = DMA_CH_QUEUED;
1593                 }
1594
1595                 /*
1596                  * Chain is already started, make sure its active,
1597                  * if not then start the chain
1598                  */
1599                 else {
1600                         start_dma = 1;
1601
1602                         if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1603                                                         DMA_CH_STARTED) {
1604                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1605                                 dma_chan[lch].state = DMA_CH_QUEUED;
1606                                 start_dma = 0;
1607                                 if (0 == ((1 << 7) & dma_read(
1608                                         CCR(dma_chan[lch].prev_linked_ch)))) {
1609                                         disable_lnk(dma_chan[lch].
1610                                                     prev_linked_ch);
1611                                         pr_debug("\n prev ch is stopped\n");
1612                                         start_dma = 1;
1613                                 }
1614                         }
1615
1616                         else if (dma_chan[dma_chan[lch].prev_linked_ch].state
1617                                                         == DMA_CH_QUEUED) {
1618                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1619                                 dma_chan[lch].state = DMA_CH_QUEUED;
1620                                 start_dma = 0;
1621                         }
1622                         omap_enable_channel_irq(lch);
1623
1624                         l = dma_read(CCR(lch));
1625
1626                         if ((0 == (l & (1 << 24))))
1627                                 l &= ~(1 << 25);
1628                         else
1629                                 l |= (1 << 25);
1630                         if (start_dma == 1) {
1631                                 if (0 == (l & (1 << 7))) {
1632                                         l |= (1 << 7);
1633                                         dma_chan[lch].state = DMA_CH_STARTED;
1634                                         pr_debug("starting %d\n", lch);
1635                                         dma_write(l, CCR(lch));
1636                                 } else
1637                                         start_dma = 0;
1638                         } else {
1639                                 if (0 == (l & (1 << 7)))
1640                                         dma_write(l, CCR(lch));
1641                         }
1642                         dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1643                 }
1644         }
1645
1646         return 0;
1647 }
1648 EXPORT_SYMBOL(omap_dma_chain_a_transfer);
1649
1650 /**
1651  * @brief omap_start_dma_chain_transfers - Start the chain
1652  *
1653  * @param chain_id
1654  *
1655  * @return - Success : 0
1656  *           Failure : -EINVAL/-EBUSY
1657  */
1658 int omap_start_dma_chain_transfers(int chain_id)
1659 {
1660         int *channels;
1661         u32 l, i;
1662
1663         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1664                 printk(KERN_ERR "Invalid chain id\n");
1665                 return -EINVAL;
1666         }
1667
1668         channels = dma_linked_lch[chain_id].linked_dmach_q;
1669
1670         if (dma_linked_lch[channels[0]].chain_state == DMA_CHAIN_STARTED) {
1671                 printk(KERN_ERR "Chain is already started\n");
1672                 return -EBUSY;
1673         }
1674
1675         if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_STATIC_CHAIN) {
1676                 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked;
1677                                                                         i++) {
1678                         enable_lnk(channels[i]);
1679                         omap_enable_channel_irq(channels[i]);
1680                 }
1681         } else {
1682                 omap_enable_channel_irq(channels[0]);
1683         }
1684
1685         l = dma_read(CCR(channels[0]));
1686         l |= (1 << 7);
1687         dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
1688         dma_chan[channels[0]].state = DMA_CH_STARTED;
1689
1690         if ((0 == (l & (1 << 24))))
1691                 l &= ~(1 << 25);
1692         else
1693                 l |= (1 << 25);
1694         dma_write(l, CCR(channels[0]));
1695
1696         dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
1697
1698         return 0;
1699 }
1700 EXPORT_SYMBOL(omap_start_dma_chain_transfers);
1701
1702 /**
1703  * @brief omap_stop_dma_chain_transfers - Stop the dma transfer of a chain.
1704  *
1705  * @param chain_id
1706  *
1707  * @return - Success : 0
1708  *           Failure : EINVAL
1709  */
1710 int omap_stop_dma_chain_transfers(int chain_id)
1711 {
1712         int *channels;
1713         u32 l, i;
1714         u32 sys_cf;
1715
1716         /* Check for input params */
1717         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1718                 printk(KERN_ERR "Invalid chain id\n");
1719                 return -EINVAL;
1720         }
1721
1722         /* Check if the chain exists */
1723         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1724                 printk(KERN_ERR "Chain doesn't exists\n");
1725                 return -EINVAL;
1726         }
1727         channels = dma_linked_lch[chain_id].linked_dmach_q;
1728
1729         /*
1730          * DMA Errata:
1731          * Special programming model needed to disable DMA before end of block
1732          */
1733         sys_cf = dma_read(OCP_SYSCONFIG);
1734         l = sys_cf;
1735         /* Middle mode reg set no Standby */
1736         l &= ~((1 << 12)|(1 << 13));
1737         dma_write(l, OCP_SYSCONFIG);
1738
1739         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1740
1741                 /* Stop the Channel transmission */
1742                 l = dma_read(CCR(channels[i]));
1743                 l &= ~(1 << 7);
1744                 dma_write(l, CCR(channels[i]));
1745
1746                 /* Disable the link in all the channels */
1747                 disable_lnk(channels[i]);
1748                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1749
1750         }
1751         dma_linked_lch[chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1752
1753         /* Reset the Queue pointers */
1754         OMAP_DMA_CHAIN_QINIT(chain_id);
1755
1756         /* Errata - put in the old value */
1757         dma_write(sys_cf, OCP_SYSCONFIG);
1758
1759         return 0;
1760 }
1761 EXPORT_SYMBOL(omap_stop_dma_chain_transfers);
1762
1763 /* Get the index of the ongoing DMA in chain */
1764 /**
1765  * @brief omap_get_dma_chain_index - Get the element and frame index
1766  * of the ongoing DMA in chain
1767  *
1768  * @param chain_id
1769  * @param ei - Element index
1770  * @param fi - Frame index
1771  *
1772  * @return - Success : 0
1773  *           Failure : -EINVAL
1774  */
1775 int omap_get_dma_chain_index(int chain_id, int *ei, int *fi)
1776 {
1777         int lch;
1778         int *channels;
1779
1780         /* Check for input params */
1781         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1782                 printk(KERN_ERR "Invalid chain id\n");
1783                 return -EINVAL;
1784         }
1785
1786         /* Check if the chain exists */
1787         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1788                 printk(KERN_ERR "Chain doesn't exists\n");
1789                 return -EINVAL;
1790         }
1791         if ((!ei) || (!fi))
1792                 return -EINVAL;
1793
1794         channels = dma_linked_lch[chain_id].linked_dmach_q;
1795
1796         /* Get the current channel */
1797         lch = channels[dma_linked_lch[chain_id].q_head];
1798
1799         *ei = dma_read(CCEN(lch));
1800         *fi = dma_read(CCFN(lch));
1801
1802         return 0;
1803 }
1804 EXPORT_SYMBOL(omap_get_dma_chain_index);
1805
1806 /**
1807  * @brief omap_get_dma_chain_dst_pos - Get the destination position of the
1808  * ongoing DMA in chain
1809  *
1810  * @param chain_id
1811  *
1812  * @return - Success : Destination position
1813  *           Failure : -EINVAL
1814  */
1815 int omap_get_dma_chain_dst_pos(int chain_id)
1816 {
1817         int lch;
1818         int *channels;
1819
1820         /* Check for input params */
1821         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1822                 printk(KERN_ERR "Invalid chain id\n");
1823                 return -EINVAL;
1824         }
1825
1826         /* Check if the chain exists */
1827         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1828                 printk(KERN_ERR "Chain doesn't exists\n");
1829                 return -EINVAL;
1830         }
1831
1832         channels = dma_linked_lch[chain_id].linked_dmach_q;
1833
1834         /* Get the current channel */
1835         lch = channels[dma_linked_lch[chain_id].q_head];
1836
1837         return dma_read(CDAC(lch));
1838 }
1839 EXPORT_SYMBOL(omap_get_dma_chain_dst_pos);
1840
1841 /**
1842  * @brief omap_get_dma_chain_src_pos - Get the source position
1843  * of the ongoing DMA in chain
1844  * @param chain_id
1845  *
 * @return - Success : Source position
1847  *           Failure : -EINVAL
1848  */
1849 int omap_get_dma_chain_src_pos(int chain_id)
1850 {
1851         int lch;
1852         int *channels;
1853
1854         /* Check for input params */
1855         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1856                 printk(KERN_ERR "Invalid chain id\n");
1857                 return -EINVAL;
1858         }
1859
1860         /* Check if the chain exists */
1861         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1862                 printk(KERN_ERR "Chain doesn't exists\n");
1863                 return -EINVAL;
1864         }
1865
1866         channels = dma_linked_lch[chain_id].linked_dmach_q;
1867
1868         /* Get the current channel */
1869         lch = channels[dma_linked_lch[chain_id].q_head];
1870
1871         return dma_read(CSAC(lch));
1872 }
1873 EXPORT_SYMBOL(omap_get_dma_chain_src_pos);
1874 #endif  /* ifndef CONFIG_ARCH_OMAP1 */
1875
1876 /*----------------------------------------------------------------------------*/
1877
1878 #ifdef CONFIG_ARCH_OMAP1
1879
/*
 * Service one OMAP1 logical DMA channel: fetch its channel status (CSR),
 * warn about timeout/drop events, clear the ACTIVE flag on block
 * completion and invoke the client callback.
 * Returns 1 if an event was handled, 0 if there was nothing to do.
 */
static int omap1_dma_handle_ch(int ch)
{
	u32 csr;

	/*
	 * In 1510 mode the high channels (>= 6) are serviced from the
	 * paired low channel's IRQ (see omap1_dma_irq_handler): their
	 * status was stashed in saved_csr below, so consume it here.
	 */
	if (enable_1510_mode && ch >= 6) {
		csr = dma_chan[ch].saved_csr;
		dma_chan[ch].saved_csr = 0;
	} else
		csr = dma_read(CSR(ch));
	/*
	 * 1510: bits 7 and up of CSR on channels 0..2 carry the status
	 * of channel ch + 6 — save them for the paired handler and mask
	 * them off this channel's view.
	 */
	if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
		dma_chan[ch + 6].saved_csr = csr >> 7;
		csr &= 0x7f;
	}
	/* None of the six event bits set: nothing pending on this channel */
	if ((csr & 0x3f) == 0)
		return 0;
	if (unlikely(dma_chan[ch].dev_id == -1)) {
		printk(KERN_WARNING "Spurious interrupt from DMA channel "
		       "%d (CSR %04x)\n", ch, csr);
		return 0;
	}
	if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
		printk(KERN_WARNING "DMA timeout with device %d\n",
		       dma_chan[ch].dev_id);
	if (unlikely(csr & OMAP_DMA_DROP_IRQ))
		printk(KERN_WARNING "DMA synchronization event drop occurred "
		       "with device %d\n", dma_chan[ch].dev_id);
	/* Block complete: the channel is no longer actively transferring */
	if (likely(csr & OMAP_DMA_BLOCK_IRQ))
		dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
	if (likely(dma_chan[ch].callback != NULL))
		dma_chan[ch].callback(ch, csr, dma_chan[ch].data);

	return 1;
}
1913
1914 static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
1915 {
1916         int ch = ((int) dev_id) - 1;
1917         int handled = 0;
1918
1919         for (;;) {
1920                 int handled_now = 0;
1921
1922                 handled_now += omap1_dma_handle_ch(ch);
1923                 if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
1924                         handled_now += omap1_dma_handle_ch(ch + 6);
1925                 if (!handled_now)
1926                         break;
1927                 handled += handled_now;
1928         }
1929
1930         return handled ? IRQ_HANDLED : IRQ_NONE;
1931 }
1932
1933 #else
1934 #define omap1_dma_irq_handler   NULL
1935 #endif
1936
1937 #ifdef CONFIG_ARCH_OMAP2PLUS
1938
1939 static int omap2_dma_handle_ch(int ch)
1940 {
1941         u32 status = dma_read(CSR(ch));
1942
1943         if (!status) {
1944                 if (printk_ratelimit())
1945                         printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n",
1946                                 ch);
1947                 dma_write(1 << ch, IRQSTATUS_L0);
1948                 return 0;
1949         }
1950         if (unlikely(dma_chan[ch].dev_id == -1)) {
1951                 if (printk_ratelimit())
1952                         printk(KERN_WARNING "IRQ %04x for non-allocated DMA"
1953                                         "channel %d\n", status, ch);
1954                 return 0;
1955         }
1956         if (unlikely(status & OMAP_DMA_DROP_IRQ))
1957                 printk(KERN_INFO
1958                        "DMA synchronization event drop occurred with device "
1959                        "%d\n", dma_chan[ch].dev_id);
1960         if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
1961                 printk(KERN_INFO "DMA transaction error with device %d\n",
1962                        dma_chan[ch].dev_id);
1963                 if (cpu_class_is_omap2()) {
1964                         /*
1965                          * Errata: sDMA Channel is not disabled
1966                          * after a transaction error. So we explicitely
1967                          * disable the channel
1968                          */
1969                         u32 ccr;
1970
1971                         ccr = dma_read(CCR(ch));
1972                         ccr &= ~OMAP_DMA_CCR_EN;
1973                         dma_write(ccr, CCR(ch));
1974                         dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
1975                 }
1976         }
1977         if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
1978                 printk(KERN_INFO "DMA secure error with device %d\n",
1979                        dma_chan[ch].dev_id);
1980         if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
1981                 printk(KERN_INFO "DMA misaligned error with device %d\n",
1982                        dma_chan[ch].dev_id);
1983
1984         dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(ch));
1985         dma_write(1 << ch, IRQSTATUS_L0);
1986         /* read back the register to flush the write */
1987         dma_read(IRQSTATUS_L0);
1988
1989         /* If the ch is not chained then chain_id will be -1 */
1990         if (dma_chan[ch].chain_id != -1) {
1991                 int chain_id = dma_chan[ch].chain_id;
1992                 dma_chan[ch].state = DMA_CH_NOTSTARTED;
1993                 if (dma_read(CLNK_CTRL(ch)) & (1 << 15))
1994                         dma_chan[dma_chan[ch].next_linked_ch].state =
1995                                                         DMA_CH_STARTED;
1996                 if (dma_linked_lch[chain_id].chain_mode ==
1997                                                 OMAP_DMA_DYNAMIC_CHAIN)
1998                         disable_lnk(ch);
1999
2000                 if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
2001                         OMAP_DMA_CHAIN_INCQHEAD(chain_id);
2002
2003                 status = dma_read(CSR(ch));
2004         }
2005
2006         dma_write(status, CSR(ch));
2007
2008         if (likely(dma_chan[ch].callback != NULL))
2009                 dma_chan[ch].callback(ch, status, dma_chan[ch].data);
2010
2011         return 0;
2012 }
2013
2014 /* STATUS register count is from 1-32 while our is 0-31 */
2015 static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
2016 {
2017         u32 val, enable_reg;
2018         int i;
2019
2020         val = dma_read(IRQSTATUS_L0);
2021         if (val == 0) {
2022                 if (printk_ratelimit())
2023                         printk(KERN_WARNING "Spurious DMA IRQ\n");
2024                 return IRQ_HANDLED;
2025         }
2026         enable_reg = dma_read(IRQENABLE_L0);
2027         val &= enable_reg; /* Dispatch only relevant interrupts */
2028         for (i = 0; i < dma_lch_count && val != 0; i++) {
2029                 if (val & 1)
2030                         omap2_dma_handle_ch(i);
2031                 val >>= 1;
2032         }
2033
2034         return IRQ_HANDLED;
2035 }
2036
/* irqaction used to hook the single OMAP2+ sDMA interrupt line (see
 * omap_init_dma(), which passes it to setup_irq()). */
static struct irqaction omap24xx_dma_irq = {
	.name = "DMA",
	.handler = omap2_dma_irq_handler,
	.flags = IRQF_DISABLED
};
2042
2043 #else
2044 static struct irqaction omap24xx_dma_irq;
2045 #endif
2046
2047 /*----------------------------------------------------------------------------*/
2048
2049 void omap_dma_global_context_save(void)
2050 {
2051         omap_dma_global_context.dma_irqenable_l0 =
2052                 dma_read(IRQENABLE_L0);
2053         omap_dma_global_context.dma_ocp_sysconfig =
2054                 dma_read(OCP_SYSCONFIG);
2055         omap_dma_global_context.dma_gcr = dma_read(GCR);
2056 }
2057
2058 void omap_dma_global_context_restore(void)
2059 {
2060         int ch;
2061
2062         dma_write(omap_dma_global_context.dma_gcr, GCR);
2063         dma_write(omap_dma_global_context.dma_ocp_sysconfig,
2064                 OCP_SYSCONFIG);
2065         dma_write(omap_dma_global_context.dma_irqenable_l0,
2066                 IRQENABLE_L0);
2067
2068         /*
2069          * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
2070          * after secure sram context save and restore. Hence we need to
2071          * manually clear those IRQs to avoid spurious interrupts. This
2072          * affects only secure devices.
2073          */
2074         if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
2075                 dma_write(0x3 , IRQSTATUS_L0);
2076
2077         for (ch = 0; ch < dma_chan_count; ch++)
2078                 if (dma_chan[ch].dev_id != -1)
2079                         omap_clear_dma(ch);
2080 }
2081
2082 /*----------------------------------------------------------------------------*/
2083
/*
 * Boot-time initialisation of the OMAP DMA module: maps the controller
 * registers, allocates the logical-channel bookkeeping, performs per-SoC
 * setup and wires up the DMA interrupt(s).
 * Returns 0 on success or a negative errno on failure.
 */
static int __init omap_init_dma(void)
{
	unsigned long base;
	int ch, r;

	/* Select register base and logical channel count per SoC family */
	if (cpu_class_is_omap1()) {
		base = OMAP1_DMA_BASE;
		dma_lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
	} else if (cpu_is_omap24xx()) {
		base = OMAP24XX_DMA4_BASE;
		dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
	} else if (cpu_is_omap34xx()) {
		base = OMAP34XX_DMA4_BASE;
		dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
	} else if (cpu_is_omap44xx()) {
		base = OMAP44XX_DMA4_BASE;
		dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
	} else {
		pr_err("DMA init failed for unsupported omap\n");
		return -ENODEV;
	}

	omap_dma_base = ioremap(base, SZ_4K);
	BUG_ON(!omap_dma_base);

	/* Honor the omap_dma_reserve_ch= bootarg (capped at the HW count) */
	if (cpu_class_is_omap2() && omap_dma_reserve_channels
			&& (omap_dma_reserve_channels <= dma_lch_count))
		dma_lch_count = omap_dma_reserve_channels;

	dma_chan = kzalloc(sizeof(struct omap_dma_lch) * dma_lch_count,
				GFP_KERNEL);
	if (!dma_chan) {
		r = -ENOMEM;
		goto out_unmap;
	}

	/* Chaining bookkeeping exists only on OMAP2+ */
	if (cpu_class_is_omap2()) {
		dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
						dma_lch_count, GFP_KERNEL);
		if (!dma_linked_lch) {
			r = -ENOMEM;
			goto out_free;
		}
	}

	if (cpu_is_omap15xx()) {
		printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
		dma_chan_count = 9;
		enable_1510_mode = 1;
	} else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
		printk(KERN_INFO "OMAP DMA hardware version %d\n",
		       dma_read(HW_ID));
		printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
		       (dma_read(CAPS_0_U) << 16) |
		       dma_read(CAPS_0_L),
		       (dma_read(CAPS_1_U) << 16) |
		       dma_read(CAPS_1_L),
		       dma_read(CAPS_2), dma_read(CAPS_3),
		       dma_read(CAPS_4));
		if (!enable_1510_mode) {
			u16 w;

			/* Disable OMAP 3.0/3.1 compatibility mode. */
			w = dma_read(GSCR);
			w |= 1 << 3;
			dma_write(w, GSCR);
			dma_chan_count = 16;
		} else
			dma_chan_count = 9;
	} else if (cpu_class_is_omap2()) {
		u8 revision = dma_read(REVISION) & 0xff;
		printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
		       revision >> 4, revision & 0xf);
		dma_chan_count = dma_lch_count;
	} else {
		dma_chan_count = 0;
		return 0;
	}

	spin_lock_init(&dma_chan_lock);

	/* Reset every channel and, on OMAP1, claim its per-channel IRQ */
	for (ch = 0; ch < dma_chan_count; ch++) {
		omap_clear_dma(ch);
		if (cpu_class_is_omap2())
			omap2_disable_irq_lch(ch);

		dma_chan[ch].dev_id = -1;
		dma_chan[ch].next_lch = -1;

		/* In 1510 mode channels >= 6 have no IRQ line of their own */
		if (ch >= 6 && enable_1510_mode)
			continue;

		if (cpu_class_is_omap1()) {
			/*
			 * request_irq() doesn't like dev_id (ie. ch) being
			 * zero, so we have to kludge around this.
			 */
			r = request_irq(omap1_dma_irq[ch],
					omap1_dma_irq_handler, 0, "DMA",
					(void *) (ch + 1));
			if (r != 0) {
				int i;

				printk(KERN_ERR "unable to request IRQ %d "
				       "for DMA (error %d)\n",
				       omap1_dma_irq[ch], r);
				/* Unwind the IRQs already requested */
				for (i = 0; i < ch; i++)
					free_irq(omap1_dma_irq[i],
						 (void *) (i + 1));
				goto out_free;
			}
		}
	}

	if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
		omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
				DMA_DEFAULT_FIFO_DEPTH, 0);

	/* OMAP2+ uses a single shared sDMA interrupt line */
	if (cpu_class_is_omap2()) {
		int irq;
		if (cpu_is_omap44xx())
			irq = OMAP44XX_IRQ_SDMA_0;
		else
			irq = INT_24XX_SDMA_IRQ0;
		setup_irq(irq, &omap24xx_dma_irq);
	}

	if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
		/* Enable smartidle idlemodes and autoidle */
		u32 v = dma_read(OCP_SYSCONFIG);
		v &= ~(DMA_SYSCONFIG_MIDLEMODE_MASK |
				DMA_SYSCONFIG_SIDLEMODE_MASK |
				DMA_SYSCONFIG_AUTOIDLE);
		v |= (DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
			DMA_SYSCONFIG_SIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
			DMA_SYSCONFIG_AUTOIDLE);
		dma_write(v , OCP_SYSCONFIG);
		/* reserve dma channels 0 and 1 in high security devices */
		if (cpu_is_omap34xx() &&
			(omap_type() != OMAP2_DEVICE_TYPE_GP)) {
			printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
					"HS ROM code\n");
			dma_chan[0].dev_id = 0;
			dma_chan[1].dev_id = 1;
		}
	}

	return 0;

out_free:
	kfree(dma_chan);

out_unmap:
	iounmap(omap_dma_base);

	return r;
}
2241
2242 arch_initcall(omap_init_dma);
2243
2244 /*
2245  * Reserve the omap SDMA channels using cmdline bootarg
2246  * "omap_dma_reserve_ch=". The valid range is 1 to 32
2247  */
2248 static int __init omap_dma_cmdline_reserve_ch(char *str)
2249 {
2250         if (get_option(&str, &omap_dma_reserve_channels) != 1)
2251                 omap_dma_reserve_channels = 0;
2252         return 1;
2253 }
2254
2255 __setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
2256
2257