/*
 * arch/arm/mach-tegra/dma.c
 *
 * System DMA driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/delay.h>

#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
#include <mach/suspend.h>
#define APB_DMA_GEN			0x000
#define GEN_ENABLE			(1<<31)

#define APB_DMA_CNTRL			0x010

#define APB_DMA_IRQ_MASK		0x01c

#define APB_DMA_IRQ_MASK_SET		0x020

#define APB_DMA_CHAN_CSR		0x000
#define CSR_ENB				(1<<31)
#define CSR_IE_EOC			(1<<30)
#define CSR_HOLD			(1<<29)
#define CSR_DIR				(1<<28)
#define CSR_ONCE			(1<<27)
#define CSR_FLOW			(1<<21)
#define CSR_REQ_SEL_SHIFT		16
#define CSR_REQ_SEL_MASK		(0x1F<<CSR_REQ_SEL_SHIFT)
#define CSR_REQ_SEL_INVALID		(31<<CSR_REQ_SEL_SHIFT)
#define CSR_WCOUNT_SHIFT		2
#define CSR_WCOUNT_MASK			0xFFFC

#define APB_DMA_CHAN_STA		0x004
#define STA_BUSY			(1<<31)
#define STA_ISE_EOC			(1<<30)
#define STA_HALT			(1<<29)
#define STA_PING_PONG			(1<<28)
#define STA_COUNT_SHIFT			2
#define STA_COUNT_MASK			0xFFFC
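
/*
 * Note: the CSR WCOUNT and STA COUNT fields hold the transfer length in
 * 32-bit words minus one, packed into bits 2..15.  That is why the code
 * below repeatedly extracts the field, adds 1, and multiplies by 4 to
 * recover a byte count.
 */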
#define APB_DMA_CHAN_AHB_PTR		0x010

#define APB_DMA_CHAN_AHB_SEQ		0x014
#define AHB_SEQ_INTR_ENB		(1<<31)
#define AHB_SEQ_BUS_WIDTH_SHIFT		28
#define AHB_SEQ_BUS_WIDTH_MASK		(0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_8		(0<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_16		(1<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_32		(2<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_64		(3<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_128		(4<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_DATA_SWAP		(1<<27)
#define AHB_SEQ_BURST_MASK		(0x7<<24)
#define AHB_SEQ_BURST_1			(4<<24)
#define AHB_SEQ_BURST_4			(5<<24)
#define AHB_SEQ_BURST_8			(6<<24)
#define AHB_SEQ_DBL_BUF			(1<<19)
#define AHB_SEQ_WRAP_SHIFT		16
#define AHB_SEQ_WRAP_MASK		(0x7<<AHB_SEQ_WRAP_SHIFT)

#define APB_DMA_CHAN_APB_PTR		0x018

#define APB_DMA_CHAN_APB_SEQ		0x01c
#define APB_SEQ_BUS_WIDTH_SHIFT		28
#define APB_SEQ_BUS_WIDTH_MASK		(0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_8		(0<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_16		(1<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_32		(2<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_64		(3<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_128		(4<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_DATA_SWAP		(1<<27)
#define APB_SEQ_WRAP_SHIFT		16
#define APB_SEQ_WRAP_MASK		(0x7<<APB_SEQ_WRAP_SHIFT)
#define TEGRA_SYSTEM_DMA_CH_NR		16
#define TEGRA_SYSTEM_DMA_AVP_CH_NUM	4
#define TEGRA_SYSTEM_DMA_CH_MIN		0
#define TEGRA_SYSTEM_DMA_CH_MAX	\
	(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)
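
/*
 * The top TEGRA_SYSTEM_DMA_AVP_CH_NUM channels are presumed reserved for
 * the AVP coprocessor; the kernel only manages channels CH_MIN..CH_MAX
 * (tegra_dma_init() marks everything else as allocated).
 */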
#define NV_DMA_MAX_TRASFER_SIZE 0x10000

const unsigned int ahb_addr_wrap_table[8] = {
	0, 32, 64, 128, 256, 512, 1024, 2048
};

const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};

const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};
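
/*
 * These tables map the wrap and bus-width values a client may request to
 * the index encodings the AHB/APB sequencer registers expect;
 * tegra_dma_update_hw() does a linear search and BUG()s on a value that
 * has no encoding.
 */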
#define TEGRA_DMA_NAME_SIZE 16
struct tegra_dma_channel {
	struct list_head	list;
	int			id;
	spinlock_t		lock;
	char			name[TEGRA_DMA_NAME_SIZE];
	void __iomem		*addr;
	int			mode;
	int			irq;
	/* Register shadow */
	u32			csr;
	u32			ahb_seq;
	u32			ahb_ptr;
	u32			apb_seq;
	u32			apb_ptr;
};

#define NV_DMA_MAX_CHANNELS 32

static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_init_hw(struct tegra_dma_channel *ch);
static void tegra_dma_stop(struct tegra_dma_channel *ch);
void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);
void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	req = list_entry(ch->list.next, typeof(*req), node);

	tegra_dma_dequeue_req(ch, req);
}
void tegra_dma_stop(struct tegra_dma_channel *ch)
{
	unsigned int csr;
	unsigned int status;

	csr = ch->csr;
	csr &= ~CSR_IE_EOC;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
}
int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
	unsigned int csr;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	while (!list_empty(&ch->list))
		list_del(ch->list.next);

	csr = ch->csr;
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;

	/* Set the enable as that is not shadowed */
	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	tegra_dma_stop(ch);

	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return 0;
}
int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned int csr;
	unsigned int status;
	struct tegra_dma_req *req = NULL;
	int found = 0;
	unsigned long irq_flags;
	int to_transfer;
	int req_transfer_count;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	if (!found) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return 0;
	}

	/* STOP the DMA and get the transfer count.
	 * Getting the transfer count is tricky.
	 *  - Change the source selector to invalid to stop the DMA from
	 *    FIFO to memory.
	 *  - Read the status register to know the number of pending
	 *    bytes to be transferred.
	 *  - Finally stop or program the DMA to the next buffer in the
	 *    list.
	 */
	csr = ch->csr;
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;

	/* Set the enable as that is not shadowed */
	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	/* Get the transfer count */
	status = readl(ch->addr + APB_DMA_CHAN_STA);
	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
	req_transfer_count = (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
	req_transfer_count += 1;
	to_transfer += 1;

	req->bytes_transferred = req_transfer_count;

	if (status & STA_BUSY)
		req->bytes_transferred -= to_transfer;

	/* In continuous transfer mode, DMA only tracks the count of the
	 * half DMA buffer. So, if the DMA already finished half the DMA
	 * then add the half buffer to the completed count.
	 *
	 *	FIXME: There can be a race here. What if the request to
	 *	dequeue happens at the same time as the DMA just moved to
	 *	the new buffer and SW hasn't received the interrupt yet?
	 */
	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
			req->bytes_transferred += req_transfer_count;

	/* convert from words to bytes */
	req->bytes_transferred *= 4;

	tegra_dma_stop(ch);
	if (!list_empty(&ch->list)) {
		/* if the list is not empty, queue the next request */
		struct tegra_dma_req *next_req;
		next_req = list_entry(ch->list.next,
			typeof(*next_req), node);
		tegra_dma_update_hw(ch, next_req);
	}
	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	/* Callback should be called without any lock */
	req->complete(req);
	return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);
bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
{
	unsigned long irq_flags;
	bool is_empty;

	spin_lock_irqsave(&ch->lock, irq_flags);
	is_empty = list_empty(&ch->list);
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return is_empty;
}
EXPORT_SYMBOL(tegra_dma_is_empty);
bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *req;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);
int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	unsigned long irq_flags;
	int start_dma = 0;

	if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
		req->source_addr & 0x3 || req->dest_addr & 0x3) {
		pr_err("Invalid DMA request for channel %d\n", ch->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	req->bytes_transferred = 0;
	req->status = 0;
	req->buffer_status = 0;
	if (list_empty(&ch->list))
		start_dma = 1;

	list_add_tail(&req->node, &ch->list);

	if (start_dma)
		tegra_dma_update_hw(ch, req);

	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return 0;
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);
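
/*
 * Illustrative use (a sketch, not taken from this tree): a one-shot,
 * APB-device-to-memory transfer.  Only fields this driver actually reads
 * are set; fifo_phys, buf_phys and periph_req_sel stand in for
 * SoC-specific values, and the request must stay valid until ->complete
 * runs.
 *
 *	static struct tegra_dma_req req;
 *
 *	static void xfer_done(struct tegra_dma_req *r)
 *	{
 *		pr_info("DMA done: %d bytes, status %d\n",
 *			r->bytes_transferred, r->status);
 *	}
 *
 *	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
 *	req.to_memory = 1;
 *	req.source_addr = fifo_phys;
 *	req.dest_addr = buf_phys;
 *	req.size = 512;			(bytes, word aligned)
 *	req.source_bus_width = 32;
 *	req.dest_bus_width = 32;	(must be in bus_width_table)
 *	req.req_sel = periph_req_sel;
 *	req.complete = xfer_done;
 *	tegra_dma_enqueue_req(ch, &req);
 */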
struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
	int channel;
	struct tegra_dma_channel *ch;

	/* first channel is the shared channel */
	if (mode & TEGRA_DMA_SHARED) {
		channel = TEGRA_SYSTEM_DMA_CH_MIN;
	} else {
		channel = find_first_zero_bit(channel_usage,
			ARRAY_SIZE(dma_channels));
		if (channel >= ARRAY_SIZE(dma_channels))
			return NULL;
	}
	__set_bit(channel, channel_usage);
	ch = &dma_channels[channel];
	ch->mode = mode;
	return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);
void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
	if (ch->mode & TEGRA_DMA_SHARED)
		return;
	tegra_dma_cancel(ch);
	__clear_bit(ch->id, channel_usage);
}
EXPORT_SYMBOL(tegra_dma_free_channel);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	if (req->to_memory) {
		ch->apb_ptr = req->source_addr;
		ch->ahb_ptr = req->dest_addr;
	} else {
		ch->apb_ptr = req->dest_addr;
		ch->ahb_ptr = req->source_addr;
	}
	writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}
static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	int ahb_addr_wrap;
	int apb_addr_wrap;
	int ahb_bus_width;
	int apb_bus_width;
	int index;
	unsigned long csr;

	ch->csr |= CSR_FLOW;
	ch->csr &= ~CSR_REQ_SEL_MASK;
	ch->csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
	ch->ahb_seq &= ~AHB_SEQ_BURST_MASK;
	ch->ahb_seq |= AHB_SEQ_BURST_1;

	/* One shot mode is always single buffered,
	 * continuous mode is always double buffered
	 */
	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
		ch->csr |= CSR_ONCE;
		ch->ahb_seq &= ~AHB_SEQ_DBL_BUF;
		ch->csr &= ~CSR_WCOUNT_MASK;
		ch->csr |= ((req->size>>2) - 1) << CSR_WCOUNT_SHIFT;
	} else {
		ch->csr &= ~CSR_ONCE;
		ch->ahb_seq |= AHB_SEQ_DBL_BUF;

		/* In double buffered mode, we set the size to half the
		 * requested size and interrupt when half the buffer
		 * is done */
		ch->csr &= ~CSR_WCOUNT_MASK;
		ch->csr |= ((req->size>>3) - 1) << CSR_WCOUNT_SHIFT;
	}

	if (req->to_memory) {
		ch->csr &= ~CSR_DIR;
		ch->apb_ptr = req->source_addr;
		ch->ahb_ptr = req->dest_addr;

		apb_addr_wrap = req->source_wrap;
		ahb_addr_wrap = req->dest_wrap;
		apb_bus_width = req->source_bus_width;
		ahb_bus_width = req->dest_bus_width;
	} else {
		ch->csr |= CSR_DIR;
		ch->apb_ptr = req->dest_addr;
		ch->ahb_ptr = req->source_addr;

		apb_addr_wrap = req->dest_wrap;
		ahb_addr_wrap = req->source_wrap;
		apb_bus_width = req->dest_bus_width;
		ahb_bus_width = req->source_bus_width;
	}

	apb_addr_wrap >>= 2;
	ahb_addr_wrap >>= 2;

	/* set address wrap for APB size */
	index = 0;
	do {
		if (apb_addr_wrap_table[index] == apb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
	ch->apb_seq &= ~APB_SEQ_WRAP_MASK;
	ch->apb_seq |= index << APB_SEQ_WRAP_SHIFT;

	/* set address wrap for AHB size */
	index = 0;
	do {
		if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
	ch->ahb_seq &= ~AHB_SEQ_WRAP_MASK;
	ch->ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == ahb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ch->ahb_seq &= ~AHB_SEQ_BUS_WIDTH_MASK;
	ch->ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == apb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ch->apb_seq &= ~APB_SEQ_BUS_WIDTH_MASK;
	ch->apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

	ch->csr |= CSR_IE_EOC;

	/* update hw registers with the shadow */
	writel(ch->csr, ch->addr + APB_DMA_CHAN_CSR);
	writel(ch->apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
	writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ch->ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
	writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	csr = ch->csr | CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}
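
/*
 * Each channel keeps a software shadow (ch->csr, ch->ahb_seq, ch->ahb_ptr,
 * ch->apb_seq, ch->apb_ptr) of its registers: tegra_dma_init_hw() below
 * seeds it and tegra_dma_update_hw() writes it out wholesale.  Only
 * CSR_ENB is ever set in hardware without being recorded in the shadow.
 */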
static void tegra_dma_init_hw(struct tegra_dma_channel *ch)
{
	/* One shot with an interrupt to CPU after transfer */
	ch->csr = CSR_ONCE | CSR_IE_EOC;
	ch->ahb_seq = AHB_SEQ_BUS_WIDTH_32 | AHB_SEQ_INTR_ENB;
	ch->apb_seq = APB_SEQ_BUS_WIDTH_32 | 1 << APB_SEQ_WRAP_SHIFT;
}
static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	spin_lock(&ch->lock);
	if (list_empty(&ch->list)) {
		spin_unlock(&ch->lock);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		int bytes_transferred;

		bytes_transferred =
			(ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
		bytes_transferred += 1;
		bytes_transferred <<= 2;

		list_del(&req->node);
		req->bytes_transferred = bytes_transferred;
		req->status = TEGRA_DMA_REQ_SUCCESS;

		spin_unlock(&ch->lock);
		/* Callback should be called without any lock */
		pr_debug("%s: transferred %d bytes\n", __func__,
			req->bytes_transferred);
		req->complete(req);
		spin_lock(&ch->lock);
	}

	if (!list_empty(&ch->list)) {
		req = list_entry(ch->list.next, typeof(*req), node);
		/* the complete function we just called may have enqueued
		   another req, in which case dma has already started */
		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
			tegra_dma_update_hw(ch, req);
	}
	spin_unlock(&ch->lock);
}
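
/*
 * Continuous mode runs the hardware double buffered (ping-pong): each
 * request advances EMPTY -> HALF_FULL (threshold callback, next request's
 * pointers preloaded via tegra_dma_update_hw_partial()) -> FULL (complete
 * callback, request removed), with one interrupt per half buffer.
 */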
static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	spin_lock(&ch->lock);
	if (list_empty(&ch->list)) {
		spin_unlock(&ch->lock);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
			/* Load the next request into the hardware, if
			 * available */
			if (!list_is_last(&req->node, &ch->list)) {
				struct tegra_dma_req *next_req;

				next_req = list_entry(req->node.next,
					typeof(*next_req), node);
				tegra_dma_update_hw_partial(ch, next_req);
			}
			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			/* DMA lock is NOT held when callback is called */
			spin_unlock(&ch->lock);
			if (likely(req->threshold))
				req->threshold(req);
			return;

		} else if (req->buffer_status ==
			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
			/* Callback when the buffer is completely full (i.e.
			 * on the second interrupt) */
			int bytes_transferred;

			bytes_transferred =
				(ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
			bytes_transferred += 1;
			bytes_transferred <<= 3;

			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
			req->bytes_transferred = bytes_transferred;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			list_del(&req->node);

			/* DMA lock is NOT held when callback is called */
			spin_unlock(&ch->lock);
			req->complete(req);
			return;

		} else {
			BUG();
		}
	}
	spin_unlock(&ch->lock);
}
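
/*
 * Interrupt handling is split: the hard IRQ handler below just
 * acknowledges the EOC status bit and returns IRQ_WAKE_THREAD, and the
 * threaded handler walks the request list and invokes client callbacks
 * with the channel lock dropped.
 */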
static irqreturn_t dma_isr(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;
	unsigned long status;

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
	else {
		pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
		return IRQ_HANDLED;
	}
	return IRQ_WAKE_THREAD;
}
static irqreturn_t dma_thread_fn(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;

	if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
		handle_oneshot_dma(ch);
	else
		handle_continuous_dma(ch);

	return IRQ_HANDLED;
}
int __init tegra_dma_init(void)
{
	int ret = 0;
	int i;
	unsigned int irq;
	void __iomem *addr;

	addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	writel(GEN_ENABLE, addr + APB_DMA_GEN);
	writel(0, addr + APB_DMA_CNTRL);
	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
	       addr + APB_DMA_IRQ_MASK_SET);

	memset(channel_usage, 0, sizeof(channel_usage));
	memset(dma_channels, 0, sizeof(dma_channels));

	/* Reserve all the channels we are not supposed to touch */
	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++)
		__set_bit(i, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];

		__clear_bit(i, channel_usage);

		ch->id = i;
		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);

		ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->list);
		tegra_dma_init_hw(ch);

		irq = INT_APB_DMA_CH0 + i;
		ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
			dma_channels[i].name, ch);
		if (ret) {
			pr_err("Failed to register IRQ %d for DMA %d\n",
				irq, i);
			goto fail;
		}
		ch->irq = irq;
	}

	/* mark the shared channel allocated */
	__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MAX+1; i < NV_DMA_MAX_CHANNELS; i++)
		__set_bit(i, channel_usage);

	return ret;
fail:
	writel(0, addr + APB_DMA_GEN);
	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];
		if (ch->irq)
			free_irq(ch->irq, ch);
	}
	return ret;
}
#ifdef CONFIG_PM
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
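
/*
 * Suspend context layout: three controller-level registers (GEN, CNTRL,
 * IRQ_MASK) followed by five registers (CSR, AHB_PTR, AHB_SEQ, APB_PTR,
 * APB_SEQ) for each of the TEGRA_SYSTEM_DMA_CH_NR channels, hence
 * 5 * TEGRA_SYSTEM_DMA_CH_NR + 3 words.
 */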
void tegra_dma_suspend(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	*ctx++ = readl(addr + APB_DMA_GEN);
	*ctx++ = readl(addr + APB_DMA_CNTRL);
	*ctx++ = readl(addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
				  TEGRA_APB_DMA_CH0_SIZE * i);

		*ctx++ = readl(addr + APB_DMA_CHAN_CSR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
	}
}
void tegra_dma_resume(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	writel(*ctx++, addr + APB_DMA_GEN);
	writel(*ctx++, addr + APB_DMA_CNTRL);
	writel(*ctx++, addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
				  TEGRA_APB_DMA_CH0_SIZE * i);

		writel(*ctx++, addr + APB_DMA_CHAN_CSR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
	}
}
#endif