2 * Copyright (C) ST-Ericsson SA 2007-2010
3 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
4 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
5 * License terms: GNU General Public License (GPL) version 2
8 #include <linux/kernel.h>
9 #include <linux/slab.h>
10 #include <linux/dmaengine.h>
11 #include <linux/platform_device.h>
12 #include <linux/clk.h>
13 #include <linux/delay.h>
14 #include <linux/err.h>
16 #include <plat/ste_dma40.h>
18 #include "ste_dma40_ll.h"
20 #define D40_NAME "dma40"
22 #define D40_PHY_CHAN -1
24 /* For masking out/in 2-bit channel positions */
25 #define D40_CHAN_POS(chan)  (2 * ((chan) / 2))
26 #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
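/*
 * Worked example (illustrative): every physical channel owns a 2-bit
 * field, and even/odd channel pairs share the same bit position in two
 * separate registers (ACTIVE for even channels, ACTIVO for odd ones,
 * see d40_channel_execute_command()). For channel 5:
 * D40_CHAN_POS(5) == 2 * (5 / 2) == 4 and
 * D40_CHAN_POS_MASK(5) == 0x3 << 4 == 0x30, so a status read becomes:
 *
 *	status = (readl(active_reg) & D40_CHAN_POS_MASK(5)) >>
 *		 D40_CHAN_POS(5);
 */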
28 /* Maximum iterations taken before giving up suspending a channel */
29 #define D40_SUSPEND_MAX_IT 500
31 /* Hardware requirement on LCLA alignment */
32 #define LCLA_ALIGNMENT 0x40000
34 /* Max number of links per event group */
35 #define D40_LCLA_LINK_PER_EVENT_GRP 128
36 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
39 /* Attempts before giving up trying to get pages that are aligned */
39 #define MAX_LCLA_ALLOC_ATTEMPTS 256
41 /* Bit markings for allocation map */
42 #define D40_ALLOC_FREE (1 << 31)
43 #define D40_ALLOC_PHY (1 << 30)
44 #define D40_ALLOC_LOG_FREE 0
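/*
 * Sketch of the encoding (as used by d40_alloc_mask_set() below):
 * allocated_src/allocated_dst hold D40_ALLOC_FREE (bit 31) while the
 * half channel is unused, D40_ALLOC_PHY (bit 30) once it is taken by a
 * physical channel, and otherwise one bit per logical event line in
 * use, e.g. event line 3 allocated sets bit (1 << 3).
 */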
46 /* Hardware designer of the block */
47 #define D40_HW_DESIGNER 0x8
50 * enum d40_command - The different commands and/or statuses.
52 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
53 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
54 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
55 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
60 D40_DMA_SUSPEND_REQ = 2,
65 * struct d40_lli_pool - Structure for keeping LLIs in memory
67 * @base: Pointer to a memory area used when the pre_alloc_lli's are not
68 * large enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
69 * pre_alloc_lli is used.
70 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
71 * @pre_alloc_lli: Pre-allocated area for the most common case of transfers,
72 * one buffer to one buffer.
77 /* Space for dst and src, plus an extra for padding */
78 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
82 * struct d40_desc - A descriptor is one DMA job.
84 * @lli_phy: LLI settings for physical channel. Both src and dst
85 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
87 * @lli_log: Same as above but for logical channels.
88 * @lli_pool: The pool with two entries pre-allocated.
89 * @lli_len: Number of llis of current descriptor.
90 * @lli_current: Number of transferred llis.
91 * @lcla_alloc: Number of LCLA entries allocated.
92 * @txd: DMA engine struct. Used among other things for communication
95 * @is_in_client_list: true if the client owns this descriptor.
96 * @is_hw_linked: true if this job will automatically be continued for
99 * This descriptor is used for both logical and physical transfers.
103 struct d40_phy_lli_bidir lli_phy;
105 struct d40_log_lli_bidir lli_log;
107 struct d40_lli_pool lli_pool;
112 struct dma_async_tx_descriptor txd;
113 struct list_head node;
115 bool is_in_client_list;
120 * struct d40_lcla_pool - LCLA pool settings and data.
122 * @base: The virtual address of LCLA. 18-bit aligned.
123 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
124 * This pointer is only there for clean-up on error.
125 * @pages: The number of pages needed for all physical channels.
126 * Only used later for clean-up on error
127 * @lock: Lock to protect the content in this struct.
128 * @alloc_map: Big map of which LCLA entry is owned by which job.
130 struct d40_lcla_pool {
132 void *base_unaligned;
135 struct d40_desc **alloc_map;
139 * struct d40_phy_res - struct for handling eventlines mapped to physical
142 * @lock: A lock protecting this entity.
143 * @num: The physical channel number of this entity.
144 * @allocated_src: Bit mapped to show which src event lines are mapped to
145 * this physical channel. Can also be free or physically allocated.
146 * @allocated_dst: Same as for src but for dst.
147 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
160 * struct d40_chan - Struct that describes a channel.
162 * @lock: A spinlock to protect this struct.
163 * @log_num: The logical number, if any, of this channel.
164 * @completed: Starts with 1; after the first interrupt it is set to the dma engine's
166 * @pending_tx: The number of pending transfers. Used between interrupt handler
168 * @busy: Set to true when transfer is ongoing on this channel.
169 * @phy_chan: Pointer to physical channel which this instance runs on. If this
170 * pointer is NULL, then the channel is not allocated.
171 * @chan: DMA engine handle.
172 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
173 * transfer and call client callback.
174 * @client: Client owned descriptor list.
175 * @active: Active descriptor.
176 * @queue: Queued jobs.
177 * @dma_cfg: The client configuration of this dma channel.
178 * @base: Pointer to the device instance struct.
179 * @src_def_cfg: Default cfg register setting for src.
180 * @dst_def_cfg: Default cfg register setting for dst.
181 * @log_def: Default logical channel settings.
182 * @lcla: Space for one dst/src pair for logical channel transfers.
183 * @lcpa: Pointer to dst and src lcpa settings.
185 * This struct can either "be" a logical or a physical channel.
190 /* ID of the most recent completed transfer */
194 struct d40_phy_res *phy_chan;
195 struct dma_chan chan;
196 struct tasklet_struct tasklet;
197 struct list_head client;
198 struct list_head active;
199 struct list_head queue;
200 struct stedma40_chan_cfg dma_cfg;
201 struct d40_base *base;
202 /* Default register configurations */
205 struct d40_def_lcsp log_def;
206 struct d40_log_lli_full *lcpa;
207 /* Runtime reconfiguration */
208 dma_addr_t runtime_addr;
209 enum dma_data_direction runtime_direction;
213 * struct d40_base - The big global struct, one for each probed instance.
215 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
216 * @execmd_lock: Lock for execute command usage since several channels share
217 * the same physical register.
218 * @dev: The device structure.
219 * @virtbase: The virtual base address of the DMA's registers.
220 * @rev: silicon revision detected.
221 * @clk: Pointer to the DMA clock structure.
222 * @phy_start: Physical memory start of the DMA registers.
223 * @phy_size: Size of the DMA register map.
224 * @irq: The IRQ number.
225 * @num_phy_chans: The number of physical channels. Read from HW. This
226 * is the number of available channels for this driver, not counting "Secure
227 * mode" allocated physical channels.
228 * @num_log_chans: The number of logical channels. Calculated from
230 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
231 * @dma_slave: dma_device channels that can only do slave transfers.
232 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
233 * @log_chans: Room for all possible logical channels in system.
234 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
235 * to log_chans entries.
236 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
237 * to phy_chans entries.
238 * @plat_data: Pointer to provided platform_data which is the driver
240 * @phy_res: Vector containing all physical channels.
241 * @lcla_pool: lcla pool settings and data.
242 * @lcpa_base: The virtually mapped address of LCPA.
243 * @phy_lcpa: The physical address of the LCPA.
244 * @lcpa_size: The size of the LCPA area.
245 * @desc_slab: cache for descriptors.
248 spinlock_t interrupt_lock;
249 spinlock_t execmd_lock;
251 void __iomem *virtbase;
254 phys_addr_t phy_start;
255 resource_size_t phy_size;
259 struct dma_device dma_both;
260 struct dma_device dma_slave;
261 struct dma_device dma_memcpy;
262 struct d40_chan *phy_chans;
263 struct d40_chan *log_chans;
264 struct d40_chan **lookup_log_chans;
265 struct d40_chan **lookup_phy_chans;
266 struct stedma40_platform_data *plat_data;
267 /* Physical half channels */
268 struct d40_phy_res *phy_res;
269 struct d40_lcla_pool lcla_pool;
272 resource_size_t lcpa_size;
273 struct kmem_cache *desc_slab;
277 * struct d40_interrupt_lookup - lookup table for interrupt handler
279 * @src: Interrupt mask register.
280 * @clr: Interrupt clear register.
281 * @is_error: true if this is an error interrupt.
282 * @offset: Start delta into the lookup_log_chans in d40_base. If equal to
283 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
285 struct d40_interrupt_lookup {
293 * struct d40_reg_val - simple lookup struct
295 * @reg: The register.
296 * @val: The value that belongs to the register in reg.
303 static int d40_pool_lli_alloc(struct d40_desc *d40d,
304 int lli_len, bool is_log)
310 align = sizeof(struct d40_log_lli);
312 align = sizeof(struct d40_phy_lli);
315 base = d40d->lli_pool.pre_alloc_lli;
316 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
317 d40d->lli_pool.base = NULL;
319 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
321 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
322 d40d->lli_pool.base = base;
324 if (d40d->lli_pool.base == NULL)
329 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
331 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
334 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
336 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
343 static void d40_pool_lli_free(struct d40_desc *d40d)
345 kfree(d40d->lli_pool.base);
346 d40d->lli_pool.base = NULL;
347 d40d->lli_pool.size = 0;
348 d40d->lli_log.src = NULL;
349 d40d->lli_log.dst = NULL;
350 d40d->lli_phy.src = NULL;
351 d40d->lli_phy.dst = NULL;
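/*
 * Sizing example for d40_pool_lli_alloc() above (illustrative): a
 * logical transfer needs lli_len entries each for src and dst, so a
 * job with lli_len == 4 allocates
 * ALIGN(4 * 2 * sizeof(struct d40_log_lli), sizeof(struct d40_log_lli))
 * bytes; the single src/dst pair case fits in pre_alloc_lli and avoids
 * kmalloc entirely.
 */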
354 static int d40_lcla_alloc_one(struct d40_chan *d40c,
355 struct d40_desc *d40d)
362 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
364 p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
367 * Allocate both src and dst at the same time; hence the loop starts
368 * at 1, since entry 0 can't be used (zero is the link end marker).
370 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
371 if (!d40c->base->lcla_pool.alloc_map[p + i]) {
372 d40c->base->lcla_pool.alloc_map[p + i] = d40d;
379 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
384 static int d40_lcla_free_all(struct d40_chan *d40c,
385 struct d40_desc *d40d)
391 if (d40c->log_num == D40_PHY_CHAN)
394 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
396 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
397 if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
398 D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
399 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
400 D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
402 if (d40d->lcla_alloc == 0) {
409 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
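/*
 * LCLA bookkeeping sketch (illustrative): alloc_map is indexed as
 * alloc_map[phy_num * D40_LCLA_LINK_PER_EVENT_GRP + i]. Only the lower
 * half of each 128-entry group is scanned, because every allocation
 * claims a src/dst pair and index 0 is reserved as the link end
 * marker, leaving usable indices 1..63 per physical channel.
 */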
415 static void d40_desc_remove(struct d40_desc *d40d)
417 list_del(&d40d->node);
420 static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
422 struct d40_desc *desc = NULL;
424 if (!list_empty(&d40c->client)) {
428 list_for_each_entry_safe(d, _d, &d40c->client, node)
429 if (async_tx_test_ack(&d->txd)) {
430 d40_pool_lli_free(d);
433 memset(desc, 0, sizeof(*desc));
439 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
442 INIT_LIST_HEAD(&desc->node);
447 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
450 d40_lcla_free_all(d40c, d40d);
451 kmem_cache_free(d40c->base->desc_slab, d40d);
454 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
456 list_add_tail(&desc->node, &d40c->active);
459 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
461 int curr_lcla = -EINVAL, next_lcla;
463 if (d40c->log_num == D40_PHY_CHAN) {
464 d40_phy_lli_write(d40c->base->virtbase,
468 d40d->lli_current = d40d->lli_len;
471 if ((d40d->lli_len - d40d->lli_current) > 1)
472 curr_lcla = d40_lcla_alloc_one(d40c, d40d);
474 d40_log_lli_lcpa_write(d40c->lcpa,
475 &d40d->lli_log.dst[d40d->lli_current],
476 &d40d->lli_log.src[d40d->lli_current],
480 for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
481 struct d40_log_lli *lcla;
483 if (d40d->lli_current + 1 < d40d->lli_len)
484 next_lcla = d40_lcla_alloc_one(d40c, d40d);
488 lcla = d40c->base->lcla_pool.base +
489 d40c->phy_chan->num * 1024 +
492 d40_log_lli_lcla_write(lcla,
493 &d40d->lli_log.dst[d40d->lli_current],
494 &d40d->lli_log.src[d40d->lli_current],
497 (void) dma_map_single(d40c->base->dev, lcla,
498 2 * sizeof(struct d40_log_lli),
501 curr_lcla = next_lcla;
503 if (curr_lcla == -EINVAL) {
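/*
 * LCLA addressing sketch (illustrative): each physical channel owns
 * D40_LCLA_LINK_PER_EVENT_GRP (128) log LLI slots of 8 bytes each
 * (struct d40_log_lli is two u32 words), which is where the 1024-byte
 * stride in the "phy_chan->num * 1024" offset above comes from. The
 * first src/dst pair of a job is written directly to LCPA; remaining
 * pairs are chained through LCLA slots handed out by
 * d40_lcla_alloc_one().
 */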
512 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
516 if (list_empty(&d40c->active))
519 d = list_first_entry(&d40c->active,
525 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
527 list_add_tail(&desc->node, &d40c->queue);
530 static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
534 if (list_empty(&d40c->queue))
537 d = list_first_entry(&d40c->queue,
543 static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
547 if (list_empty(&d40c->queue))
549 list_for_each_entry(d, &d40c->queue, node)
550 if (list_is_last(&d->node, &d40c->queue))
555 /* Support functions for logical channels */
558 static int d40_channel_execute_command(struct d40_chan *d40c,
559 enum d40_command command)
563 void __iomem *active_reg;
568 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
570 if (d40c->phy_chan->num % 2 == 0)
571 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
573 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
575 if (command == D40_DMA_SUSPEND_REQ) {
576 status = (readl(active_reg) &
577 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
578 D40_CHAN_POS(d40c->phy_chan->num);
580 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
584 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
585 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
588 if (command == D40_DMA_SUSPEND_REQ) {
590 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
591 status = (readl(active_reg) &
592 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
593 D40_CHAN_POS(d40c->phy_chan->num);
597 * Reduce the number of bus accesses while
598 * waiting for the DMA to suspend.
602 if (status == D40_DMA_STOP ||
603 status == D40_DMA_SUSPENDED)
607 if (i == D40_SUSPEND_MAX_IT) {
608 dev_err(&d40c->chan.dev->device,
609 "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
610 __func__, d40c->phy_chan->num, d40c->log_num,
618 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
622 static void d40_term_all(struct d40_chan *d40c)
624 struct d40_desc *d40d;
626 /* Release active descriptors */
627 while ((d40d = d40_first_active_get(d40c))) {
628 d40_desc_remove(d40d);
629 d40_desc_free(d40c, d40d);
632 /* Release queued descriptors waiting for transfer */
633 while ((d40d = d40_first_queued(d40c))) {
634 d40_desc_remove(d40d);
635 d40_desc_free(d40c, d40d);
639 d40c->pending_tx = 0;
643 static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
648 /* Note that disabling requires the physical channel to be stopped */
650 val = D40_ACTIVATE_EVENTLINE;
652 val = D40_DEACTIVATE_EVENTLINE;
654 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
656 /* Enable event line connected to device (or memcpy) */
657 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
658 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
659 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
661 writel((val << D40_EVENTLINE_POS(event)) |
662 ~D40_EVENTLINE_MASK(event),
663 d40c->base->virtbase + D40_DREG_PCBASE +
664 d40c->phy_chan->num * D40_DREG_PCDELTA +
667 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
668 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
670 writel((val << D40_EVENTLINE_POS(event)) |
671 ~D40_EVENTLINE_MASK(event),
672 d40c->base->virtbase + D40_DREG_PCBASE +
673 d40c->phy_chan->num * D40_DREG_PCDELTA +
677 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
680 static u32 d40_chan_has_events(struct d40_chan *d40c)
684 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
685 d40c->phy_chan->num * D40_DREG_PCDELTA +
688 val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
689 d40c->phy_chan->num * D40_DREG_PCDELTA +
694 static void d40_config_write(struct d40_chan *d40c)
699 /* Odd addresses are even addresses + 4 */
700 addr_base = (d40c->phy_chan->num % 2) * 4;
701 /* Setup channel mode to logical or physical */
702 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
703 D40_CHAN_POS(d40c->phy_chan->num);
704 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
706 /* Setup operational mode option register */
707 var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
708 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
710 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
712 if (d40c->log_num != D40_PHY_CHAN) {
713 /* Set default config for CFG reg */
714 writel(d40c->src_def_cfg,
715 d40c->base->virtbase + D40_DREG_PCBASE +
716 d40c->phy_chan->num * D40_DREG_PCDELTA +
718 writel(d40c->dst_def_cfg,
719 d40c->base->virtbase + D40_DREG_PCBASE +
720 d40c->phy_chan->num * D40_DREG_PCDELTA +
723 /* Set LIDX for lcla */
724 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
725 D40_SREG_ELEM_LOG_LIDX_MASK,
726 d40c->base->virtbase + D40_DREG_PCBASE +
727 d40c->phy_chan->num * D40_DREG_PCDELTA +
730 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
731 D40_SREG_ELEM_LOG_LIDX_MASK,
732 d40c->base->virtbase + D40_DREG_PCBASE +
733 d40c->phy_chan->num * D40_DREG_PCDELTA +
739 static u32 d40_residue(struct d40_chan *d40c)
743 if (d40c->log_num != D40_PHY_CHAN)
744 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
745 >> D40_MEM_LCSP2_ECNT_POS;
747 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
748 d40c->phy_chan->num * D40_DREG_PCDELTA +
749 D40_CHAN_REG_SDELT) &
750 D40_SREG_ELEM_PHY_ECNT_MASK) >>
751 D40_SREG_ELEM_PHY_ECNT_POS;
752 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
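/*
 * Residue arithmetic example (illustrative): the hardware reports
 * elements left, and the stedma40 width enums are used here as
 * log2(bytes per element), hence the (1 << data_width) above. With 10
 * elements pending at STEDMA40_WORD_WIDTH (2) the residue is
 * 10 * (1 << 2) == 40 bytes.
 */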
755 static bool d40_tx_is_linked(struct d40_chan *d40c)
759 if (d40c->log_num != D40_PHY_CHAN)
760 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
762 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
763 d40c->phy_chan->num * D40_DREG_PCDELTA +
764 D40_CHAN_REG_SDLNK) &
765 D40_SREG_LNK_PHYS_LNK_MASK;
769 static int d40_pause(struct dma_chan *chan)
771 struct d40_chan *d40c =
772 container_of(chan, struct d40_chan, chan);
779 spin_lock_irqsave(&d40c->lock, flags);
781 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
783 if (d40c->log_num != D40_PHY_CHAN) {
784 d40_config_set_event(d40c, false);
785 /* Resume the other logical channels if any */
786 if (d40_chan_has_events(d40c))
787 res = d40_channel_execute_command(d40c,
792 spin_unlock_irqrestore(&d40c->lock, flags);
796 static int d40_resume(struct dma_chan *chan)
798 struct d40_chan *d40c =
799 container_of(chan, struct d40_chan, chan);
806 spin_lock_irqsave(&d40c->lock, flags);
808 if (d40c->base->rev == 0)
809 if (d40c->log_num != D40_PHY_CHAN) {
810 res = d40_channel_execute_command(d40c,
811 D40_DMA_SUSPEND_REQ);
815 /* If there are bytes left to transfer, or a linked tx, resume the job */
816 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
818 if (d40c->log_num != D40_PHY_CHAN)
819 d40_config_set_event(d40c, true);
821 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
825 spin_unlock_irqrestore(&d40c->lock, flags);
829 static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
834 static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
836 struct d40_desc *d40d_prev = NULL;
840 if (!list_empty(&d40c->queue))
841 d40d_prev = d40_last_queued(d40c);
842 else if (!list_empty(&d40c->active))
843 d40d_prev = d40_first_active_get(d40c);
848 /* Here we try to join this job with previous jobs */
849 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
850 d40c->phy_chan->num * D40_DREG_PCDELTA +
853 /* Figure out which link we're currently transmitting */
854 for (i = 0; i < d40d_prev->lli_len; i++)
855 if (val == d40d_prev->lli_phy.src[i].reg_lnk)
858 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
859 d40c->phy_chan->num * D40_DREG_PCDELTA +
860 D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;
862 if (i == (d40d_prev->lli_len - 1) && val > 0) {
863 /* Change the current one */
864 writel(virt_to_phys(d40d->lli_phy.src),
865 d40c->base->virtbase + D40_DREG_PCBASE +
866 d40c->phy_chan->num * D40_DREG_PCDELTA +
868 writel(virt_to_phys(d40d->lli_phy.dst),
869 d40c->base->virtbase + D40_DREG_PCBASE +
870 d40c->phy_chan->num * D40_DREG_PCDELTA +
873 d40d->is_hw_linked = true;
875 } else if (i < d40d_prev->lli_len) {
876 (void) dma_unmap_single(d40c->base->dev,
877 virt_to_phys(d40d_prev->lli_phy.src),
878 d40d_prev->lli_pool.size,
881 /* Keep the settings */
882 val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
883 ~D40_SREG_LNK_PHYS_LNK_MASK;
884 d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
885 val | virt_to_phys(d40d->lli_phy.src);
887 val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
888 ~D40_SREG_LNK_PHYS_LNK_MASK;
889 d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
890 val | virt_to_phys(d40d->lli_phy.dst);
892 (void) dma_map_single(d40c->base->dev,
893 d40d_prev->lli_phy.src,
894 d40d_prev->lli_pool.size,
896 d40d->is_hw_linked = true;
900 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
902 struct d40_chan *d40c = container_of(tx->chan,
905 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
908 (void) d40_pause(&d40c->chan);
910 spin_lock_irqsave(&d40c->lock, flags);
914 if (d40c->chan.cookie < 0)
915 d40c->chan.cookie = 1;
917 d40d->txd.cookie = d40c->chan.cookie;
919 if (d40c->log_num == D40_PHY_CHAN)
920 d40_tx_submit_phy(d40c, d40d);
922 d40_tx_submit_log(d40c, d40d);
924 d40_desc_queue(d40c, d40d);
926 spin_unlock_irqrestore(&d40c->lock, flags);
928 (void) d40_resume(&d40c->chan);
933 static int d40_start(struct d40_chan *d40c)
935 if (d40c->base->rev == 0) {
938 if (d40c->log_num != D40_PHY_CHAN) {
939 err = d40_channel_execute_command(d40c,
940 D40_DMA_SUSPEND_REQ);
946 if (d40c->log_num != D40_PHY_CHAN)
947 d40_config_set_event(d40c, true);
949 return d40_channel_execute_command(d40c, D40_DMA_RUN);
952 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
954 struct d40_desc *d40d;
957 /* Start queued jobs, if any */
958 d40d = d40_first_queued(d40c);
963 /* Remove from queue */
964 d40_desc_remove(d40d);
966 /* Add to active queue */
967 d40_desc_submit(d40c, d40d);
970 * If this job is already linked in hw,
974 if (!d40d->is_hw_linked) {
975 /* Initiate DMA job */
976 d40_desc_load(d40c, d40d);
979 err = d40_start(d40c);
989 /* called from interrupt context */
990 static void dma_tc_handle(struct d40_chan *d40c)
992 struct d40_desc *d40d;
994 /* Get first active entry from list */
995 d40d = d40_first_active_get(d40c);
1000 d40_lcla_free_all(d40c, d40d);
1002 if (d40d->lli_current < d40d->lli_len) {
1003 d40_desc_load(d40c, d40d);
1005 (void) d40_start(d40c);
1009 if (d40_queue_start(d40c) == NULL)
1013 tasklet_schedule(&d40c->tasklet);
1017 static void dma_tasklet(unsigned long data)
1019 struct d40_chan *d40c = (struct d40_chan *) data;
1020 struct d40_desc *d40d;
1021 unsigned long flags;
1022 dma_async_tx_callback callback;
1023 void *callback_param;
1025 spin_lock_irqsave(&d40c->lock, flags);
1027 /* Get first active entry from list */
1028 d40d = d40_first_active_get(d40c);
1033 d40c->completed = d40d->txd.cookie;
1036 * If terminating a channel, pending_tx is set to zero.
1037 * This prevents any finished active jobs from returning to the client.
1039 if (d40c->pending_tx == 0) {
1040 spin_unlock_irqrestore(&d40c->lock, flags);
1044 /* Callback to client */
1045 callback = d40d->txd.callback;
1046 callback_param = d40d->txd.callback_param;
1048 if (async_tx_test_ack(&d40d->txd)) {
1049 d40_pool_lli_free(d40d);
1050 d40_desc_remove(d40d);
1051 d40_desc_free(d40c, d40d);
1053 if (!d40d->is_in_client_list) {
1054 d40_desc_remove(d40d);
1055 d40_lcla_free_all(d40c, d40d);
1056 list_add_tail(&d40d->node, &d40c->client);
1057 d40d->is_in_client_list = true;
1063 if (d40c->pending_tx)
1064 tasklet_schedule(&d40c->tasklet);
1066 spin_unlock_irqrestore(&d40c->lock, flags);
1068 if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
1069 callback(callback_param);
1074 /* Rescue maneuver if receiving double interrupts */
1075 if (d40c->pending_tx > 0)
1077 spin_unlock_irqrestore(&d40c->lock, flags);
1080 static irqreturn_t d40_handle_interrupt(int irq, void *data)
1082 static const struct d40_interrupt_lookup il[] = {
1083 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
1084 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
1085 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
1086 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
1087 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
1088 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
1089 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
1090 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
1091 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
1092 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
1096 u32 regs[ARRAY_SIZE(il)];
1100 struct d40_chan *d40c;
1101 unsigned long flags;
1102 struct d40_base *base = data;
1104 spin_lock_irqsave(&base->interrupt_lock, flags);
1106 /* Read interrupt status of both logical and physical channels */
1107 for (i = 0; i < ARRAY_SIZE(il); i++)
1108 regs[i] = readl(base->virtbase + il[i].src);
1112 chan = find_next_bit((unsigned long *)regs,
1113 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
1115 /* No more set bits found? */
1116 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
1119 row = chan / BITS_PER_LONG;
1120 idx = chan & (BITS_PER_LONG - 1);
1123 writel(1 << idx, base->virtbase + il[row].clr);
1125 if (il[row].offset == D40_PHY_CHAN)
1126 d40c = base->lookup_phy_chans[idx];
1128 d40c = base->lookup_log_chans[il[row].offset + idx];
1129 spin_lock(&d40c->lock);
1131 if (!il[row].is_error)
1132 dma_tc_handle(d40c);
1135 "[%s] IRQ chan: %ld offset %d idx %d\n",
1136 __func__, chan, il[row].offset, idx);
1138 spin_unlock(&d40c->lock);
1141 spin_unlock_irqrestore(&base->interrupt_lock, flags);
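/*
 * Bit-scan example (illustrative): the regs[] snapshot is treated as
 * one long bitmap across all rows of il[]. On a 32-bit build, a set
 * bit at position chan == 70 decodes to row == 70 / 32 == 2 and
 * idx == 70 % 32 == 6, i.e. logical channel
 * il[2].offset + 6 == 64 + 6 == 70.
 */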
1146 static int d40_validate_conf(struct d40_chan *d40c,
1147 struct stedma40_chan_cfg *conf)
1150 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
1151 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
1152 bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
1153 == STEDMA40_CHANNEL_IN_LOG_MODE;
1156 dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
1161 if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
1162 d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
1163 d40c->runtime_addr == 0) {
1165 dev_err(&d40c->chan.dev->device,
1166 "[%s] Invalid TX channel address (%d)\n",
1167 __func__, conf->dst_dev_type);
1171 if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
1172 d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
1173 d40c->runtime_addr == 0) {
1174 dev_err(&d40c->chan.dev->device,
1175 "[%s] Invalid RX channel address (%d)\n",
1176 __func__, conf->src_dev_type);
1180 if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
1181 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
1182 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
1187 if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
1188 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
1189 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
1194 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
1195 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
1196 dev_err(&d40c->chan.dev->device,
1197 "[%s] No event line\n", __func__);
1201 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
1202 (src_event_group != dst_event_group)) {
1203 dev_err(&d40c->chan.dev->device,
1204 "[%s] Invalid event group\n", __func__);
1208 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
1210 * DMAC HW supports it. Will be added to this driver,
1211 * in case any dma client requires it.
1213 dev_err(&d40c->chan.dev->device,
1214 "[%s] periph to periph not supported\n",
1222 static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
1223 int log_event_line, bool is_log)
1225 unsigned long flags;
1226 spin_lock_irqsave(&phy->lock, flags);
1228 /* Physical interrupts are masked per physical full channel */
1229 if (phy->allocated_src == D40_ALLOC_FREE &&
1230 phy->allocated_dst == D40_ALLOC_FREE) {
1231 phy->allocated_dst = D40_ALLOC_PHY;
1232 phy->allocated_src = D40_ALLOC_PHY;
1238 /* Logical channel */
1240 if (phy->allocated_src == D40_ALLOC_PHY)
1243 if (phy->allocated_src == D40_ALLOC_FREE)
1244 phy->allocated_src = D40_ALLOC_LOG_FREE;
1246 if (!(phy->allocated_src & (1 << log_event_line))) {
1247 phy->allocated_src |= 1 << log_event_line;
1252 if (phy->allocated_dst == D40_ALLOC_PHY)
1255 if (phy->allocated_dst == D40_ALLOC_FREE)
1256 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1258 if (!(phy->allocated_dst & (1 << log_event_line))) {
1259 phy->allocated_dst |= 1 << log_event_line;
1266 spin_unlock_irqrestore(&phy->lock, flags);
1269 spin_unlock_irqrestore(&phy->lock, flags);
1273 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1276 unsigned long flags;
1277 bool is_free = false;
1279 spin_lock_irqsave(&phy->lock, flags);
1280 if (!log_event_line) {
1281 phy->allocated_dst = D40_ALLOC_FREE;
1282 phy->allocated_src = D40_ALLOC_FREE;
1287 /* Logical channel */
1289 phy->allocated_src &= ~(1 << log_event_line);
1290 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1291 phy->allocated_src = D40_ALLOC_FREE;
1293 phy->allocated_dst &= ~(1 << log_event_line);
1294 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1295 phy->allocated_dst = D40_ALLOC_FREE;
1298 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1302 spin_unlock_irqrestore(&phy->lock, flags);
1307 static int d40_allocate_channel(struct d40_chan *d40c)
1312 struct d40_phy_res *phys;
1317 bool is_log = (d40c->dma_cfg.channel_type &
1318 STEDMA40_CHANNEL_IN_OPER_MODE)
1319 == STEDMA40_CHANNEL_IN_LOG_MODE;
1322 phys = d40c->base->phy_res;
1324 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1325 dev_type = d40c->dma_cfg.src_dev_type;
1326 log_num = 2 * dev_type;
1328 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1329 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1330 /* dst event lines are used for logical memcpy */
1331 dev_type = d40c->dma_cfg.dst_dev_type;
1332 log_num = 2 * dev_type + 1;
1337 event_group = D40_TYPE_TO_GROUP(dev_type);
1338 event_line = D40_TYPE_TO_EVENT(dev_type);
1341 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1342 /* Find physical half channel */
1343 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1345 if (d40_alloc_mask_set(&phys[i], is_src,
1350 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1351 int phy_num = j + event_group * 2;
1352 for (i = phy_num; i < phy_num + 2; i++) {
1353 if (d40_alloc_mask_set(&phys[i],
1362 d40c->phy_chan = &phys[i];
1363 d40c->log_num = D40_PHY_CHAN;
1369 /* Find logical channel */
1370 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1371 int phy_num = j + event_group * 2;
1373 * Spread logical channels across all available physical channels rather
1374 * than packing every logical channel onto the first available phy
1378 for (i = phy_num; i < phy_num + 2; i++) {
1379 if (d40_alloc_mask_set(&phys[i], is_src,
1380 event_line, is_log))
1384 for (i = phy_num + 1; i >= phy_num; i--) {
1385 if (d40_alloc_mask_set(&phys[i], is_src,
1386 event_line, is_log))
1394 d40c->phy_chan = &phys[i];
1395 d40c->log_num = log_num;
1399 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1401 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
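/*
 * Allocation layout sketch (illustrative, derived from the loops
 * above): an event group g may only run on physical channels 2g and
 * 2g + 1 within each bank of 8, i.e. the candidates are 2g, 2g + 1,
 * 2g + 8, 2g + 9, ... up to num_phy_chans; walking the banks this way
 * spreads logical channels across the physical ones.
 */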
1407 static int d40_config_memcpy(struct d40_chan *d40c)
1409 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1411 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1412 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1413 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1414 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1415 memcpy[d40c->chan.chan_id];
1417 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1418 dma_has_cap(DMA_SLAVE, cap)) {
1419 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1421 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
1430 static int d40_free_dma(struct d40_chan *d40c)
1435 struct d40_phy_res *phy = d40c->phy_chan;
1438 struct d40_desc *_d;
1441 /* Terminate all queued and active transfers */
1444 /* Release client owned descriptors */
1445 if (!list_empty(&d40c->client))
1446 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1447 d40_pool_lli_free(d);
1449 d40_desc_free(d40c, d);
1453 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
1458 if (phy->allocated_src == D40_ALLOC_FREE &&
1459 phy->allocated_dst == D40_ALLOC_FREE) {
1460 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
1465 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1466 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1467 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1469 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1470 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1473 dev_err(&d40c->chan.dev->device,
1474 "[%s] Unknown direction\n", __func__);
1478 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1480 dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
1485 if (d40c->log_num != D40_PHY_CHAN) {
1486 /* Release logical channel, deactivate the event line */
1488 d40_config_set_event(d40c, false);
1489 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1492 * Check if there are more logical allocations
1493 * on this phy channel.
1495 if (!d40_alloc_mask_free(phy, is_src, event)) {
1496 /* Resume the other logical channels if any */
1497 if (d40_chan_has_events(d40c)) {
1498 res = d40_channel_execute_command(d40c,
1501 dev_err(&d40c->chan.dev->device,
1502 "[%s] Executing RUN command\n",
1510 (void) d40_alloc_mask_free(phy, is_src, 0);
1513 /* Release physical channel */
1514 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1516 dev_err(&d40c->chan.dev->device,
1517 "[%s] Failed to stop channel\n", __func__);
1520 d40c->phy_chan = NULL;
1521 /* Invalidate channel type */
1522 d40c->dma_cfg.channel_type = 0;
1523 d40c->base->lookup_phy_chans[phy->num] = NULL;
1528 static bool d40_is_paused(struct d40_chan *d40c)
1530 bool is_paused = false;
1531 unsigned long flags;
1532 void __iomem *active_reg;
1536 spin_lock_irqsave(&d40c->lock, flags);
1538 if (d40c->log_num == D40_PHY_CHAN) {
1539 if (d40c->phy_chan->num % 2 == 0)
1540 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1542 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1544 status = (readl(active_reg) &
1545 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1546 D40_CHAN_POS(d40c->phy_chan->num);
1547 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1553 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1554 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1555 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1556 status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1557 d40c->phy_chan->num * D40_DREG_PCDELTA +
1558 D40_CHAN_REG_SDLNK);
1559 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1560 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1561 status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1562 d40c->phy_chan->num * D40_DREG_PCDELTA +
1563 D40_CHAN_REG_SSLNK);
1565 dev_err(&d40c->chan.dev->device,
1566 "[%s] Unknown direction\n", __func__);
1570 status = (status & D40_EVENTLINE_MASK(event)) >>
1571 D40_EVENTLINE_POS(event);
1573 if (status != D40_DMA_RUN)
1576 spin_unlock_irqrestore(&d40c->lock, flags);
1582 static u32 stedma40_residue(struct dma_chan *chan)
1584 struct d40_chan *d40c =
1585 container_of(chan, struct d40_chan, chan);
1587 unsigned long flags;
1589 spin_lock_irqsave(&d40c->lock, flags);
1590 bytes_left = d40_residue(d40c);
1591 spin_unlock_irqrestore(&d40c->lock, flags);
1596 struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1597 struct scatterlist *sgl_dst,
1598 struct scatterlist *sgl_src,
1599 unsigned int sgl_len,
1600 unsigned long dma_flags)
1603 struct d40_desc *d40d;
1604 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1606 unsigned long flags;
1608 if (d40c->phy_chan == NULL) {
1609 dev_err(&d40c->chan.dev->device,
1610 "[%s] Unallocated channel.\n", __func__);
1611 return ERR_PTR(-EINVAL);
1614 spin_lock_irqsave(&d40c->lock, flags);
1615 d40d = d40_desc_get(d40c);
1620 d40d->lli_len = sgl_len;
1621 d40d->lli_current = 0;
1622 d40d->txd.flags = dma_flags;
1624 if (d40c->log_num != D40_PHY_CHAN) {
1626 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1627 dev_err(&d40c->chan.dev->device,
1628 "[%s] Out of memory\n", __func__);
1632 (void) d40_log_sg_to_lli(sgl_src,
1635 d40c->log_def.lcsp1,
1636 d40c->dma_cfg.src_info.data_width);
1638 (void) d40_log_sg_to_lli(sgl_dst,
1641 d40c->log_def.lcsp3,
1642 d40c->dma_cfg.dst_info.data_width);
1644 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1645 dev_err(&d40c->chan.dev->device,
1646 "[%s] Out of memory\n", __func__);
1650 res = d40_phy_sg_to_lli(sgl_src,
1654 virt_to_phys(d40d->lli_phy.src),
1656 d40c->dma_cfg.src_info.data_width,
1657 d40c->dma_cfg.src_info.psize);
1662 res = d40_phy_sg_to_lli(sgl_dst,
1666 virt_to_phys(d40d->lli_phy.dst),
1668 d40c->dma_cfg.dst_info.data_width,
1669 d40c->dma_cfg.dst_info.psize);
1674 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1675 d40d->lli_pool.size, DMA_TO_DEVICE);
1678 dma_async_tx_descriptor_init(&d40d->txd, chan);
1680 d40d->txd.tx_submit = d40_tx_submit;
1682 spin_unlock_irqrestore(&d40c->lock, flags);
1687 d40_desc_free(d40c, d40d);
1688 spin_unlock_irqrestore(&d40c->lock, flags);
1691 EXPORT_SYMBOL(stedma40_memcpy_sg);
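/*
 * Client flow sketch (illustrative, not part of this driver): a
 * descriptor prepared here follows the usual dmaengine three-step
 * sequence; sg_dst, sg_src and n are hypothetical names.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = stedma40_memcpy_sg(chan, sg_dst, sg_src, n,
 *				DMA_PREP_INTERRUPT);
 *	if (!IS_ERR_OR_NULL(tx)) {
 *		cookie = tx->tx_submit(tx);	- lands in d40_tx_submit()
 *		dma_async_issue_pending(chan);
 *	}
 */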
1693 bool stedma40_filter(struct dma_chan *chan, void *data)
1695 struct stedma40_chan_cfg *info = data;
1696 struct d40_chan *d40c =
1697 container_of(chan, struct d40_chan, chan);
1701 err = d40_validate_conf(d40c, info);
1703 d40c->dma_cfg = *info;
1705 err = d40_config_memcpy(d40c);
1709 EXPORT_SYMBOL(stedma40_filter);
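/*
 * Usage sketch (illustrative): a peripheral driver typically passes
 * stedma40_filter together with its channel configuration to
 * dma_request_channel(); the cfg values below are hypothetical.
 *
 *	struct stedma40_chan_cfg cfg = {
 *		.dir = STEDMA40_PERIPH_TO_MEM,
 *		.src_dev_type = <board specific event line>,
 *		.dst_dev_type = STEDMA40_DEV_DST_MEMORY,
 *	};
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */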
1711 /* DMA ENGINE functions */
1712 static int d40_alloc_chan_resources(struct dma_chan *chan)
1715 unsigned long flags;
1716 struct d40_chan *d40c =
1717 container_of(chan, struct d40_chan, chan);
1719 spin_lock_irqsave(&d40c->lock, flags);
1721 d40c->completed = chan->cookie = 1;
1724 * If no dma configuration is set (channel_type == 0)
1725 * use default configuration (memcpy)
1727 if (d40c->dma_cfg.channel_type == 0) {
1729 err = d40_config_memcpy(d40c);
1731 dev_err(&d40c->chan.dev->device,
1732 "[%s] Failed to configure memcpy channel\n",
1737 is_free_phy = (d40c->phy_chan == NULL);
1739 err = d40_allocate_channel(d40c);
1741 dev_err(&d40c->chan.dev->device,
1742 "[%s] Failed to allocate channel\n", __func__);
1746 /* Fill in basic CFG register values */
1747 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1748 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1750 if (d40c->log_num != D40_PHY_CHAN) {
1751 d40_log_cfg(&d40c->dma_cfg,
1752 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1754 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1755 d40c->lcpa = d40c->base->lcpa_base +
1756 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1758 d40c->lcpa = d40c->base->lcpa_base +
1759 d40c->dma_cfg.dst_dev_type *
1760 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1764 * Only write channel configuration to the DMA if the physical
1765 * resource is free. In case of multiple logical channels
1766 * on the same physical resource, only the first write is necessary.
1769 d40_config_write(d40c);
1771 spin_unlock_irqrestore(&d40c->lock, flags);
1775 static void d40_free_chan_resources(struct dma_chan *chan)
1777 struct d40_chan *d40c =
1778 container_of(chan, struct d40_chan, chan);
1780 unsigned long flags;
1782 if (d40c->phy_chan == NULL) {
1783 dev_err(&d40c->chan.dev->device,
1784 "[%s] Cannot free unallocated channel\n", __func__);
1789 spin_lock_irqsave(&d40c->lock, flags);
1791 err = d40_free_dma(d40c);
1794 dev_err(&d40c->chan.dev->device,
1795 "[%s] Failed to free channel\n", __func__);
1796 spin_unlock_irqrestore(&d40c->lock, flags);
1799 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1803 unsigned long dma_flags)
1805 struct d40_desc *d40d;
1806 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1808 unsigned long flags;
1811 if (d40c->phy_chan == NULL) {
1812 dev_err(&d40c->chan.dev->device,
1813 "[%s] Channel is not allocated.\n", __func__);
1814 return ERR_PTR(-EINVAL);
1817 spin_lock_irqsave(&d40c->lock, flags);
1818 d40d = d40_desc_get(d40c);
1821 dev_err(&d40c->chan.dev->device,
1822 "[%s] Descriptor is NULL\n", __func__);
1826 d40d->txd.flags = dma_flags;
1828 dma_async_tx_descriptor_init(&d40d->txd, chan);
1830 d40d->txd.tx_submit = d40_tx_submit;
1832 if (d40c->log_num != D40_PHY_CHAN) {
1834 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1835 dev_err(&d40c->chan.dev->device,
1836 "[%s] Out of memory\n", __func__);
1840 d40d->lli_current = 0;
1842 d40_log_fill_lli(d40d->lli_log.src,
1845 d40c->log_def.lcsp1,
1846 d40c->dma_cfg.src_info.data_width,
1849 d40_log_fill_lli(d40d->lli_log.dst,
1852 d40c->log_def.lcsp3,
1853 d40c->dma_cfg.dst_info.data_width,
1858 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1859 dev_err(&d40c->chan.dev->device,
1860 "[%s] Out of memory\n", __func__);
1864 err = d40_phy_fill_lli(d40d->lli_phy.src,
1867 d40c->dma_cfg.src_info.psize,
1871 d40c->dma_cfg.src_info.data_width,
1876 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1879 d40c->dma_cfg.dst_info.psize,
1883 d40c->dma_cfg.dst_info.data_width,
1889 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1890 d40d->lli_pool.size, DMA_TO_DEVICE);
1893 spin_unlock_irqrestore(&d40c->lock, flags);
1897 dev_err(&d40c->chan.dev->device,
1898 "[%s] Failed filling in PHY LLI\n", __func__);
1901 d40_desc_free(d40c, d40d);
1902 spin_unlock_irqrestore(&d40c->lock, flags);
1906 static struct dma_async_tx_descriptor *
1907 d40_prep_sg(struct dma_chan *chan,
1908 struct scatterlist *dst_sg, unsigned int dst_nents,
1909 struct scatterlist *src_sg, unsigned int src_nents,
1910 unsigned long dma_flags)
1912 if (dst_nents != src_nents)
1915 return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags);
1918 static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1919 struct d40_chan *d40c,
1920 struct scatterlist *sgl,
1921 unsigned int sg_len,
1922 enum dma_data_direction direction,
1923 unsigned long dma_flags)
1925 dma_addr_t dev_addr = 0;
1928 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1929 dev_err(&d40c->chan.dev->device,
1930 "[%s] Out of memory\n", __func__);
1934 d40d->lli_len = sg_len;
1935 d40d->lli_current = 0;
1937 if (direction == DMA_FROM_DEVICE)
1938 if (d40c->runtime_addr)
1939 dev_addr = d40c->runtime_addr;
1941 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1942 else if (direction == DMA_TO_DEVICE)
1943 if (d40c->runtime_addr)
1944 dev_addr = d40c->runtime_addr;
1946 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1951 total_size = d40_log_sg_to_dev(sgl, sg_len,
1954 d40c->dma_cfg.src_info.data_width,
1955 d40c->dma_cfg.dst_info.data_width,
1965 static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1966 struct d40_chan *d40c,
1967 struct scatterlist *sgl,
1968 unsigned int sgl_len,
1969 enum dma_data_direction direction,
1970 unsigned long dma_flags)
1972 dma_addr_t src_dev_addr;
1973 dma_addr_t dst_dev_addr;
1976 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1977 dev_err(&d40c->chan.dev->device,
1978 "[%s] Out of memory\n", __func__);
1982 d40d->lli_len = sgl_len;
1983 d40d->lli_current = 0;
1985 if (direction == DMA_FROM_DEVICE) {
1987 if (d40c->runtime_addr)
1988 src_dev_addr = d40c->runtime_addr;
1990 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1991 } else if (direction == DMA_TO_DEVICE) {
1992 if (d40c->runtime_addr)
1993 dst_dev_addr = d40c->runtime_addr;
1995 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
2000 res = d40_phy_sg_to_lli(sgl,
2004 virt_to_phys(d40d->lli_phy.src),
2006 d40c->dma_cfg.src_info.data_width,
2007 d40c->dma_cfg.src_info.psize);
2011 res = d40_phy_sg_to_lli(sgl,
2015 virt_to_phys(d40d->lli_phy.dst),
2017 d40c->dma_cfg.dst_info.data_width,
2018 d40c->dma_cfg.dst_info.psize);
2022 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
2023 d40d->lli_pool.size, DMA_TO_DEVICE);
2027 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2028 struct scatterlist *sgl,
2029 unsigned int sg_len,
2030 enum dma_data_direction direction,
2031 unsigned long dma_flags)
2033 struct d40_desc *d40d;
2034 struct d40_chan *d40c = container_of(chan, struct d40_chan,
2036 unsigned long flags;
2039 if (d40c->phy_chan == NULL) {
2040 dev_err(&d40c->chan.dev->device,
2041 "[%s] Cannot prepare unallocated channel\n", __func__);
2042 return ERR_PTR(-EINVAL);
2045 spin_lock_irqsave(&d40c->lock, flags);
2046 d40d = d40_desc_get(d40c);
2051 if (d40c->log_num != D40_PHY_CHAN)
2052 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
2053 direction, dma_flags);
2055 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
2056 direction, dma_flags);
2058 dev_err(&d40c->chan.dev->device,
2059 "[%s] Failed to prepare %s slave sg job: %d\n",
2061 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2065 d40d->txd.flags = dma_flags;
2067 dma_async_tx_descriptor_init(&d40d->txd, chan);
2069 d40d->txd.tx_submit = d40_tx_submit;
2071 spin_unlock_irqrestore(&d40c->lock, flags);
2076 d40_desc_free(d40c, d40d);
2077 spin_unlock_irqrestore(&d40c->lock, flags);
2081 static enum dma_status d40_tx_status(struct dma_chan *chan,
2082 dma_cookie_t cookie,
2083 struct dma_tx_state *txstate)
2085 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2086 dma_cookie_t last_used;
2087 dma_cookie_t last_complete;
2090 if (d40c->phy_chan == NULL) {
2091 dev_err(&d40c->chan.dev->device,
2092 "[%s] Cannot read status of unallocated channel\n",
2097 last_complete = d40c->completed;
2098 last_used = chan->cookie;
2100 if (d40_is_paused(d40c))
2103 ret = dma_async_is_complete(cookie, last_complete, last_used);
2105 dma_set_tx_state(txstate, last_complete, last_used,
2106 stedma40_residue(chan));
2111 static void d40_issue_pending(struct dma_chan *chan)
2113 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2114 unsigned long flags;
2116 if (d40c->phy_chan == NULL) {
2117 dev_err(&d40c->chan.dev->device,
2118 "[%s] Channel is not allocated!\n", __func__);
2122 spin_lock_irqsave(&d40c->lock, flags);
2124 /* Busy means that pending jobs are already being processed */
2126 (void) d40_queue_start(d40c);
2128 spin_unlock_irqrestore(&d40c->lock, flags);
2131 /* Runtime reconfiguration extension */
2132 static void d40_set_runtime_config(struct dma_chan *chan,
2133 struct dma_slave_config *config)
2135 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2136 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2137 enum dma_slave_buswidth config_addr_width;
2138 dma_addr_t config_addr;
2139 u32 config_maxburst;
2140 enum stedma40_periph_data_width addr_width;
2143 if (config->direction == DMA_FROM_DEVICE) {
2144 dma_addr_t dev_addr_rx =
2145 d40c->base->plat_data->dev_rx[cfg->src_dev_type];
2147 config_addr = config->src_addr;
2149 dev_dbg(d40c->base->dev,
2150 "channel has a pre-wired RX address %08x "
2151 "overriding with %08x\n",
2152 dev_addr_rx, config_addr);
2153 if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
2154 dev_dbg(d40c->base->dev,
2155 "channel was not configured for peripheral "
2156 "to memory transfer (%d) overriding\n",
2158 cfg->dir = STEDMA40_PERIPH_TO_MEM;
2160 config_addr_width = config->src_addr_width;
2161 config_maxburst = config->src_maxburst;
2163 } else if (config->direction == DMA_TO_DEVICE) {
2164 dma_addr_t dev_addr_tx =
2165 d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
2167 config_addr = config->dst_addr;
2169 dev_dbg(d40c->base->dev,
2170 "channel has a pre-wired TX address %08x "
2171 "overriding with %08x\n",
2172 dev_addr_tx, config_addr);
2173 if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
2174 dev_dbg(d40c->base->dev,
2175 "channel was not configured for memory "
2176 "to peripheral transfer (%d) overriding\n",
2178 cfg->dir = STEDMA40_MEM_TO_PERIPH;
2180 config_addr_width = config->dst_addr_width;
2181 config_maxburst = config->dst_maxburst;
2184 dev_err(d40c->base->dev,
2185 "unrecognized channel direction %d\n",
2190 switch (config_addr_width) {
2191 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2192 addr_width = STEDMA40_BYTE_WIDTH;
2194 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2195 addr_width = STEDMA40_HALFWORD_WIDTH;
2197 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2198 addr_width = STEDMA40_WORD_WIDTH;
2200 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2201 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2204 dev_err(d40c->base->dev,
2205 "illegal peripheral address width "
2207 config->src_addr_width);
2211 if (d40c->log_num != D40_PHY_CHAN) {
2212 if (config_maxburst >= 16)
2213 psize = STEDMA40_PSIZE_LOG_16;
2214 else if (config_maxburst >= 8)
2215 psize = STEDMA40_PSIZE_LOG_8;
2216 else if (config_maxburst >= 4)
2217 psize = STEDMA40_PSIZE_LOG_4;
2219 psize = STEDMA40_PSIZE_LOG_1;
2221 if (config_maxburst >= 16)
2222 psize = STEDMA40_PSIZE_PHY_16;
2223 else if (config_maxburst >= 8)
2224 psize = STEDMA40_PSIZE_PHY_8;
2225 else if (config_maxburst >= 4)
2226 psize = STEDMA40_PSIZE_PHY_4;
2228 psize = STEDMA40_PSIZE_PHY_1;
2231 /* Set up all the endpoint configs */
2232 cfg->src_info.data_width = addr_width;
2233 cfg->src_info.psize = psize;
2234 cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN;
2235 cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2236 cfg->dst_info.data_width = addr_width;
2237 cfg->dst_info.psize = psize;
2238 cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN;
2239 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2241 /* Fill in register values */
2242 if (d40c->log_num != D40_PHY_CHAN)
2243 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2245 d40_phy_cfg(cfg, &d40c->src_def_cfg,
2246 &d40c->dst_def_cfg, false);
2248 /* These settings will take precedence later */
2249 d40c->runtime_addr = config_addr;
2250 d40c->runtime_direction = config->direction;
2251 dev_dbg(d40c->base->dev,
2252 "configured channel %s for %s, data width %d, "
2253 "maxburst %d bytes, LE, no flow control\n",
2254 dma_chan_name(chan),
2255 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
2260 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2263 unsigned long flags;
2264 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2266 if (d40c->phy_chan == NULL) {
2267 dev_err(&d40c->chan.dev->device,
2268 "[%s] Channel is not allocated!\n", __func__);
2273 case DMA_TERMINATE_ALL:
2274 spin_lock_irqsave(&d40c->lock, flags);
2276 spin_unlock_irqrestore(&d40c->lock, flags);
2279 return d40_pause(chan);
2281 return d40_resume(chan);
2282 case DMA_SLAVE_CONFIG:
2283 d40_set_runtime_config(chan,
2284 (struct dma_slave_config *) arg);
2290 /* Other commands are unimplemented */
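/*
 * Runtime configuration sketch (illustrative): a client reconfigures a
 * slave channel by feeding a struct dma_slave_config through the
 * generic device_control() hook, which ends up in
 * d40_set_runtime_config() above. Address and widths are hypothetical.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_FROM_DEVICE,
 *		.src_addr = <peripheral FIFO address>,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,	- STEDMA40_PSIZE_LOG_8 on a log chan
 *	};
 *
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *				     (unsigned long) &cfg);
 */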
2294 /* Initialization functions */
2296 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2297 struct d40_chan *chans, int offset,
2301 struct d40_chan *d40c;
2303 INIT_LIST_HEAD(&dma->channels);
2305 for (i = offset; i < offset + num_chans; i++) {
2308 d40c->chan.device = dma;
2310 spin_lock_init(&d40c->lock);
2312 d40c->log_num = D40_PHY_CHAN;
2314 INIT_LIST_HEAD(&d40c->active);
2315 INIT_LIST_HEAD(&d40c->queue);
2316 INIT_LIST_HEAD(&d40c->client);
2318 tasklet_init(&d40c->tasklet, dma_tasklet,
2319 (unsigned long) d40c);
2321 list_add_tail(&d40c->chan.device_node,
2326 static int __init d40_dmaengine_init(struct d40_base *base,
2327 int num_reserved_chans)
2331 d40_chan_init(base, &base->dma_slave, base->log_chans,
2332 0, base->num_log_chans);
2334 dma_cap_zero(base->dma_slave.cap_mask);
2335 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2337 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2338 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2339 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2340 base->dma_slave.device_prep_dma_sg = d40_prep_sg;
2341 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2342 base->dma_slave.device_tx_status = d40_tx_status;
2343 base->dma_slave.device_issue_pending = d40_issue_pending;
2344 base->dma_slave.device_control = d40_control;
2345 base->dma_slave.dev = base->dev;
2347 err = dma_async_device_register(&base->dma_slave);
2351 "[%s] Failed to register slave channels\n",
2356 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2357 base->num_log_chans, base->plat_data->memcpy_len);
2359 dma_cap_zero(base->dma_memcpy.cap_mask);
2360 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2361 dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
2363 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2364 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2365 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2366 base->dma_memcpy.device_prep_dma_sg = d40_prep_sg;
2367 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2368 base->dma_memcpy.device_tx_status = d40_tx_status;
2369 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2370 base->dma_memcpy.device_control = d40_control;
2371 base->dma_memcpy.dev = base->dev;
2373 * This controller can only access addresses at even
2374 * 32-bit boundaries, i.e. 2^2
2376 base->dma_memcpy.copy_align = 2;
2378 err = dma_async_device_register(&base->dma_memcpy);
2382 "[%s] Failed to regsiter memcpy only channels\n",
2387 d40_chan_init(base, &base->dma_both, base->phy_chans,
2388 0, num_reserved_chans);
2390 dma_cap_zero(base->dma_both.cap_mask);
2391 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2392 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2393 dma_cap_set(DMA_SG, base->dma_both.cap_mask);
2395 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2396 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2397 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2398 base->dma_both.device_prep_dma_sg = d40_prep_sg;
2399 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2400 base->dma_both.device_tx_status = d40_tx_status;
2401 base->dma_both.device_issue_pending = d40_issue_pending;
2402 base->dma_both.device_control = d40_control;
2403 base->dma_both.dev = base->dev;
2404 base->dma_both.copy_align = 2;
2405 err = dma_async_device_register(&base->dma_both);
2409 "[%s] Failed to register logical and physical capable channels\n",
2415 dma_async_device_unregister(&base->dma_memcpy);
2417 dma_async_device_unregister(&base->dma_slave);
2424 static int __init d40_phy_res_init(struct d40_base *base)
2427 int num_phy_chans_avail = 0;
2429 int odd_even_bit = -2;
2431 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2432 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2434 for (i = 0; i < base->num_phy_chans; i++) {
2435 base->phy_res[i].num = i;
2436 odd_even_bit += 2 * ((i % 2) == 0);
2437 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2438 /* Mark security only channels as occupied */
2439 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2440 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2442 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2443 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2444 num_phy_chans_avail++;
2446 spin_lock_init(&base->phy_res[i].lock);
2449 /* Mark disabled channels as occupied */
2450 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
2451 int chan = base->plat_data->disabled_channels[i];
2453 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
2454 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
2455 num_phy_chans_avail--;
2458 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2459 num_phy_chans_avail, base->num_phy_chans);
2461 /* Verify settings extended vs standard */
2462 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2464 for (i = 0; i < base->num_phy_chans; i++) {
2466 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2467 (val[0] & 0x3) != 1)
2469 "[%s] INFO: channel %d is misconfigured (%d)\n",
2470 __func__, i, val[0] & 0x3);
2472 val[0] = val[0] >> 2;
2475 return num_phy_chans_avail;
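/*
 * Security decode example (illustrative): PRSME holds the 2-bit mode
 * fields of the even channels and PRSMO those of the odd ones, so
 * channel i is looked up as (val[i % 2] >> odd_even_bit) & 3 above; a
 * field value of 1 means the channel is reserved for secure mode and
 * is therefore marked allocated so the driver never hands it out.
 */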

static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	static const struct d40_reg_val dma_id_regs[] = {
		/* Peripheral Id */
		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
		/*
		 * D40_DREG_PERIPHID2 depends on HW revision:
		 * MOP500/HREF ED has 0x0008,
		 * HREF V1 has 0x0028
		 */
		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},
		/* PCell Id */
		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
	};
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int i;
	u32 val;
	u32 rev;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "[%s] No matching clock found\n",
			__func__);
		goto failure;
	}

	clk_enable(clk);

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* HW version check */
	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
		if (dma_id_regs[i].val !=
		    readl(virtbase + dma_id_regs[i].reg)) {
			dev_err(&pdev->dev,
				"[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
				__func__,
				dma_id_regs[i].val,
				dma_id_regs[i].reg,
				readl(virtbase + dma_id_regs[i].reg));
			goto failure;
		}
	}

	/* Get silicon revision and designer */
	val = readl(virtbase + D40_DREG_PERIPHID2);

	if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
	    D40_HW_DESIGNER) {
		dev_err(&pdev->dev,
			"[%s] Unknown designer! Got %x wanted %x\n",
			__func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
			D40_HW_DESIGNER);
		goto failure;
	}

	rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
		D40_DREG_PERIPHID2_REV_POS;

	/* The number of physical channels on this HW */
	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
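
	/*
	 * ICFG[2:0] encodes the channel count in steps of four, so e.g. a
	 * raw value of 1 yields 4 * 1 + 4 = 8 physical channels.
	 */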

	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
		 rev, res->start);

	plat_data = pdev->dev.platform_data;

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);
	if (base == NULL) {
		dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
		goto failure;
	}

	base->rev = rev;
	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];
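
	/*
	 * The d40_base struct, the physical channels, the logical channels
	 * and the memcpy channels all live in the single kzalloc() block
	 * above: phy_chans points just past the aligned d40_base header
	 * and log_chans follows the num_phy_chans physical entries.
	 */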

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The maximum number of logical channels is the number of
		 * event lines for all src devices plus all dst devices.
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}
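
	/*
	 * One d40_desc pointer per LCLA link and physical channel, i.e.
	 * num_phy_chans * D40_LCLA_LINK_PER_EVENT_GRP entries in total.
	 */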
	base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
					    sizeof(struct d40_desc *) *
					    D40_LCLA_LINK_PER_EVENT_GRP,
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;

failure:
	if (!IS_ERR(clk)) {
		clk_disable(clk);
		clk_put(clk);
	}
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}

static void __init d40_hw_init(struct d40_base *base)
{
	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC, .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;

	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
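	/*
	 * The loop below walks the channels from the highest number down
	 * to the lowest: iteration i inspects channel num_phy_chans - i - 1
	 * and shifts the bits gathered so far upwards, two bits per channel
	 * for the mode/activation words and one bit per channel for the
	 * interrupt masks.
	 */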
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable the interrupt for this channel */
		pcmis = (pcmis << 1) | 1;

		/* Clear any pending interrupt for this channel */
		pcicr = (pcicr << 1) | 1;

		/* Set the channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}
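
	/*
	 * Because the loop started at the highest channel number (which is
	 * odd, num_phy_chans being a multiple of four), parity index 0
	 * ended up holding the odd-numbered channels and index 1 the
	 * even-numbered ones; hence [1] goes to the "E" registers and [0]
	 * to the "O" registers below.
	 */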
	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);
}

static int __init d40_lcla_allocate(struct d40_base *base)
{
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18-bit
	 * aligned. To fulfil this hardware requirement without wasting
	 * 256 KB, we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculate how many pages are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
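
	/*
	 * That is 1 KB of LCLA space per physical channel, which fits its
	 * D40_LCLA_LINK_PER_EVENT_GRP links assuming the 8-byte logical
	 * link layout used by this hardware (128 * 8 = 1024 bytes).
	 */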

	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			dev_err(base->dev,
				"[%s] Failed to allocate %d pages.\n",
				__func__, base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}
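
	/*
	 * Free every allocation that failed the alignment test; on
	 * success page_list[i] itself is kept.
	 */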
	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts with no success finding the correct
		 * alignment, fall back to allocating a big buffer.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
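
		/*
		 * Overallocating by LCLA_ALIGNMENT bytes guarantees that
		 * PTR_ALIGN() below can find an aligned start inside the
		 * buffer, at the price of wasting up to 256 KB.
		 */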
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);

failure:
	kfree(page_list);
	return ret;
}

static int __init d40_probe(struct platform_device *pdev)
{
	int err;
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);
	if (base == NULL)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		dev_err(&pdev->dev,
			"[%s] No \"lcpa\" memory resource\n",
			__func__);
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		dev_err(&pdev->dev,
			"[%s] Failed to request LCPA region 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
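
	/*
	 * Only program D40_DREG_LCPA if the boot code has not already
	 * pointed it at a different (non-zero) address; such a mismatch
	 * is reported but left alone.
	 */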
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev,
			"[%s] Failed to ioremap LCPA region\n",
			__func__);
		goto failure;
	}

	ret = d40_lcla_allocate(base);
	if (ret) {
		dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
			__func__);
		goto failure;
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
		goto failure;
	}

	err = d40_dmaengine_init(base, num_reserved_chans);
	if (err)
		goto failure;

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);
		if (base->lcpa_base)
			iounmap(base->lcpa_base);
		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
	return ret;
}

static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};

int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
arch_initcall(stedma40_init);
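
/*
 * A client would typically claim a channel through the generic dmaengine
 * API using the filter exported by this driver; a minimal sketch (the
 * channel configuration values are platform-specific and only
 * illustrative):
 *
 *	struct stedma40_chan_cfg cfg = { ... };
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 *
 * stedma40_chan_cfg and stedma40_filter come from <plat/ste_dma40.h>.
 */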