/*
 * drivers/dma/ste_dma40.c
 *
 * Copyright (C) ST-Ericsson 2007-2010
 * License terms: GNU General Public License (GPL) version 2
 * Author: Per Friden <per.friden@stericsson.com>
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"
#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan) (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
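
/*
 * Illustrative example (not part of the original source): physical
 * channel 5 shares its two-bit register field pair with channel 4;
 * D40_CHAN_POS(5) = 2 * (5 / 2) = 4, so the two bits for channel 5 sit
 * at bit positions 4..5 and D40_CHAN_POS_MASK(5) is 0x3 << 4 == 0x30.
 */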

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000
/* Attempts before giving up on trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE (1 << 31)
#define D40_ALLOC_PHY (1 << 30)
#define D40_ALLOC_LOG_FREE 0
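
/*
 * Illustrative example (not part of the original source): a side of a
 * physical channel that carries logical event lines 2 and 5 has an
 * allocation mask of D40_ALLOC_LOG_FREE | (1 << 2) | (1 << 5); a fully
 * free side holds D40_ALLOC_FREE, and a side claimed by a physical-mode
 * client holds D40_ALLOC_PHY.
 */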

/* Hardware designer of the block */
#define D40_PERIPHID2_DESIGNER 0x8

/*
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command is RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
        D40_DMA_STOP = 0,
        D40_DMA_RUN = 1,
        D40_DMA_SUSPEND_REQ = 2,

/*
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre-allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
        void *base;
        int size;
        /* Space for dst and src, plus an extra for padding */
        u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];

/*
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_count: Number of transferred llis.
 * @lli_tx_len: Max number of LLIs per transfer; there can be
 * many transfers for one descriptor.
 * @txd: DMA engine struct. Used, among other things, for communication
 * @dir: The transfer direction of this job.
 * @is_in_client_list: true if the client owns this descriptor.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
        struct d40_phy_lli_bidir lli_phy;
        struct d40_log_lli_bidir lli_log;

        struct d40_lli_pool lli_pool;
        int lli_len;
        int lli_count;
        u32 lli_tx_len;

        struct dma_async_tx_descriptor txd;
        struct list_head node;

        enum dma_data_direction dir;
        bool is_in_client_list;

/*
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Bitmap mapping between physical channel and LCLA entries.
 * @num_blocks: The number of entries of alloc_map. Equal to the
 * number of physical channels.
 */
struct d40_lcla_pool {
        void *base;
        void *base_unaligned;

/*
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * the event line number. allocated_src and allocated_dst cannot both be
 * allocated to a physical channel, since the interrupt handler would then
 * have no way of figuring out which one the interrupt belongs to.
 */

/*
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @completed: Starts with 1; after the first interrupt it is set to the dma
 * engine's current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst/src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
        /* ID of the most recent completed transfer */
        int completed;
        int pending_tx;
        bool busy;

        struct d40_phy_res *phy_chan;
        struct dma_chan chan;
        struct tasklet_struct tasklet;
        struct list_head client;
        struct list_head active;
        struct list_head queue;
        struct stedma40_chan_cfg dma_cfg;
        struct d40_base *base;
        /* Default register configurations */
        u32 src_def_cfg;
        u32 dst_def_cfg;
        struct d40_def_lcsp log_def;
        struct d40_lcla_elem lcla;
        struct d40_log_lli_full *lcpa;
        /* Runtime reconfiguration */
        dma_addr_t runtime_addr;
        enum dma_data_direction runtime_direction;

/*
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's registers.
 * @rev: Silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * platform data.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
        spinlock_t interrupt_lock;
        spinlock_t execmd_lock;
        struct device *dev;
        void __iomem *virtbase;
        int rev;
        struct clk *clk;
        phys_addr_t phy_start;
        resource_size_t phy_size;
        int irq;
        int num_phy_chans;
        int num_log_chans;
        struct dma_device dma_both;
        struct dma_device dma_slave;
        struct dma_device dma_memcpy;
        struct d40_chan *phy_chans;
        struct d40_chan *log_chans;
        struct d40_chan **lookup_log_chans;
        struct d40_chan **lookup_phy_chans;
        struct stedma40_platform_data *plat_data;
        /* Physical half channels */
        struct d40_phy_res *phy_res;
        struct d40_lcla_pool lcla_pool;
        void __iomem *lcpa_base;
        dma_addr_t phy_lcpa;
        resource_size_t lcpa_size;
        struct kmem_cache *desc_slab;

/*
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {

/*
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */

static int d40_pool_lli_alloc(struct d40_desc *d40d,
                              int lli_len, bool is_log)

        if (is_log)
                align = sizeof(struct d40_log_lli);
        else
                align = sizeof(struct d40_phy_lli);

        base = d40d->lli_pool.pre_alloc_lli;
        d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
        d40d->lli_pool.base = NULL;

        d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);

        base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
        d40d->lli_pool.base = base;

        if (d40d->lli_pool.base == NULL)

        d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
        d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,

        d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *) base,
        d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,

        d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
        d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);

static void d40_pool_lli_free(struct d40_desc *d40d)

        kfree(d40d->lli_pool.base);
        d40d->lli_pool.base = NULL;
        d40d->lli_pool.size = 0;
        d40d->lli_log.src = NULL;
        d40d->lli_log.dst = NULL;
        d40d->lli_phy.src = NULL;
        d40d->lli_phy.dst = NULL;
        d40d->lli_phy.src_addr = 0;
        d40d->lli_phy.dst_addr = 0;

static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
                                      struct d40_desc *desc)

        dma_cookie_t cookie = d40c->chan.cookie;

        d40c->chan.cookie = cookie;
        desc->txd.cookie = cookie;

static void d40_desc_remove(struct d40_desc *d40d)

        list_del(&d40d->node);

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)

        if (!list_empty(&d40c->client)) {
                list_for_each_entry_safe(d, _d, &d40c->client, node)
                        if (async_tx_test_ack(&d->txd)) {
                                d40_pool_lli_free(d);

        d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);

        memset(d, 0, sizeof(struct d40_desc));
        INIT_LIST_HEAD(&d->node);

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)

        kmem_cache_free(d40c->base->desc_slab, d40d);

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)

        list_add_tail(&desc->node, &d40c->active);

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)

        if (list_empty(&d40c->active))

        d = list_first_entry(&d40c->active,

static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)

        list_add_tail(&desc->node, &d40c->queue);

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)

        if (list_empty(&d40c->queue))

        d = list_first_entry(&d40c->queue,

/* Support functions for logical channels */

static int d40_lcla_id_get(struct d40_chan *d40c)

        struct d40_log_lli *lcla_lidx_base =
                d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
        int lli_per_log = d40c->base->plat_data->llis_per_log;

        if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)

        if (d40c->base->lcla_pool.num_blocks > 32)

        spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

        for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
                if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
                        d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=

        if (src_id >= d40c->base->lcla_pool.num_blocks)

        for (; i < d40c->base->lcla_pool.num_blocks; i++) {
                if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
                        d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=

        if (dst_id == src_id)

        d40c->lcla.src_id = src_id;
        d40c->lcla.dst_id = dst_id;
        d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
        d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;

        spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

        spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

static int d40_channel_execute_command(struct d40_chan *d40c,
                                       enum d40_command command)

        void __iomem *active_reg;

        spin_lock_irqsave(&d40c->base->execmd_lock, flags);

        if (d40c->phy_chan->num % 2 == 0)
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
        else
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

        if (command == D40_DMA_SUSPEND_REQ) {
                status = (readl(active_reg) &
                          D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                         D40_CHAN_POS(d40c->phy_chan->num);

                if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)

        wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
        writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),

        if (command == D40_DMA_SUSPEND_REQ) {

                for (i = 0; i < D40_SUSPEND_MAX_IT; i++) {
                        status = (readl(active_reg) &
                                  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                                 D40_CHAN_POS(d40c->phy_chan->num);

                        /*
                         * Reduce the number of bus accesses while
                         * waiting for the DMA to suspend.
                         */

                        if (status == D40_DMA_STOP ||
                            status == D40_DMA_SUSPENDED)

                if (i == D40_SUSPEND_MAX_IT) {
                        dev_err(&d40c->chan.dev->device,
                                "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
                                __func__, d40c->phy_chan->num, d40c->log_num,

        spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);

static void d40_term_all(struct d40_chan *d40c)

        struct d40_desc *d40d;

        /* Release active descriptors */
        while ((d40d = d40_first_active_get(d40c))) {
                d40_desc_remove(d40d);

                /* Return desc to free-list */
                d40_desc_free(d40c, d40d);
        }

        /* Release queued descriptors waiting for transfer */
        while ((d40d = d40_first_queued(d40c))) {
                d40_desc_remove(d40d);

                /* Return desc to free-list */
                d40_desc_free(d40c, d40d);
        }

        spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

        d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
                (~(0x1 << d40c->lcla.dst_id));
        d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
                (~(0x1 << d40c->lcla.src_id));

        d40c->lcla.src_id = -1;
        d40c->lcla.dst_id = -1;

        spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

        d40c->pending_tx = 0;

static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)

        /* Note that disabling requires the physical channel to be stopped */
        if (do_enable)
                val = D40_ACTIVATE_EVENTLINE;
        else
                val = D40_DEACTIVATE_EVENTLINE;

        spin_lock_irqsave(&d40c->phy_chan->lock, flags);

        /* Enable event line connected to device (or memcpy) */
        if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
            (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
                u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

                writel((val << D40_EVENTLINE_POS(event)) |
                       ~D40_EVENTLINE_MASK(event),
                       d40c->base->virtbase + D40_DREG_PCBASE +
                       d40c->phy_chan->num * D40_DREG_PCDELTA +

        if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
                u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

                writel((val << D40_EVENTLINE_POS(event)) |
                       ~D40_EVENTLINE_MASK(event),
                       d40c->base->virtbase + D40_DREG_PCBASE +
                       d40c->phy_chan->num * D40_DREG_PCDELTA +

        spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);

static u32 d40_chan_has_events(struct d40_chan *d40c)

        /* If SSLNK or SDLNK is zero all events are disabled */
        if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
            (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
                val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
                            d40c->phy_chan->num * D40_DREG_PCDELTA +

        if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
                val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
                            d40c->phy_chan->num * D40_DREG_PCDELTA +

static void d40_config_enable_lidx(struct d40_chan *d40c)

        /* Set LIDX for lcla */
        writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
               D40_SREG_ELEM_LOG_LIDX_MASK,
               d40c->base->virtbase + D40_DREG_PCBASE +
               d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);

        writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
               D40_SREG_ELEM_LOG_LIDX_MASK,
               d40c->base->virtbase + D40_DREG_PCBASE +
               d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);

static int d40_config_write(struct d40_chan *d40c)

        res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

        /* Odd addresses are even addresses + 4 */
        addr_base = (d40c->phy_chan->num % 2) * 4;
        /* Setup channel mode to logical or physical */
        var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
              D40_CHAN_POS(d40c->phy_chan->num);
        writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

        /* Setup operational mode option register */
        var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
               0x3) << D40_CHAN_POS(d40c->phy_chan->num);

        writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

        if (d40c->log_num != D40_PHY_CHAN) {
                /* Set default config for CFG reg */
                writel(d40c->src_def_cfg,
                       d40c->base->virtbase + D40_DREG_PCBASE +
                       d40c->phy_chan->num * D40_DREG_PCDELTA +
                writel(d40c->dst_def_cfg,
                       d40c->base->virtbase + D40_DREG_PCBASE +
                       d40c->phy_chan->num * D40_DREG_PCDELTA +

                d40_config_enable_lidx(d40c);

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)

        if (d40d->lli_phy.dst && d40d->lli_phy.src) {
                d40_phy_lli_write(d40c->base->virtbase,

        } else if (d40d->lli_log.dst && d40d->lli_log.src) {
                struct d40_log_lli *src = d40d->lli_log.src;
                struct d40_log_lli *dst = d40d->lli_log.dst;

                src += d40d->lli_count;
                dst += d40d->lli_count;
                s = d40_log_lli_write(d40c->lcpa,
                                      d40c->lcla.src, d40c->lcla.dst,

                                      d40c->base->plat_data->llis_per_log);

                /* If s is zero, the job is not linked */
                (void) dma_map_single(d40c->base->dev, d40c->lcla.src,
                                      s * sizeof(struct d40_log_lli),

                (void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
                                      s * sizeof(struct d40_log_lli),

        d40d->lli_count += d40d->lli_tx_len;

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)

        struct d40_chan *d40c = container_of(tx->chan,

        struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);

        spin_lock_irqsave(&d40c->lock, flags);

        tx->cookie = d40_assign_cookie(d40c, d40d);

        d40_desc_queue(d40c, d40d);

        spin_unlock_irqrestore(&d40c->lock, flags);

static int d40_start(struct d40_chan *d40c)

        if (d40c->base->rev == 0) {

                if (d40c->log_num != D40_PHY_CHAN) {
                        err = d40_channel_execute_command(d40c,
                                                          D40_DMA_SUSPEND_REQ);

        if (d40c->log_num != D40_PHY_CHAN)
                d40_config_set_event(d40c, true);

        return d40_channel_execute_command(d40c, D40_DMA_RUN);

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)

        struct d40_desc *d40d;

        /* Start queued jobs, if any */
        d40d = d40_first_queued(d40c);

        /* Remove from queue */
        d40_desc_remove(d40d);

        /* Add to active queue */
        d40_desc_submit(d40c, d40d);

        /* Initiate DMA job */
        d40_desc_load(d40c, d40d);

        err = d40_start(d40c);

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)

        struct d40_desc *d40d;

        /* Get first active entry from list */
        d40d = d40_first_active_get(d40c);

        if (d40d->lli_count < d40d->lli_len) {

                d40_desc_load(d40c, d40d);

                (void) d40_start(d40c);

        if (d40_queue_start(d40c) == NULL)

        tasklet_schedule(&d40c->tasklet);

static void dma_tasklet(unsigned long data)

        struct d40_chan *d40c = (struct d40_chan *) data;
        struct d40_desc *d40d_fin;
        dma_async_tx_callback callback;
        void *callback_param;

        spin_lock_irqsave(&d40c->lock, flags);

        /* Get first active entry from list */
        d40d_fin = d40_first_active_get(d40c);

        if (d40d_fin == NULL)

        d40c->completed = d40d_fin->txd.cookie;

        /*
         * If terminating a channel pending_tx is set to zero.
         * This prevents any finished active jobs from returning to the client.
         */
        if (d40c->pending_tx == 0) {
                spin_unlock_irqrestore(&d40c->lock, flags);

        /* Callback to client */
        callback = d40d_fin->txd.callback;
        callback_param = d40d_fin->txd.callback_param;

        if (async_tx_test_ack(&d40d_fin->txd)) {
                d40_pool_lli_free(d40d_fin);
                d40_desc_remove(d40d_fin);
                /* Return desc to free-list */
                d40_desc_free(d40c, d40d_fin);

                if (!d40d_fin->is_in_client_list) {
                        d40_desc_remove(d40d_fin);
                        list_add_tail(&d40d_fin->node, &d40c->client);
                        d40d_fin->is_in_client_list = true;
                }

        if (d40c->pending_tx)
                tasklet_schedule(&d40c->tasklet);

        spin_unlock_irqrestore(&d40c->lock, flags);

        callback(callback_param);

        /* Rescue maneuver if receiving double interrupts */
        if (d40c->pending_tx > 0)

        spin_unlock_irqrestore(&d40c->lock, flags);

static irqreturn_t d40_handle_interrupt(int irq, void *data)

        static const struct d40_interrupt_lookup il[] = {
                {D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
                {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
                {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
                {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
                {D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
                {D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
                {D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
                {D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
                {D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
                {D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
        };

        u32 regs[ARRAY_SIZE(il)];
        struct d40_chan *d40c;
        struct d40_base *base = data;

        spin_lock_irqsave(&base->interrupt_lock, flags);

        /* Read interrupt status of both logical and physical channels */
        for (i = 0; i < ARRAY_SIZE(il); i++)
                regs[i] = readl(base->virtbase + il[i].src);

                chan = find_next_bit((unsigned long *)regs,
                                     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

                /* No more set bits found? */
                if (chan == BITS_PER_LONG * ARRAY_SIZE(il))

                row = chan / BITS_PER_LONG;
                idx = chan & (BITS_PER_LONG - 1);

                tmp = readl(base->virtbase + il[row].clr);

                writel(tmp, base->virtbase + il[row].clr);

                if (il[row].offset == D40_PHY_CHAN)
                        d40c = base->lookup_phy_chans[idx];
                else
                        d40c = base->lookup_log_chans[il[row].offset + idx];
                spin_lock(&d40c->lock);

                if (!il[row].is_error)
                        dma_tc_handle(d40c);
                else
                        dev_err(base->dev,
                                "[%s] IRQ chan: %ld offset %d idx %d\n",
                                __func__, chan, il[row].offset, idx);

                spin_unlock(&d40c->lock);

        spin_unlock_irqrestore(&base->interrupt_lock, flags);

static int d40_validate_conf(struct d40_chan *d40c,
                             struct stedma40_chan_cfg *conf)

        u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
        u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
        bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
                      == STEDMA40_CHANNEL_IN_LOG_MODE;

                dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",

        if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
            d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
            d40c->runtime_addr == 0) {

                dev_err(&d40c->chan.dev->device,
                        "[%s] Invalid TX channel address (%d)\n",
                        __func__, conf->dst_dev_type);

        if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
            d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
            d40c->runtime_addr == 0) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Invalid RX channel address (%d)\n",
                        __func__, conf->src_dev_type);

        if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
            dst_event_group == STEDMA40_DEV_DST_MEMORY) {
                dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",

        if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
            src_event_group == STEDMA40_DEV_SRC_MEMORY) {
                dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",

        if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
            dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] No event line\n", __func__);

        if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
            (src_event_group != dst_event_group)) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Invalid event group\n", __func__);

        if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
                /*
                 * DMAC HW supports it. Will be added to this driver,
                 * in case any dma client requires it.
                 */
                dev_err(&d40c->chan.dev->device,
                        "[%s] periph to periph not supported\n",

static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
                               int log_event_line, bool is_log)

        unsigned long flags;

        spin_lock_irqsave(&phy->lock, flags);

        /* Physical interrupts are masked per physical full channel */
        if (phy->allocated_src == D40_ALLOC_FREE &&
            phy->allocated_dst == D40_ALLOC_FREE) {
                phy->allocated_dst = D40_ALLOC_PHY;
                phy->allocated_src = D40_ALLOC_PHY;

        /* Logical channel */
        if (phy->allocated_src == D40_ALLOC_PHY)

        if (phy->allocated_src == D40_ALLOC_FREE)
                phy->allocated_src = D40_ALLOC_LOG_FREE;

        if (!(phy->allocated_src & (1 << log_event_line))) {
                phy->allocated_src |= 1 << log_event_line;

        if (phy->allocated_dst == D40_ALLOC_PHY)

        if (phy->allocated_dst == D40_ALLOC_FREE)
                phy->allocated_dst = D40_ALLOC_LOG_FREE;

        if (!(phy->allocated_dst & (1 << log_event_line))) {
                phy->allocated_dst |= 1 << log_event_line;

        spin_unlock_irqrestore(&phy->lock, flags);

        spin_unlock_irqrestore(&phy->lock, flags);

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,

        unsigned long flags;
        bool is_free = false;

        spin_lock_irqsave(&phy->lock, flags);
        if (!log_event_line) {
                /* Physical interrupts are masked per physical full channel */
                phy->allocated_dst = D40_ALLOC_FREE;
                phy->allocated_src = D40_ALLOC_FREE;

        /* Logical channel */

        phy->allocated_src &= ~(1 << log_event_line);
        if (phy->allocated_src == D40_ALLOC_LOG_FREE)
                phy->allocated_src = D40_ALLOC_FREE;

        phy->allocated_dst &= ~(1 << log_event_line);
        if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
                phy->allocated_dst = D40_ALLOC_FREE;

        is_free = ((phy->allocated_src | phy->allocated_dst) ==

        spin_unlock_irqrestore(&phy->lock, flags);

static int d40_allocate_channel(struct d40_chan *d40c)

        struct d40_phy_res *phys;
        bool is_log = (d40c->dma_cfg.channel_type &
                       STEDMA40_CHANNEL_IN_OPER_MODE)
                      == STEDMA40_CHANNEL_IN_LOG_MODE;

        phys = d40c->base->phy_res;

        if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
                dev_type = d40c->dma_cfg.src_dev_type;
                log_num = 2 * dev_type;

        } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
                   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
                /* dst event lines are used for logical memcpy */
                dev_type = d40c->dma_cfg.dst_dev_type;
                log_num = 2 * dev_type + 1;

        event_group = D40_TYPE_TO_GROUP(dev_type);
        event_line = D40_TYPE_TO_EVENT(dev_type);

        if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
                /* Find physical half channel */
                for (i = 0; i < d40c->base->num_phy_chans; i++) {

                        if (d40_alloc_mask_set(&phys[i], is_src,

                for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
                        int phy_num = j + event_group * 2;
                        for (i = phy_num; i < phy_num + 2; i++) {
                                if (d40_alloc_mask_set(&phys[i],

                d40c->phy_chan = &phys[i];
                d40c->log_num = D40_PHY_CHAN;

        /* Find logical channel */
        for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
                int phy_num = j + event_group * 2;
                /*
                 * Spread logical channels across all available physical rather
                 * than pack every logical channel at the first available phy
                 * channels.
                 */
                for (i = phy_num; i < phy_num + 2; i++) {
                        if (d40_alloc_mask_set(&phys[i], is_src,
                                               event_line, is_log))

                for (i = phy_num + 1; i >= phy_num; i--) {
                        if (d40_alloc_mask_set(&phys[i], is_src,
                                               event_line, is_log))

        d40c->phy_chan = &phys[i];
        d40c->log_num = log_num;

        d40c->base->lookup_log_chans[d40c->log_num] = d40c;

        d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

static int d40_config_memcpy(struct d40_chan *d40c)

        dma_cap_mask_t cap = d40c->chan.device->cap_mask;

        if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
                d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
                d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
                d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
                        memcpy[d40c->chan.chan_id];

        } else if (dma_has_cap(DMA_MEMCPY, cap) &&
                   dma_has_cap(DMA_SLAVE, cap)) {
                d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
        } else {
                dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",

static int d40_free_dma(struct d40_chan *d40c)

        struct d40_phy_res *phy = d40c->phy_chan;

        struct d40_desc *_d;

        /* Terminate all queued and active transfers */

        /* Release client owned descriptors */
        if (!list_empty(&d40c->client))
                list_for_each_entry_safe(d, _d, &d40c->client, node) {
                        d40_pool_lli_free(d);

                        /* Return desc to free-list */
                        d40_desc_free(d40c, d);
                }

                dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",

        if (phy->allocated_src == D40_ALLOC_FREE &&
            phy->allocated_dst == D40_ALLOC_FREE) {
                dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",

        if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
            d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
                event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

        } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
                event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
        } else {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Unknown direction\n", __func__);

        res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
        if (res) {
                dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",

        if (d40c->log_num != D40_PHY_CHAN) {
                /* Release logical channel, deactivate the event line */

                d40_config_set_event(d40c, false);
                d40c->base->lookup_log_chans[d40c->log_num] = NULL;

                /*
                 * Check if there are more logical allocations
                 * on this phy channel.
                 */
                if (!d40_alloc_mask_free(phy, is_src, event)) {
                        /* Resume the other logical channels if any */
                        if (d40_chan_has_events(d40c)) {
                                res = d40_channel_execute_command(d40c,
                                                                  D40_DMA_RUN);
                                if (res) {
                                        dev_err(&d40c->chan.dev->device,
                                                "[%s] Executing RUN command\n",

        (void) d40_alloc_mask_free(phy, is_src, 0);

        /* Release physical channel */
        res = d40_channel_execute_command(d40c, D40_DMA_STOP);
        if (res) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Failed to stop channel\n", __func__);

        d40c->phy_chan = NULL;
        /* Invalidate channel type */
        d40c->dma_cfg.channel_type = 0;
        d40c->base->lookup_phy_chans[phy->num] = NULL;

static int d40_pause(struct dma_chan *chan)

        struct d40_chan *d40c =
                container_of(chan, struct d40_chan, chan);
        unsigned long flags;

        spin_lock_irqsave(&d40c->lock, flags);

        res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

        if (d40c->log_num != D40_PHY_CHAN) {
                d40_config_set_event(d40c, false);
                /* Resume the other logical channels if any */
                if (d40_chan_has_events(d40c))
                        res = d40_channel_execute_command(d40c,

        spin_unlock_irqrestore(&d40c->lock, flags);

static bool d40_is_paused(struct d40_chan *d40c)

        bool is_paused = false;
        unsigned long flags;
        void __iomem *active_reg;

        spin_lock_irqsave(&d40c->lock, flags);

        if (d40c->log_num == D40_PHY_CHAN) {
                if (d40c->phy_chan->num % 2 == 0)
                        active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
                else
                        active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

                status = (readl(active_reg) &
                          D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                         D40_CHAN_POS(d40c->phy_chan->num);
                if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)

        if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
            d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
                event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
        else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
                event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
        else {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Unknown direction\n", __func__);

        status = d40_chan_has_events(d40c);
        status = (status & D40_EVENTLINE_MASK(event)) >>
                 D40_EVENTLINE_POS(event);

        if (status != D40_DMA_RUN)

        spin_unlock_irqrestore(&d40c->lock, flags);

static bool d40_tx_is_linked(struct d40_chan *d40c)

        if (d40c->log_num != D40_PHY_CHAN)
                is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
        else
                is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
                                d40c->phy_chan->num * D40_DREG_PCDELTA +
                                D40_CHAN_REG_SDLNK) &
                          D40_SREG_LNK_PHYS_LNK_MASK;

static u32 d40_residue(struct d40_chan *d40c)

        if (d40c->log_num != D40_PHY_CHAN)
                num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
                          >> D40_MEM_LCSP2_ECNT_POS;
        else
                num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
                                 d40c->phy_chan->num * D40_DREG_PCDELTA +
                                 D40_CHAN_REG_SDELT) &
                           D40_SREG_ELEM_PHY_ECNT_MASK) >>
                          D40_SREG_ELEM_PHY_ECNT_POS;
        return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
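
/*
 * Illustrative arithmetic (not part of the original source), assuming the
 * data_width enum encodes the log2 of the element size as used above:
 * 16 remaining elements at STEDMA40_WORD_WIDTH (2) give a residue of
 * 16 * (1 << 2) = 64 bytes.
 */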

static int d40_resume(struct dma_chan *chan)

        struct d40_chan *d40c =
                container_of(chan, struct d40_chan, chan);
        unsigned long flags;

        spin_lock_irqsave(&d40c->lock, flags);

        if (d40c->base->rev == 0)
                if (d40c->log_num != D40_PHY_CHAN) {
                        res = d40_channel_execute_command(d40c,
                                                          D40_DMA_SUSPEND_REQ);

        /* If bytes are left to transfer or the tx is linked, resume the job */
        if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
                if (d40c->log_num != D40_PHY_CHAN)
                        d40_config_set_event(d40c, true);
                res = d40_channel_execute_command(d40c, D40_DMA_RUN);
        }

        spin_unlock_irqrestore(&d40c->lock, flags);

static u32 stedma40_residue(struct dma_chan *chan)

        struct d40_chan *d40c =
                container_of(chan, struct d40_chan, chan);
        unsigned long flags;

        spin_lock_irqsave(&d40c->lock, flags);
        bytes_left = d40_residue(d40c);
        spin_unlock_irqrestore(&d40c->lock, flags);
1505 int stedma40_set_psize(struct dma_chan *chan,
1509 struct d40_chan *d40c =
1510 container_of(chan, struct d40_chan, chan);
1511 unsigned long flags;
1513 spin_lock_irqsave(&d40c->lock, flags);
1515 if (d40c->log_num != D40_PHY_CHAN) {
1516 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1517 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1518 d40c->log_def.lcsp1 |= src_psize <<
1519 D40_MEM_LCSP1_SCFG_PSIZE_POS;
1520 d40c->log_def.lcsp3 |= dst_psize <<
1521 D40_MEM_LCSP1_SCFG_PSIZE_POS;
1525 if (src_psize == STEDMA40_PSIZE_PHY_1)
1526 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1528 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1529 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1530 D40_SREG_CFG_PSIZE_POS);
1531 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1534 if (dst_psize == STEDMA40_PSIZE_PHY_1)
1535 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1537 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1538 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1539 D40_SREG_CFG_PSIZE_POS);
1540 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1543 spin_unlock_irqrestore(&d40c->lock, flags);
1546 EXPORT_SYMBOL(stedma40_set_psize);
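
/*
 * Illustrative use (a sketch, not part of the original source; the elided
 * parameters of stedma40_set_psize() are assumed to be the src and dst
 * packet sizes used in the function body): configure 16-element bursts on
 * both ends of a physical channel.
 *
 *	stedma40_set_psize(chan, STEDMA40_PSIZE_PHY_16,
 *			   STEDMA40_PSIZE_PHY_16);
 */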

struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
                                                   struct scatterlist *sgl_dst,
                                                   struct scatterlist *sgl_src,
                                                   unsigned int sgl_len,
                                                   unsigned long dma_flags)

        struct d40_desc *d40d;
        struct d40_chan *d40c = container_of(chan, struct d40_chan,
        unsigned long flags;

        if (d40c->phy_chan == NULL) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Unallocated channel.\n", __func__);
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&d40c->lock, flags);
        d40d = d40_desc_get(d40c);

        d40d->lli_len = sgl_len;
        d40d->lli_tx_len = d40d->lli_len;
        d40d->txd.flags = dma_flags;

        if (d40c->log_num != D40_PHY_CHAN) {
                if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
                        d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;

                /*
                 * Check if there is space available in lcla. If not,
                 * split list into 1-length and run only in lcpa
                 */
                if (d40_lcla_id_get(d40c) != 0)
                        d40d->lli_tx_len = 1;

                if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
                        dev_err(&d40c->chan.dev->device,
                                "[%s] Out of memory\n", __func__);

                (void) d40_log_sg_to_lli(d40c->lcla.src_id,

                                         d40c->log_def.lcsp1,
                                         d40c->dma_cfg.src_info.data_width,
                                         dma_flags & DMA_PREP_INTERRUPT,

                                         d40c->base->plat_data->llis_per_log);

                (void) d40_log_sg_to_lli(d40c->lcla.dst_id,

                                         d40c->log_def.lcsp3,
                                         d40c->dma_cfg.dst_info.data_width,
                                         dma_flags & DMA_PREP_INTERRUPT,

                                         d40c->base->plat_data->llis_per_log);

        } else {
                if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
                        dev_err(&d40c->chan.dev->device,
                                "[%s] Out of memory\n", __func__);

                res = d40_phy_sg_to_lli(sgl_src,

                                        d40d->lli_phy.src_addr,

                                        d40c->dma_cfg.src_info.data_width,
                                        d40c->dma_cfg.src_info.psize,

                res = d40_phy_sg_to_lli(sgl_dst,

                                        d40d->lli_phy.dst_addr,

                                        d40c->dma_cfg.dst_info.data_width,
                                        d40c->dma_cfg.dst_info.psize,

                (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
                                      d40d->lli_pool.size, DMA_TO_DEVICE);

        dma_async_tx_descriptor_init(&d40d->txd, chan);

        d40d->txd.tx_submit = d40_tx_submit;

        spin_unlock_irqrestore(&d40c->lock, flags);

        spin_unlock_irqrestore(&d40c->lock, flags);

EXPORT_SYMBOL(stedma40_memcpy_sg);
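
/*
 * Illustrative client sketch (not part of the original source): copy one
 * mapped scatterlist to another of equal length on a channel obtained via
 * stedma40_filter(), then submit the descriptor; sgl_dst, sgl_src and
 * sgl_len are placeholders for client-owned, already mapped lists.
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = stedma40_memcpy_sg(chan, sgl_dst, sgl_src, sgl_len,
 *				 DMA_PREP_INTERRUPT);
 *	if (!IS_ERR(txd))
 *		txd->tx_submit(txd);
 */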

bool stedma40_filter(struct dma_chan *chan, void *data)

        struct stedma40_chan_cfg *info = data;
        struct d40_chan *d40c =
                container_of(chan, struct d40_chan, chan);

        err = d40_validate_conf(d40c, info);
        if (!err)
                d40c->dma_cfg = *info;

        err = d40_config_memcpy(d40c);

EXPORT_SYMBOL(stedma40_filter);
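
/*
 * Illustrative client sketch (not part of the original source): a slave
 * driver typically requests a channel through the dmaengine core and
 * passes its channel configuration through this filter; my_dma_cfg is a
 * placeholder for a client-owned struct stedma40_chan_cfg.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &my_dma_cfg);
 */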

/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)

        unsigned long flags;
        struct d40_chan *d40c =
                container_of(chan, struct d40_chan, chan);

        spin_lock_irqsave(&d40c->lock, flags);

        d40c->completed = chan->cookie = 1;

        /*
         * If no dma configuration is set (channel_type == 0)
         * use default configuration (memcpy)
         */
        if (d40c->dma_cfg.channel_type == 0) {
                err = d40_config_memcpy(d40c);
                if (err) {
                        dev_err(&d40c->chan.dev->device,
                                "[%s] Failed to configure memcpy channel\n",

        is_free_phy = (d40c->phy_chan == NULL);

        err = d40_allocate_channel(d40c);
        if (err) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Failed to allocate channel\n", __func__);

        /* Fill in basic CFG register values */
        d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
                    &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);

        if (d40c->log_num != D40_PHY_CHAN) {
                d40_log_cfg(&d40c->dma_cfg,
                            &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

                if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
                        d40c->lcpa = d40c->base->lcpa_base +
                                d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
                else
                        d40c->lcpa = d40c->base->lcpa_base +
                                d40c->dma_cfg.dst_dev_type *
                                D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;

        /*
         * Only write channel configuration to the DMA if the physical
         * resource is free. In case of multiple logical channels
         * on the same physical resource, only the first write is necessary.
         */

        err = d40_config_write(d40c);
        if (err) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Failed to configure channel\n",

        spin_unlock_irqrestore(&d40c->lock, flags);

static void d40_free_chan_resources(struct dma_chan *chan)

        struct d40_chan *d40c =
                container_of(chan, struct d40_chan, chan);
        unsigned long flags;

        if (d40c->phy_chan == NULL) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Cannot free unallocated channel\n", __func__);

        spin_lock_irqsave(&d40c->lock, flags);

        err = d40_free_dma(d40c);

        if (err)
                dev_err(&d40c->chan.dev->device,
                        "[%s] Failed to free channel\n", __func__);
        spin_unlock_irqrestore(&d40c->lock, flags);

static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,

                                                       unsigned long dma_flags)

        struct d40_desc *d40d;
        struct d40_chan *d40c = container_of(chan, struct d40_chan,
        unsigned long flags;

        if (d40c->phy_chan == NULL) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Channel is not allocated.\n", __func__);
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&d40c->lock, flags);
        d40d = d40_desc_get(d40c);

        if (d40d == NULL) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Descriptor is NULL\n", __func__);

        d40d->txd.flags = dma_flags;

        dma_async_tx_descriptor_init(&d40d->txd, chan);

        d40d->txd.tx_submit = d40_tx_submit;

        if (d40c->log_num != D40_PHY_CHAN) {

                if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
                        dev_err(&d40c->chan.dev->device,
                                "[%s] Out of memory\n", __func__);

                d40d->lli_tx_len = 1;

                d40_log_fill_lli(d40d->lli_log.src,

                                 d40c->log_def.lcsp1,
                                 d40c->dma_cfg.src_info.data_width,

                d40_log_fill_lli(d40d->lli_log.dst,

                                 d40c->log_def.lcsp3,
                                 d40c->dma_cfg.dst_info.data_width,

        } else {

                if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
                        dev_err(&d40c->chan.dev->device,
                                "[%s] Out of memory\n", __func__);

                err = d40_phy_fill_lli(d40d->lli_phy.src,

                                       d40c->dma_cfg.src_info.psize,

                                       d40c->dma_cfg.src_info.data_width,

                err = d40_phy_fill_lli(d40d->lli_phy.dst,

                                       d40c->dma_cfg.dst_info.psize,

                                       d40c->dma_cfg.dst_info.data_width,

                (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
                                      d40d->lli_pool.size, DMA_TO_DEVICE);

        spin_unlock_irqrestore(&d40c->lock, flags);

        dev_err(&d40c->chan.dev->device,
                "[%s] Failed filling in PHY LLI\n", __func__);
        d40_pool_lli_free(d40d);

        spin_unlock_irqrestore(&d40c->lock, flags);

static int d40_prep_slave_sg_log(struct d40_desc *d40d,
                                 struct d40_chan *d40c,
                                 struct scatterlist *sgl,
                                 unsigned int sg_len,
                                 enum dma_data_direction direction,
                                 unsigned long dma_flags)

        dma_addr_t dev_addr = 0;

        if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Out of memory\n", __func__);

        d40d->lli_len = sg_len;
        if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
                d40d->lli_tx_len = d40d->lli_len;
        else
                d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;

        /*
         * Check if there is space available in lcla.
         * If not, split list into 1-length and run only
         * in lcpa.
         */
        if (d40_lcla_id_get(d40c) != 0)
                d40d->lli_tx_len = 1;

        if (direction == DMA_FROM_DEVICE)
                if (d40c->runtime_addr)
                        dev_addr = d40c->runtime_addr;
                else
                        dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
        else if (direction == DMA_TO_DEVICE)
                if (d40c->runtime_addr)
                        dev_addr = d40c->runtime_addr;
                else
                        dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];

        total_size = d40_log_sg_to_dev(&d40c->lcla,

                                       d40c->dma_cfg.src_info.data_width,
                                       d40c->dma_cfg.dst_info.data_width,

                                       dma_flags & DMA_PREP_INTERRUPT,
                                       dev_addr, d40d->lli_tx_len,
                                       d40c->base->plat_data->llis_per_log);

static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
                                 struct d40_chan *d40c,
                                 struct scatterlist *sgl,
                                 unsigned int sgl_len,
                                 enum dma_data_direction direction,
                                 unsigned long dma_flags)

        dma_addr_t src_dev_addr;
        dma_addr_t dst_dev_addr;

        if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Out of memory\n", __func__);

        d40d->lli_len = sgl_len;
        d40d->lli_tx_len = sgl_len;

        if (direction == DMA_FROM_DEVICE) {

                if (d40c->runtime_addr)
                        src_dev_addr = d40c->runtime_addr;
                else
                        src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
        } else if (direction == DMA_TO_DEVICE) {
                if (d40c->runtime_addr)
                        dst_dev_addr = d40c->runtime_addr;
                else
                        dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];

        res = d40_phy_sg_to_lli(sgl,

                                d40d->lli_phy.src_addr,

                                d40c->dma_cfg.src_info.data_width,
                                d40c->dma_cfg.src_info.psize,

        res = d40_phy_sg_to_lli(sgl,

                                d40d->lli_phy.dst_addr,

                                d40c->dma_cfg.dst_info.data_width,
                                d40c->dma_cfg.dst_info.psize,

        (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
                              d40d->lli_pool.size, DMA_TO_DEVICE);

static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
                                                         struct scatterlist *sgl,
                                                         unsigned int sg_len,
                                                         enum dma_data_direction direction,
                                                         unsigned long dma_flags)

        struct d40_desc *d40d;
        struct d40_chan *d40c = container_of(chan, struct d40_chan,
        unsigned long flags;

        if (d40c->phy_chan == NULL) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Cannot prepare unallocated channel\n", __func__);
                return ERR_PTR(-EINVAL);

        if (d40c->dma_cfg.pre_transfer)
                d40c->dma_cfg.pre_transfer(chan,
                                           d40c->dma_cfg.pre_transfer_data,

        spin_lock_irqsave(&d40c->lock, flags);
        d40d = d40_desc_get(d40c);
        spin_unlock_irqrestore(&d40c->lock, flags);

        if (d40c->log_num != D40_PHY_CHAN)
                err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
                                            direction, dma_flags);
        else
                err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
                                            direction, dma_flags);

                dev_err(&d40c->chan.dev->device,
                        "[%s] Failed to prepare %s slave sg job: %d\n",
                        __func__,
                        d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);

        d40d->txd.flags = dma_flags;

        dma_async_tx_descriptor_init(&d40d->txd, chan);

        d40d->txd.tx_submit = d40_tx_submit;

static enum dma_status d40_tx_status(struct dma_chan *chan,
                                     dma_cookie_t cookie,
                                     struct dma_tx_state *txstate)

        struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;

        if (d40c->phy_chan == NULL) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Cannot read status of unallocated channel\n",

        last_complete = d40c->completed;
        last_used = chan->cookie;

        if (d40_is_paused(d40c))

        ret = dma_async_is_complete(cookie, last_complete, last_used);

        dma_set_tx_state(txstate, last_complete, last_used,
                         stedma40_residue(chan));

static void d40_issue_pending(struct dma_chan *chan)

        struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
        unsigned long flags;

        if (d40c->phy_chan == NULL) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Channel is not allocated!\n", __func__);

        spin_lock_irqsave(&d40c->lock, flags);

        /* Busy means that pending jobs are already being processed */
        if (!d40c->busy)
                (void) d40_queue_start(d40c);

        spin_unlock_irqrestore(&d40c->lock, flags);

/* Runtime reconfiguration extension */
static void d40_set_runtime_config(struct dma_chan *chan,
                                   struct dma_slave_config *config)

        struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
        struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
        enum dma_slave_buswidth config_addr_width;
        dma_addr_t config_addr;
        u32 config_maxburst;
        enum stedma40_periph_data_width addr_width;

        if (config->direction == DMA_FROM_DEVICE) {
                dma_addr_t dev_addr_rx =
                        d40c->base->plat_data->dev_rx[cfg->src_dev_type];

                config_addr = config->src_addr;
                if (dev_addr_rx)
                        dev_dbg(d40c->base->dev,
                                "channel has a pre-wired RX address %08x "
                                "overriding with %08x\n",
                                dev_addr_rx, config_addr);
                if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
                        dev_dbg(d40c->base->dev,
                                "channel was not configured for peripheral "
                                "to memory transfer (%d) overriding\n",

                cfg->dir = STEDMA40_PERIPH_TO_MEM;

                config_addr_width = config->src_addr_width;
                config_maxburst = config->src_maxburst;

        } else if (config->direction == DMA_TO_DEVICE) {
                dma_addr_t dev_addr_tx =
                        d40c->base->plat_data->dev_tx[cfg->dst_dev_type];

                config_addr = config->dst_addr;
                if (dev_addr_tx)
                        dev_dbg(d40c->base->dev,
                                "channel has a pre-wired TX address %08x "
                                "overriding with %08x\n",
                                dev_addr_tx, config_addr);
                if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
                        dev_dbg(d40c->base->dev,
                                "channel was not configured for memory "
                                "to peripheral transfer (%d) overriding\n",

                cfg->dir = STEDMA40_MEM_TO_PERIPH;

                config_addr_width = config->dst_addr_width;
                config_maxburst = config->dst_maxburst;

        } else {
                dev_err(d40c->base->dev,
                        "unrecognized channel direction %d\n",

        switch (config_addr_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                addr_width = STEDMA40_BYTE_WIDTH;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                addr_width = STEDMA40_HALFWORD_WIDTH;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                addr_width = STEDMA40_WORD_WIDTH;
                break;
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                addr_width = STEDMA40_DOUBLEWORD_WIDTH;
                break;
        default:
                dev_err(d40c->base->dev,
                        "illegal peripheral address width "

                        config->src_addr_width);

        if (config_maxburst >= 16)
                psize = STEDMA40_PSIZE_LOG_16;
        else if (config_maxburst >= 8)
                psize = STEDMA40_PSIZE_LOG_8;
        else if (config_maxburst >= 4)
                psize = STEDMA40_PSIZE_LOG_4;
        else
                psize = STEDMA40_PSIZE_LOG_1;

        /* Set up all the endpoint configs */
        cfg->src_info.data_width = addr_width;
        cfg->src_info.psize = psize;
        cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN;
        cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
        cfg->dst_info.data_width = addr_width;
        cfg->dst_info.psize = psize;
        cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN;
        cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;

        /* These settings will take precedence later */
        d40c->runtime_addr = config_addr;
        d40c->runtime_direction = config->direction;
        dev_dbg(d40c->base->dev,
                "configured channel %s for %s, data width %d, "
                "maxburst %d bytes, LE, no flow control\n",
                dma_chan_name(chan),
                (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
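
/*
 * Illustrative client sketch (not part of the original source): in this
 * kernel generation the runtime configuration above is reached through the
 * DMA_SLAVE_CONFIG control command handled by d40_control() below;
 * dev_fifo_addr is a placeholder for the peripheral's FIFO address.
 *
 *	struct dma_slave_config config = {
 *		.direction = DMA_FROM_DEVICE,
 *		.src_addr = dev_fifo_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,
 *	};
 *
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *				     (unsigned long) &config);
 */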

static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,

        unsigned long flags;
        struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

        if (d40c->phy_chan == NULL) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Channel is not allocated!\n", __func__);

        switch (cmd) {
        case DMA_TERMINATE_ALL:
                spin_lock_irqsave(&d40c->lock, flags);

                spin_unlock_irqrestore(&d40c->lock, flags);
        case DMA_PAUSE:
                return d40_pause(chan);
        case DMA_RESUME:
                return d40_resume(chan);
        case DMA_SLAVE_CONFIG:
                d40_set_runtime_config(chan,
                                       (struct dma_slave_config *) arg);

        /* Other commands are unimplemented */

/* Initialization functions */

static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
                                 struct d40_chan *chans, int offset,

        struct d40_chan *d40c;

        INIT_LIST_HEAD(&dma->channels);

        for (i = offset; i < offset + num_chans; i++) {

                d40c->chan.device = dma;

                /* Invalidate lcla element */
                d40c->lcla.src_id = -1;
                d40c->lcla.dst_id = -1;

                spin_lock_init(&d40c->lock);

                d40c->log_num = D40_PHY_CHAN;

                INIT_LIST_HEAD(&d40c->active);
                INIT_LIST_HEAD(&d40c->queue);
                INIT_LIST_HEAD(&d40c->client);

                tasklet_init(&d40c->tasklet, dma_tasklet,
                             (unsigned long) d40c);

                list_add_tail(&d40c->chan.device_node,

static int __init d40_dmaengine_init(struct d40_base *base,
                                     int num_reserved_chans)

        d40_chan_init(base, &base->dma_slave, base->log_chans,
                      0, base->num_log_chans);

        dma_cap_zero(base->dma_slave.cap_mask);
        dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);

        base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
        base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
        base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
        base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
        base->dma_slave.device_tx_status = d40_tx_status;
        base->dma_slave.device_issue_pending = d40_issue_pending;
        base->dma_slave.device_control = d40_control;
        base->dma_slave.dev = base->dev;

        err = dma_async_device_register(&base->dma_slave);

                        "[%s] Failed to register slave channels\n",

        d40_chan_init(base, &base->dma_memcpy, base->log_chans,
                      base->num_log_chans, base->plat_data->memcpy_len);

        dma_cap_zero(base->dma_memcpy.cap_mask);
        dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);

        base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
        base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
        base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
        base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
        base->dma_memcpy.device_tx_status = d40_tx_status;
        base->dma_memcpy.device_issue_pending = d40_issue_pending;
        base->dma_memcpy.device_control = d40_control;
        base->dma_memcpy.dev = base->dev;
        /*
         * This controller can only access addresses at even
         * 32 bit boundaries, i.e. 2^2.
         */
        base->dma_memcpy.copy_align = 2;

        err = dma_async_device_register(&base->dma_memcpy);

                        "[%s] Failed to register memcpy only channels\n",

        d40_chan_init(base, &base->dma_both, base->phy_chans,
                      0, num_reserved_chans);

        dma_cap_zero(base->dma_both.cap_mask);
        dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
        dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);

        base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
        base->dma_both.device_free_chan_resources = d40_free_chan_resources;
        base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
        base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
        base->dma_both.device_tx_status = d40_tx_status;
        base->dma_both.device_issue_pending = d40_issue_pending;
        base->dma_both.device_control = d40_control;
        base->dma_both.dev = base->dev;
        base->dma_both.copy_align = 2;
        err = dma_async_device_register(&base->dma_both);

                        "[%s] Failed to register logical and physical capable channels\n",

        dma_async_device_unregister(&base->dma_memcpy);

        dma_async_device_unregister(&base->dma_slave);

/* Initialization functions. */

static int __init d40_phy_res_init(struct d40_base *base)

        int num_phy_chans_avail = 0;
        int odd_even_bit = -2;

        val[0] = readl(base->virtbase + D40_DREG_PRSME);
        val[1] = readl(base->virtbase + D40_DREG_PRSMO);

        for (i = 0; i < base->num_phy_chans; i++) {
                base->phy_res[i].num = i;
                odd_even_bit += 2 * ((i % 2) == 0);
                if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
                        /* Mark security only channels as occupied */
                        base->phy_res[i].allocated_src = D40_ALLOC_PHY;
                        base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
                } else {
                        base->phy_res[i].allocated_src = D40_ALLOC_FREE;
                        base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
                        num_phy_chans_avail++;
                }
                spin_lock_init(&base->phy_res[i].lock);
        }

        /* Mark disabled channels as occupied */
        for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
                int chan = base->plat_data->disabled_channels[i];

                base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
                base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
                num_phy_chans_avail--;
        }

        dev_info(base->dev, "%d of %d physical DMA channels available\n",
                 num_phy_chans_avail, base->num_phy_chans);

        /* Verify settings extended vs standard */
        val[0] = readl(base->virtbase + D40_DREG_PRTYP);

        for (i = 0; i < base->num_phy_chans; i++) {

                if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
                    (val[0] & 0x3) != 1)

                                 "[%s] INFO: channel %d is misconfigured (%d)\n",
                                 __func__, i, val[0] & 0x3);

                val[0] = val[0] >> 2;
        }

        return num_phy_chans_avail;
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	static const struct d40_reg_val dma_id_regs[] = {
		/* Peripheral Id */
		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
		/*
		 * D40_DREG_PERIPHID2 depends on HW revision:
		 * MOP500/HREF ED has 0x0008,
		 * HREF V1 has 0x0028
		 */
		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},
		/* PrimeCell Ids */
		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
	};
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int i;
	u32 val;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "[%s] No matching clock found\n",
			__func__);
		goto failure;
	}

	clk_enable(clk);

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* HW version check */
	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
		if (dma_id_regs[i].val !=
		    readl(virtbase + dma_id_regs[i].reg)) {
			dev_err(&pdev->dev,
				"[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
				__func__,
				dma_id_regs[i].val,
				dma_id_regs[i].reg,
				readl(virtbase + dma_id_regs[i].reg));
			goto failure;
		}
	}

	/* Get silicon revision */
	val = readl(virtbase + D40_DREG_PERIPHID2);
	if ((val & 0xf) != D40_PERIPHID2_DESIGNER) {
		dev_err(&pdev->dev,
			"[%s] Unknown designer! Got %x wanted %x\n",
			__func__, val & 0xf, D40_PERIPHID2_DESIGNER);
		goto failure;
	}
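
	/*
	 * PERIPHID2 layout as used here: bits [3:0] carry the designer id
	 * and bits [7:4] the silicon revision, which is reported below and
	 * later stored in base->rev.
	 */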
	/* The number of physical channels on this HW */
	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
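	/*
	 * The low three bits of ICFG thus encode the channel count in
	 * steps of four: 0 means 4 channels, 7 means 32.
	 */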

	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
		 (val >> 4) & 0xf, res->start);

	plat_data = pdev->dev.platform_data;

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;
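
	/*
	 * One struct d40_chan is needed per physical channel, per logical
	 * channel in use and per memcpy channel; they are all carved out
	 * of the single allocation below, appended after struct d40_base.
	 */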
	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
		goto failure;
	}

	base->rev = (val >> 4) & 0xf;
	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The max number of logical channels is the number of event
		 * lines for all src devices and dst devices.
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}

	base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;

failure:
	if (clk) {
		clk_disable(clk);
		clk_put(clk);
	}
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}

static void __init d40_hw_init(struct d40_base *base)
{
	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC, .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;

	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);
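
	/*
	 * The loop below visits the physical channels from the highest
	 * number down and left-shifts the accumulators as it goes, so the
	 * lowest-numbered channels end up in the least significant bit
	 * positions of the mode/activation masks written out afterwards.
	 */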
	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);
}

static int __init d40_lcla_allocate(struct d40_base *base)
{
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned.
	 * To fulfill this hardware requirement without wasting 256 KiB,
	 * we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculate how many pages are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
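
	/*
	 * Note that lcla_pool.pages is passed to __get_free_pages() and
	 * free_pages() below as the allocation order, so the actual
	 * allocation is rounded up to a power-of-two number of pages.
	 */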
	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {
			dev_err(base->dev,
				"[%s] Failed to allocate %d pages.\n",
				__func__, base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	/* Release the unaligned attempts that came before the aligned one */
	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts and no success finding the correct
		 * alignment, fall back to allocating a bigger buffer.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		/*
		 * Over-allocating LCLA_ALIGNMENT extra bytes guarantees an
		 * aligned address exists somewhere inside the buffer.
		 */
		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
failure:
	kfree(page_list);
	return ret;
}
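
/*
 * Roughly, probe proceeds as: detect and map the hardware, initialize
 * the physical channel resources, map the LCPA region, allocate the
 * LCLA area, hook up the interrupt, register with the dmaengine core
 * and finally program the controller's default state via d40_hw_init().
 */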
static int __init d40_probe(struct platform_device *pdev)
{
	int err;
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);
	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		dev_err(&pdev->dev,
			"[%s] No \"lcpa\" memory resource\n",
			__func__);
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		dev_err(&pdev->dev,
			"[%s] Failed to request LCPA region 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev,
			"[%s] Failed to ioremap LCPA region\n",
			__func__);
		goto failure;
	}

	ret = d40_lcla_allocate(base);
	if (ret) {
		dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
			__func__);
		goto failure;
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->lcla_pool.num_blocks = base->num_phy_chans;

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
		goto failure;
	}

	err = d40_dmaengine_init(base, num_reserved_chans);
	if (err)
		goto failure;

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);
		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);
		if (base->lcla_pool.base_unaligned)
			kfree(base->lcla_pool.base_unaligned);
		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
	return ret;
}

static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};

int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
arch_initcall(stedma40_init);
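
/*
 * arch_initcall() registers this driver a few initcall levels before
 * ordinary device initcalls, so the DMA channels are already available
 * when most other drivers probe and ask the dmaengine core for one.
 */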