2 * Copyright (C) Ericsson AB 2007-2008
3 * Copyright (C) ST-Ericsson SA 2008-2010
4 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
5 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
6 * License terms: GNU General Public License (GPL) version 2
9 #include <linux/dma-mapping.h>
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/export.h>
13 #include <linux/dmaengine.h>
14 #include <linux/platform_device.h>
15 #include <linux/clk.h>
16 #include <linux/delay.h>
17 #include <linux/err.h>
18 #include <linux/amba/bus.h>
20 #include <plat/ste_dma40.h>
22 #include "ste_dma40_ll.h"
24 #define D40_NAME "dma40"
26 #define D40_PHY_CHAN -1
28 /* For masking out/in 2 bit channel positions */
29 #define D40_CHAN_POS(chan) (2 * (chan / 2))
30 #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
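/*
 * Illustrative note: each even/odd pair of physical channels shares the same
 * 2-bit position, e.g. channels 6 and 7 both get D40_CHAN_POS() == 6 and the
 * mask 0x3 << 6; the even channel's bits are read from D40_DREG_ACTIVE and
 * the odd channel's from D40_DREG_ACTIVO (see d40_channel_execute_command()).
 */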
32 /* Maximum iterations taken before giving up suspending a channel */
33 #define D40_SUSPEND_MAX_IT 500
35 /* Hardware requirement on LCLA alignment */
36 #define LCLA_ALIGNMENT 0x40000
38 /* Max number of links per event group */
39 #define D40_LCLA_LINK_PER_EVENT_GRP 128
40 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
42 /* Attempts before giving up trying to get pages that are aligned */
43 #define MAX_LCLA_ALLOC_ATTEMPTS 256
45 /* Bit markings for allocation map */
46 #define D40_ALLOC_FREE (1 << 31)
47 #define D40_ALLOC_PHY (1 << 30)
48 #define D40_ALLOC_LOG_FREE 0
51 * enum d40_command - The different commands and/or statuses.
53 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
54 * @D40_DMA_RUN: The DMA channel is RUNNING or the command is RUN.
55 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
56 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
61 D40_DMA_SUSPEND_REQ = 2,
66 * struct d40_lli_pool - Structure for keeping LLIs in memory
68 * @base: Pointer to memory area when the pre_alloc_lli's are not large
69 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
70 * pre_alloc_lli is used.
71 * @dma_addr: DMA address, if mapped
72 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
73 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
74 * one buffer to one buffer.
80 /* Space for dst and src, plus an extra for padding */
81 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
85 * struct d40_desc - A descriptor is one DMA job.
87 * @lli_phy: LLI settings for physical channel. Both src and dst
88 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
90 * @lli_log: Same as above but for logical channels.
91 * @lli_pool: The pool with two entries pre-allocated.
92 * @lli_len: Number of llis of current descriptor.
93 * @lli_current: Number of transferred llis.
94 * @lcla_alloc: Number of LCLA entries allocated.
95 * @txd: DMA engine struct. Used, among other things, for communication
98 * @is_in_client_list: true if the client owns this descriptor.
101 * This descriptor is used for both logical and physical transfers.
105 struct d40_phy_lli_bidir lli_phy;
107 struct d40_log_lli_bidir lli_log;
109 struct d40_lli_pool lli_pool;
114 struct dma_async_tx_descriptor txd;
115 struct list_head node;
117 bool is_in_client_list;
122 * struct d40_lcla_pool - LCLA pool settings and data.
124 * @base: The virtual address of LCLA. 18 bit aligned.
125 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
126 * This pointer is only there for clean-up on error.
127 * @pages: The number of pages needed for all physical channels.
128 * Only used later for clean-up on error
129 * @lock: Lock to protect the content in this struct.
130 * @alloc_map: Big map over which LCLA entry is owned by which job.
132 struct d40_lcla_pool {
135 void *base_unaligned;
138 struct d40_desc **alloc_map;
142 * struct d40_phy_res - struct for handling eventlines mapped to physical
145 * @lock: A lock protecting this entity.
146 * @num: The physical channel number of this entity.
147 * @allocated_src: Bit mapped to show which src event lines are mapped to
148 * this physical channel. Can also be free or physically allocated.
149 * @allocated_dst: Same as for src but is dst.
150 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
163 * struct d40_chan - Struct that describes a channel.
165 * @lock: A spinlock to protect this struct.
166 * @log_num: The logical number, if any, of this channel.
167 * @completed: Starts with 1, after first interrupt it is set to dma engine's
169 * @pending_tx: The number of pending transfers. Used between interrupt handler
171 * @busy: Set to true when transfer is ongoing on this channel.
172 * @phy_chan: Pointer to physical channel which this instance runs on. If this
173 * pointer is NULL, then the channel is not allocated.
174 * @chan: DMA engine handle.
175 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
176 * transfer and call client callback.
177 * @client: Client owned descriptor list.
178 * @pending_queue: Submitted jobs, to be issued by issue_pending()
179 * @active: Active descriptor.
180 * @queue: Queued jobs.
181 * @prepare_queue: Prepared jobs.
182 * @dma_cfg: The client configuration of this dma channel.
183 * @configured: whether the dma_cfg configuration is valid
184 * @base: Pointer to the device instance struct.
185 * @src_def_cfg: Default cfg register setting for src.
186 * @dst_def_cfg: Default cfg register setting for dst.
187 * @log_def: Default logical channel settings.
188 * @lcla: Space for one dst src pair for logical channel transfers.
189 * @lcpa: Pointer to dst and src lcpa settings.
190 * @runtime_addr: runtime configured address.
191 * @runtime_direction: runtime configured direction.
193 * This struct can either "be" a logical or a physical channel.
198 /* ID of the most recently completed transfer */
202 struct d40_phy_res *phy_chan;
203 struct dma_chan chan;
204 struct tasklet_struct tasklet;
205 struct list_head client;
206 struct list_head pending_queue;
207 struct list_head active;
208 struct list_head queue;
209 struct list_head prepare_queue;
210 struct stedma40_chan_cfg dma_cfg;
212 struct d40_base *base;
213 /* Default register configurations */
216 struct d40_def_lcsp log_def;
217 struct d40_log_lli_full *lcpa;
218 /* Runtime reconfiguration */
219 dma_addr_t runtime_addr;
220 enum dma_data_direction runtime_direction;
224 * struct d40_base - The big global struct, one for each probe'd instance.
226 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
227 * @execmd_lock: Lock for execute command usage since several channels share
228 * the same physical register.
229 * @dev: The device structure.
230 * @virtbase: The virtual base address of the DMA's registers.
231 * @rev: silicon revision detected.
232 * @clk: Pointer to the DMA clock structure.
233 * @phy_start: Physical memory start of the DMA registers.
234 * @phy_size: Size of the DMA register map.
235 * @irq: The IRQ number.
236 * @num_phy_chans: The number of physical channels. Read from HW. This
237 * is the number of available channels for this driver, not counting "Secure
238 * mode" allocated physical channels.
239 * @num_log_chans: The number of logical channels. Calculated from
241 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
242 * @dma_slave: dma_device channels that can only do slave transfers.
243 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
244 * @log_chans: Room for all possible logical channels in system.
245 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
246 * to log_chans entries.
247 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
248 * to phy_chans entries.
249 * @plat_data: Pointer to provided platform_data which is the driver
251 * @phy_res: Vector containing all physical channels.
252 * @lcla_pool: lcla pool settings and data.
253 * @lcpa_base: The virtual mapped address of LCPA.
254 * @phy_lcpa: The physical address of the LCPA.
255 * @lcpa_size: The size of the LCPA area.
256 * @desc_slab: cache for descriptors.
259 spinlock_t interrupt_lock;
260 spinlock_t execmd_lock;
262 void __iomem *virtbase;
265 phys_addr_t phy_start;
266 resource_size_t phy_size;
270 struct dma_device dma_both;
271 struct dma_device dma_slave;
272 struct dma_device dma_memcpy;
273 struct d40_chan *phy_chans;
274 struct d40_chan *log_chans;
275 struct d40_chan **lookup_log_chans;
276 struct d40_chan **lookup_phy_chans;
277 struct stedma40_platform_data *plat_data;
278 /* Physical half channels */
279 struct d40_phy_res *phy_res;
280 struct d40_lcla_pool lcla_pool;
283 resource_size_t lcpa_size;
284 struct kmem_cache *desc_slab;
288 * struct d40_interrupt_lookup - lookup table for interrupt handler
290 * @src: Interrupt mask register.
291 * @clr: Interrupt clear register.
292 * @is_error: true if this is an error interrupt.
293 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
294 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
296 struct d40_interrupt_lookup {
304 * struct d40_reg_val - simple lookup struct
306 * @reg: The register.
307 * @val: The value that belongs to the register in reg.
314 static struct device *chan2dev(struct d40_chan *d40c)
316 return &d40c->chan.dev->device;
319 static bool chan_is_physical(struct d40_chan *chan)
321 return chan->log_num == D40_PHY_CHAN;
324 static bool chan_is_logical(struct d40_chan *chan)
326 return !chan_is_physical(chan);
329 static void __iomem *chan_base(struct d40_chan *chan)
331 return chan->base->virtbase + D40_DREG_PCBASE +
332 chan->phy_chan->num * D40_DREG_PCDELTA;
335 #define d40_err(dev, format, arg...) \
336 dev_err(dev, "[%s] " format, __func__, ## arg)
338 #define chan_err(d40c, format, arg...) \
339 d40_err(chan2dev(d40c), format, ## arg)
341 static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
344 bool is_log = chan_is_logical(d40c);
349 align = sizeof(struct d40_log_lli);
351 align = sizeof(struct d40_phy_lli);
354 base = d40d->lli_pool.pre_alloc_lli;
355 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
356 d40d->lli_pool.base = NULL;
358 d40d->lli_pool.size = lli_len * 2 * align;
360 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
361 d40d->lli_pool.base = base;
363 if (d40d->lli_pool.base == NULL)
368 d40d->lli_log.src = PTR_ALIGN(base, align);
369 d40d->lli_log.dst = d40d->lli_log.src + lli_len;
371 d40d->lli_pool.dma_addr = 0;
373 d40d->lli_phy.src = PTR_ALIGN(base, align);
374 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
376 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
381 if (dma_mapping_error(d40c->base->dev,
382 d40d->lli_pool.dma_addr)) {
383 kfree(d40d->lli_pool.base);
384 d40d->lli_pool.base = NULL;
385 d40d->lli_pool.dma_addr = 0;
393 static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
395 if (d40d->lli_pool.dma_addr)
396 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
397 d40d->lli_pool.size, DMA_TO_DEVICE);
399 kfree(d40d->lli_pool.base);
400 d40d->lli_pool.base = NULL;
401 d40d->lli_pool.size = 0;
402 d40d->lli_log.src = NULL;
403 d40d->lli_log.dst = NULL;
404 d40d->lli_phy.src = NULL;
405 d40d->lli_phy.dst = NULL;
408 static int d40_lcla_alloc_one(struct d40_chan *d40c,
409 struct d40_desc *d40d)
416 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
418 p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
421 * Allocate both src and dst at the same time; therefore the halves
422 * start at 1, since 0 can't be used as it is the end marker.
424 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
425 if (!d40c->base->lcla_pool.alloc_map[p + i]) {
426 d40c->base->lcla_pool.alloc_map[p + i] = d40d;
433 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
438 static int d40_lcla_free_all(struct d40_chan *d40c,
439 struct d40_desc *d40d)
445 if (chan_is_physical(d40c))
448 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
450 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
451 if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
452 D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
453 d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
454 D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
456 if (d40d->lcla_alloc == 0) {
463 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
469 static void d40_desc_remove(struct d40_desc *d40d)
471 list_del(&d40d->node);
474 static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
476 struct d40_desc *desc = NULL;
478 if (!list_empty(&d40c->client)) {
482 list_for_each_entry_safe(d, _d, &d40c->client, node)
483 if (async_tx_test_ack(&d->txd)) {
486 memset(desc, 0, sizeof(*desc));
492 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
495 INIT_LIST_HEAD(&desc->node);
500 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
503 d40_pool_lli_free(d40c, d40d);
504 d40_lcla_free_all(d40c, d40d);
505 kmem_cache_free(d40c->base->desc_slab, d40d);
508 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
510 list_add_tail(&desc->node, &d40c->active);
513 static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
515 struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
516 struct d40_phy_lli *lli_src = desc->lli_phy.src;
517 void __iomem *base = chan_base(chan);
519 writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
520 writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
521 writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
522 writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
524 writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
525 writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
526 writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
527 writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
530 static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
532 struct d40_lcla_pool *pool = &chan->base->lcla_pool;
533 struct d40_log_lli_bidir *lli = &desc->lli_log;
534 int lli_current = desc->lli_current;
535 int lli_len = desc->lli_len;
536 bool cyclic = desc->cyclic;
537 int curr_lcla = -EINVAL;
542 * We may have partially running cyclic transfers, in case we didn't get
543 * enough LCLA entries.
545 linkback = cyclic && lli_current == 0;
548 * For linkback, we need one LCLA even with only one link, because we
549 * can't link back to the one in LCPA space
551 if (linkback || (lli_len - lli_current > 1)) {
552 curr_lcla = d40_lcla_alloc_one(chan, desc);
553 first_lcla = curr_lcla;
557 * For linkback, we normally load the LCPA in the loop since we need to
558 * link it to the second LCLA and not the first. However, if we
559 * couldn't even get a first LCLA, then we have to run in LCPA and
562 if (!linkback || curr_lcla == -EINVAL) {
563 unsigned int flags = 0;
565 if (curr_lcla == -EINVAL)
566 flags |= LLI_TERM_INT;
568 d40_log_lli_lcpa_write(chan->lcpa,
569 &lli->dst[lli_current],
570 &lli->src[lli_current],
579 for (; lli_current < lli_len; lli_current++) {
580 unsigned int lcla_offset = chan->phy_chan->num * 1024 +
582 struct d40_log_lli *lcla = pool->base + lcla_offset;
583 unsigned int flags = 0;
586 if (lli_current + 1 < lli_len)
587 next_lcla = d40_lcla_alloc_one(chan, desc);
589 next_lcla = linkback ? first_lcla : -EINVAL;
591 if (cyclic || next_lcla == -EINVAL)
592 flags |= LLI_TERM_INT;
594 if (linkback && curr_lcla == first_lcla) {
595 /* First link goes in both LCPA and LCLA */
596 d40_log_lli_lcpa_write(chan->lcpa,
597 &lli->dst[lli_current],
598 &lli->src[lli_current],
603 * One unused LCLA in the cyclic case if the very first
606 d40_log_lli_lcla_write(lcla,
607 &lli->dst[lli_current],
608 &lli->src[lli_current],
611 dma_sync_single_range_for_device(chan->base->dev,
612 pool->dma_addr, lcla_offset,
613 2 * sizeof(struct d40_log_lli),
616 curr_lcla = next_lcla;
618 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
625 desc->lli_current = lli_current;
628 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
630 if (chan_is_physical(d40c)) {
631 d40_phy_lli_load(d40c, d40d);
632 d40d->lli_current = d40d->lli_len;
634 d40_log_lli_to_lcxa(d40c, d40d);
637 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
641 if (list_empty(&d40c->active))
644 d = list_first_entry(&d40c->active,
650 /* remove desc from current queue and add it to the pending_queue */
651 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
653 d40_desc_remove(desc);
654 desc->is_in_client_list = false;
655 list_add_tail(&desc->node, &d40c->pending_queue);
658 static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
662 if (list_empty(&d40c->pending_queue))
665 d = list_first_entry(&d40c->pending_queue,
671 static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
675 if (list_empty(&d40c->queue))
678 d = list_first_entry(&d40c->queue,
684 static int d40_psize_2_burst_size(bool is_log, int psize)
687 if (psize == STEDMA40_PSIZE_LOG_1)
690 if (psize == STEDMA40_PSIZE_PHY_1)
698 * The dma only supports transmitting packets up to
699 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
700 * dma elements required to send the entire sg list.
702 static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
705 u32 max_w = max(data_width1, data_width2);
706 u32 min_w = min(data_width1, data_width2);
707 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
709 if (seg_max > STEDMA40_MAX_SEG_SIZE)
710 seg_max -= (1 << max_w);
712 if (!IS_ALIGNED(size, 1 << max_w))
718 dmalen = size / seg_max;
719 if (dmalen * seg_max < size)
725 static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
726 u32 data_width1, u32 data_width2)
728 struct scatterlist *sg;
733 for_each_sg(sgl, sg, sg_len, i) {
734 ret = d40_size_2_dmalen(sg_dma_len(sg),
735 data_width1, data_width2);
743 /* Support functions for logical channels */
745 static int d40_channel_execute_command(struct d40_chan *d40c,
746 enum d40_command command)
750 void __iomem *active_reg;
755 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
757 if (d40c->phy_chan->num % 2 == 0)
758 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
760 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
762 if (command == D40_DMA_SUSPEND_REQ) {
763 status = (readl(active_reg) &
764 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
765 D40_CHAN_POS(d40c->phy_chan->num);
767 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
771 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
772 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
775 if (command == D40_DMA_SUSPEND_REQ) {
777 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
778 status = (readl(active_reg) &
779 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
780 D40_CHAN_POS(d40c->phy_chan->num);
784 * Reduce the number of bus accesses while
785 * waiting for the DMA to suspend.
789 if (status == D40_DMA_STOP ||
790 status == D40_DMA_SUSPENDED)
794 if (i == D40_SUSPEND_MAX_IT) {
796 "unable to suspend the chl %d (log: %d) status %x\n",
797 d40c->phy_chan->num, d40c->log_num,
805 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
809 static void d40_term_all(struct d40_chan *d40c)
811 struct d40_desc *d40d;
814 /* Release active descriptors */
815 while ((d40d = d40_first_active_get(d40c))) {
816 d40_desc_remove(d40d);
817 d40_desc_free(d40c, d40d);
820 /* Release queued descriptors waiting for transfer */
821 while ((d40d = d40_first_queued(d40c))) {
822 d40_desc_remove(d40d);
823 d40_desc_free(d40c, d40d);
826 /* Release pending descriptors */
827 while ((d40d = d40_first_pending(d40c))) {
828 d40_desc_remove(d40d);
829 d40_desc_free(d40c, d40d);
832 /* Release client owned descriptors */
833 if (!list_empty(&d40c->client))
834 list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
835 d40_desc_remove(d40d);
836 d40_desc_free(d40c, d40d);
839 /* Release descriptors in prepare queue */
840 if (!list_empty(&d40c->prepare_queue))
841 list_for_each_entry_safe(d40d, _d,
842 &d40c->prepare_queue, node) {
843 d40_desc_remove(d40d);
844 d40_desc_free(d40c, d40d);
847 d40c->pending_tx = 0;
851 static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
854 void __iomem *addr = chan_base(d40c) + reg;
858 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
859 | ~D40_EVENTLINE_MASK(event), addr);
864 * The hardware sometimes doesn't register the enable when src and dst
865 * event lines are active on the same logical channel. Retry to ensure
866 * it does. Usually only one retry is sufficient.
870 writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
871 | ~D40_EVENTLINE_MASK(event), addr);
873 if (readl(addr) & D40_EVENTLINE_MASK(event))
878 dev_dbg(chan2dev(d40c),
879 "[%s] workaround enable S%cLNK (%d tries)\n",
880 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
886 static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
890 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
892 /* Enable event line connected to device (or memcpy) */
893 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
894 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
895 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
897 __d40_config_set_event(d40c, do_enable, event,
901 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
902 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
904 __d40_config_set_event(d40c, do_enable, event,
908 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
911 static u32 d40_chan_has_events(struct d40_chan *d40c)
913 void __iomem *chanbase = chan_base(d40c);
916 val = readl(chanbase + D40_CHAN_REG_SSLNK);
917 val |= readl(chanbase + D40_CHAN_REG_SDLNK);
922 static u32 d40_get_prmo(struct d40_chan *d40c)
924 static const unsigned int phy_map[] = {
925 [STEDMA40_PCHAN_BASIC_MODE]
926 = D40_DREG_PRMO_PCHAN_BASIC,
927 [STEDMA40_PCHAN_MODULO_MODE]
928 = D40_DREG_PRMO_PCHAN_MODULO,
929 [STEDMA40_PCHAN_DOUBLE_DST_MODE]
930 = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
932 static const unsigned int log_map[] = {
933 [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
934 = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
935 [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
936 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
937 [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
938 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
941 if (chan_is_physical(d40c))
942 return phy_map[d40c->dma_cfg.mode_opt];
944 return log_map[d40c->dma_cfg.mode_opt];
947 static void d40_config_write(struct d40_chan *d40c)
952 /* Odd addresses are even addresses + 4 */
953 addr_base = (d40c->phy_chan->num % 2) * 4;
954 /* Setup channel mode to logical or physical */
955 var = ((u32)(chan_is_logical(d40c)) + 1) <<
956 D40_CHAN_POS(d40c->phy_chan->num);
957 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
959 /* Setup operational mode option register */
960 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
962 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
964 if (chan_is_logical(d40c)) {
965 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
966 & D40_SREG_ELEM_LOG_LIDX_MASK;
967 void __iomem *chanbase = chan_base(d40c);
969 /* Set default config for CFG reg */
970 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
971 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
973 /* Set LIDX for lcla */
974 writel(lidx, chanbase + D40_CHAN_REG_SSELT);
975 writel(lidx, chanbase + D40_CHAN_REG_SDELT);
979 static u32 d40_residue(struct d40_chan *d40c)
983 if (chan_is_logical(d40c))
984 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
985 >> D40_MEM_LCSP2_ECNT_POS;
987 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
988 num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
989 >> D40_SREG_ELEM_PHY_ECNT_POS;
992 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
995 static bool d40_tx_is_linked(struct d40_chan *d40c)
999 if (chan_is_logical(d40c))
1000 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1002 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
1003 & D40_SREG_LNK_PHYS_LNK_MASK;
1008 static int d40_pause(struct d40_chan *d40c)
1011 unsigned long flags;
1016 spin_lock_irqsave(&d40c->lock, flags);
1018 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1020 if (chan_is_logical(d40c)) {
1021 d40_config_set_event(d40c, false);
1022 /* Resume the other logical channels if any */
1023 if (d40_chan_has_events(d40c))
1024 res = d40_channel_execute_command(d40c,
1029 spin_unlock_irqrestore(&d40c->lock, flags);
1033 static int d40_resume(struct d40_chan *d40c)
1036 unsigned long flags;
1041 spin_lock_irqsave(&d40c->lock, flags);
1043 if (d40c->base->rev == 0)
1044 if (chan_is_logical(d40c)) {
1045 res = d40_channel_execute_command(d40c,
1046 D40_DMA_SUSPEND_REQ);
1050 /* If bytes left to transfer or linked tx resume job */
1051 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
1053 if (chan_is_logical(d40c))
1054 d40_config_set_event(d40c, true);
1056 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1060 spin_unlock_irqrestore(&d40c->lock, flags);
1064 static int d40_terminate_all(struct d40_chan *chan)
1066 unsigned long flags;
1069 ret = d40_pause(chan);
1070 if (!ret && chan_is_physical(chan))
1071 ret = d40_channel_execute_command(chan, D40_DMA_STOP);
1073 spin_lock_irqsave(&chan->lock, flags);
1075 spin_unlock_irqrestore(&chan->lock, flags);
1080 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1082 struct d40_chan *d40c = container_of(tx->chan,
1085 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1086 unsigned long flags;
1088 spin_lock_irqsave(&d40c->lock, flags);
1090 d40c->chan.cookie++;
1092 if (d40c->chan.cookie < 0)
1093 d40c->chan.cookie = 1;
1095 d40d->txd.cookie = d40c->chan.cookie;
1097 d40_desc_queue(d40c, d40d);
1099 spin_unlock_irqrestore(&d40c->lock, flags);
1104 static int d40_start(struct d40_chan *d40c)
1106 if (d40c->base->rev == 0) {
1109 if (chan_is_logical(d40c)) {
1110 err = d40_channel_execute_command(d40c,
1111 D40_DMA_SUSPEND_REQ);
1117 if (chan_is_logical(d40c))
1118 d40_config_set_event(d40c, true);
1120 return d40_channel_execute_command(d40c, D40_DMA_RUN);
1123 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1125 struct d40_desc *d40d;
1128 /* Start queued jobs, if any */
1129 d40d = d40_first_queued(d40c);
1134 /* Remove from queue */
1135 d40_desc_remove(d40d);
1137 /* Add to active queue */
1138 d40_desc_submit(d40c, d40d);
1140 /* Initiate DMA job */
1141 d40_desc_load(d40c, d40d);
1144 err = d40_start(d40c);
1153 /* called from interrupt context */
1154 static void dma_tc_handle(struct d40_chan *d40c)
1156 struct d40_desc *d40d;
1158 /* Get first active entry from list */
1159 d40d = d40_first_active_get(d40c);
1166 * If this was a partially loaded list, we need to reload
1167 * it, but only when the list is completed. We need to check
1168 * for done because the interrupt will hit for every link, and
1169 * not just the last one.
1171 if (d40d->lli_current < d40d->lli_len
1172 && !d40_tx_is_linked(d40c)
1173 && !d40_residue(d40c)) {
1174 d40_lcla_free_all(d40c, d40d);
1175 d40_desc_load(d40c, d40d);
1176 (void) d40_start(d40c);
1178 if (d40d->lli_current == d40d->lli_len)
1179 d40d->lli_current = 0;
1182 d40_lcla_free_all(d40c, d40d);
1184 if (d40d->lli_current < d40d->lli_len) {
1185 d40_desc_load(d40c, d40d);
1187 (void) d40_start(d40c);
1191 if (d40_queue_start(d40c) == NULL)
1196 tasklet_schedule(&d40c->tasklet);
1200 static void dma_tasklet(unsigned long data)
1202 struct d40_chan *d40c = (struct d40_chan *) data;
1203 struct d40_desc *d40d;
1204 unsigned long flags;
1205 bool callback_active;
1206 dma_async_tx_callback callback;
1207 void *callback_param;
1209 spin_lock_irqsave(&d40c->lock, flags);
1211 /* Get first active entry from list */
1212 d40d = d40_first_active_get(d40c);
1217 d40c->completed = d40d->txd.cookie;
1220 * When terminating a channel, pending_tx is set to zero.
1221 * This prevents any finished active jobs from being returned to the client.
1223 if (d40c->pending_tx == 0) {
1224 spin_unlock_irqrestore(&d40c->lock, flags);
1228 /* Callback to client */
1229 callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
1230 callback = d40d->txd.callback;
1231 callback_param = d40d->txd.callback_param;
1233 if (!d40d->cyclic) {
1234 if (async_tx_test_ack(&d40d->txd)) {
1235 d40_desc_remove(d40d);
1236 d40_desc_free(d40c, d40d);
1238 if (!d40d->is_in_client_list) {
1239 d40_desc_remove(d40d);
1240 d40_lcla_free_all(d40c, d40d);
1241 list_add_tail(&d40d->node, &d40c->client);
1242 d40d->is_in_client_list = true;
1249 if (d40c->pending_tx)
1250 tasklet_schedule(&d40c->tasklet);
1252 spin_unlock_irqrestore(&d40c->lock, flags);
1254 if (callback_active && callback)
1255 callback(callback_param);
1260 /* Rescue manoeuvre if receiving double interrupts */
1261 if (d40c->pending_tx > 0)
1263 spin_unlock_irqrestore(&d40c->lock, flags);
1266 static irqreturn_t d40_handle_interrupt(int irq, void *data)
1268 static const struct d40_interrupt_lookup il[] = {
1269 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
1270 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
1271 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
1272 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
1273 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
1274 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
1275 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
1276 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
1277 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
1278 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
1282 u32 regs[ARRAY_SIZE(il)];
1286 struct d40_chan *d40c;
1287 unsigned long flags;
1288 struct d40_base *base = data;
1290 spin_lock_irqsave(&base->interrupt_lock, flags);
1292 /* Read interrupt status of both logical and physical channels */
1293 for (i = 0; i < ARRAY_SIZE(il); i++)
1294 regs[i] = readl(base->virtbase + il[i].src);
1298 chan = find_next_bit((unsigned long *)regs,
1299 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
1301 /* No more set bits found? */
1302 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
1305 row = chan / BITS_PER_LONG;
1306 idx = chan & (BITS_PER_LONG - 1);
1309 writel(1 << idx, base->virtbase + il[row].clr);
1311 if (il[row].offset == D40_PHY_CHAN)
1312 d40c = base->lookup_phy_chans[idx];
1314 d40c = base->lookup_log_chans[il[row].offset + idx];
1315 spin_lock(&d40c->lock);
1317 if (!il[row].is_error)
1318 dma_tc_handle(d40c);
1320 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1321 chan, il[row].offset, idx);
1323 spin_unlock(&d40c->lock);
1326 spin_unlock_irqrestore(&base->interrupt_lock, flags);
1331 static int d40_validate_conf(struct d40_chan *d40c,
1332 struct stedma40_chan_cfg *conf)
1335 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
1336 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
1337 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1340 chan_err(d40c, "Invalid direction.\n");
1344 if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
1345 d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
1346 d40c->runtime_addr == 0) {
1348 chan_err(d40c, "Invalid TX channel address (%d)\n",
1349 conf->dst_dev_type);
1353 if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
1354 d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
1355 d40c->runtime_addr == 0) {
1356 chan_err(d40c, "Invalid RX channel address (%d)\n",
1357 conf->src_dev_type);
1361 if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
1362 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
1363 chan_err(d40c, "Invalid dst\n");
1367 if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
1368 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
1369 chan_err(d40c, "Invalid src\n");
1373 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
1374 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
1375 chan_err(d40c, "No event line\n");
1379 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
1380 (src_event_group != dst_event_group)) {
1381 chan_err(d40c, "Invalid event group\n");
1385 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
1387 * DMAC HW supports it. Will be added to this driver,
1388 * in case any dma client requires it.
1390 chan_err(d40c, "periph to periph not supported\n");
1394 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1395 (1 << conf->src_info.data_width) !=
1396 d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1397 (1 << conf->dst_info.data_width)) {
1399 * The DMAC hardware only supports
1400 * src (burst x width) == dst (burst x width)
1403 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1410 static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
1411 int log_event_line, bool is_log)
1413 unsigned long flags;
1414 spin_lock_irqsave(&phy->lock, flags);
1416 /* Physical interrupts are masked per physical full channel */
1417 if (phy->allocated_src == D40_ALLOC_FREE &&
1418 phy->allocated_dst == D40_ALLOC_FREE) {
1419 phy->allocated_dst = D40_ALLOC_PHY;
1420 phy->allocated_src = D40_ALLOC_PHY;
1426 /* Logical channel */
1428 if (phy->allocated_src == D40_ALLOC_PHY)
1431 if (phy->allocated_src == D40_ALLOC_FREE)
1432 phy->allocated_src = D40_ALLOC_LOG_FREE;
1434 if (!(phy->allocated_src & (1 << log_event_line))) {
1435 phy->allocated_src |= 1 << log_event_line;
1440 if (phy->allocated_dst == D40_ALLOC_PHY)
1443 if (phy->allocated_dst == D40_ALLOC_FREE)
1444 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1446 if (!(phy->allocated_dst & (1 << log_event_line))) {
1447 phy->allocated_dst |= 1 << log_event_line;
1454 spin_unlock_irqrestore(&phy->lock, flags);
1457 spin_unlock_irqrestore(&phy->lock, flags);
1461 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1464 unsigned long flags;
1465 bool is_free = false;
1467 spin_lock_irqsave(&phy->lock, flags);
1468 if (!log_event_line) {
1469 phy->allocated_dst = D40_ALLOC_FREE;
1470 phy->allocated_src = D40_ALLOC_FREE;
1475 /* Logical channel */
1477 phy->allocated_src &= ~(1 << log_event_line);
1478 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1479 phy->allocated_src = D40_ALLOC_FREE;
1481 phy->allocated_dst &= ~(1 << log_event_line);
1482 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1483 phy->allocated_dst = D40_ALLOC_FREE;
1486 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1490 spin_unlock_irqrestore(&phy->lock, flags);
1495 static int d40_allocate_channel(struct d40_chan *d40c)
1500 struct d40_phy_res *phys;
1505 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1507 phys = d40c->base->phy_res;
1509 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1510 dev_type = d40c->dma_cfg.src_dev_type;
1511 log_num = 2 * dev_type;
1513 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1514 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1515 /* dst event lines are used for logical memcpy */
1516 dev_type = d40c->dma_cfg.dst_dev_type;
1517 log_num = 2 * dev_type + 1;
1522 event_group = D40_TYPE_TO_GROUP(dev_type);
1523 event_line = D40_TYPE_TO_EVENT(dev_type);
1526 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1527 /* Find physical half channel */
1528 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1530 if (d40_alloc_mask_set(&phys[i], is_src,
1535 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1536 int phy_num = j + event_group * 2;
1537 for (i = phy_num; i < phy_num + 2; i++) {
1538 if (d40_alloc_mask_set(&phys[i],
1547 d40c->phy_chan = &phys[i];
1548 d40c->log_num = D40_PHY_CHAN;
1554 /* Find logical channel */
1555 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1556 int phy_num = j + event_group * 2;
1558 * Spread logical channels across all available physical rather
1559 * than pack every logical channel at the first available phy
1563 for (i = phy_num; i < phy_num + 2; i++) {
1564 if (d40_alloc_mask_set(&phys[i], is_src,
1565 event_line, is_log))
1569 for (i = phy_num + 1; i >= phy_num; i--) {
1570 if (d40_alloc_mask_set(&phys[i], is_src,
1571 event_line, is_log))
1579 d40c->phy_chan = &phys[i];
1580 d40c->log_num = log_num;
1584 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1586 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1592 static int d40_config_memcpy(struct d40_chan *d40c)
1594 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1596 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1597 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1598 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1599 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1600 memcpy[d40c->chan.chan_id];
1602 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1603 dma_has_cap(DMA_SLAVE, cap)) {
1604 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1606 chan_err(d40c, "No memcpy\n");
1614 static int d40_free_dma(struct d40_chan *d40c)
1619 struct d40_phy_res *phy = d40c->phy_chan;
1622 /* Terminate all queued and active transfers */
1626 chan_err(d40c, "phy == null\n");
1630 if (phy->allocated_src == D40_ALLOC_FREE &&
1631 phy->allocated_dst == D40_ALLOC_FREE) {
1632 chan_err(d40c, "channel already free\n");
1636 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1637 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1638 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1640 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1641 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1644 chan_err(d40c, "Unknown direction\n");
1648 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1650 chan_err(d40c, "suspend failed\n");
1654 if (chan_is_logical(d40c)) {
1655 /* Release logical channel, deactivate the event line */
1657 d40_config_set_event(d40c, false);
1658 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1661 * Check if there are more logical allocations
1662 * on this phy channel.
1664 if (!d40_alloc_mask_free(phy, is_src, event)) {
1665 /* Resume the other logical channels if any */
1666 if (d40_chan_has_events(d40c)) {
1667 res = d40_channel_execute_command(d40c,
1671 "Executing RUN command\n");
1678 (void) d40_alloc_mask_free(phy, is_src, 0);
1681 /* Release physical channel */
1682 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1684 chan_err(d40c, "Failed to stop channel\n");
1687 d40c->phy_chan = NULL;
1688 d40c->configured = false;
1689 d40c->base->lookup_phy_chans[phy->num] = NULL;
1694 static bool d40_is_paused(struct d40_chan *d40c)
1696 void __iomem *chanbase = chan_base(d40c);
1697 bool is_paused = false;
1698 unsigned long flags;
1699 void __iomem *active_reg;
1703 spin_lock_irqsave(&d40c->lock, flags);
1705 if (chan_is_physical(d40c)) {
1706 if (d40c->phy_chan->num % 2 == 0)
1707 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1709 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1711 status = (readl(active_reg) &
1712 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1713 D40_CHAN_POS(d40c->phy_chan->num);
1714 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1720 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1721 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1722 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1723 status = readl(chanbase + D40_CHAN_REG_SDLNK);
1724 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1725 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1726 status = readl(chanbase + D40_CHAN_REG_SSLNK);
1728 chan_err(d40c, "Unknown direction\n");
1732 status = (status & D40_EVENTLINE_MASK(event)) >>
1733 D40_EVENTLINE_POS(event);
1735 if (status != D40_DMA_RUN)
1738 spin_unlock_irqrestore(&d40c->lock, flags);
1744 static u32 stedma40_residue(struct dma_chan *chan)
1746 struct d40_chan *d40c =
1747 container_of(chan, struct d40_chan, chan);
1749 unsigned long flags;
1751 spin_lock_irqsave(&d40c->lock, flags);
1752 bytes_left = d40_residue(d40c);
1753 spin_unlock_irqrestore(&d40c->lock, flags);
1759 d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
1760 struct scatterlist *sg_src, struct scatterlist *sg_dst,
1761 unsigned int sg_len, dma_addr_t src_dev_addr,
1762 dma_addr_t dst_dev_addr)
1764 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1765 struct stedma40_half_channel_info *src_info = &cfg->src_info;
1766 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
1769 ret = d40_log_sg_to_lli(sg_src, sg_len,
1772 chan->log_def.lcsp1,
1773 src_info->data_width,
1774 dst_info->data_width);
1776 ret = d40_log_sg_to_lli(sg_dst, sg_len,
1779 chan->log_def.lcsp3,
1780 dst_info->data_width,
1781 src_info->data_width);
1783 return ret < 0 ? ret : 0;
1787 d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
1788 struct scatterlist *sg_src, struct scatterlist *sg_dst,
1789 unsigned int sg_len, dma_addr_t src_dev_addr,
1790 dma_addr_t dst_dev_addr)
1792 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1793 struct stedma40_half_channel_info *src_info = &cfg->src_info;
1794 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
1795 unsigned long flags = 0;
1799 flags |= LLI_CYCLIC | LLI_TERM_INT;
1801 ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
1803 virt_to_phys(desc->lli_phy.src),
1805 src_info, dst_info, flags);
1807 ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
1809 virt_to_phys(desc->lli_phy.dst),
1811 dst_info, src_info, flags);
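/*
 * Note: unlike logical LLIs, which are copied into LCPA/LCLA when the
 * descriptor is loaded, physical LLIs are fetched by the DMAC directly from
 * this pool (mapped in d40_pool_lli_alloc()), so flush them out to memory
 * before the transfer is started.
 */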
1813 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
1814 desc->lli_pool.size, DMA_TO_DEVICE);
1816 return ret < 0 ? ret : 0;
1820 static struct d40_desc *
1821 d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
1822 unsigned int sg_len, unsigned long dma_flags)
1824 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1825 struct d40_desc *desc;
1828 desc = d40_desc_get(chan);
1832 desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
1833 cfg->dst_info.data_width);
1834 if (desc->lli_len < 0) {
1835 chan_err(chan, "Unaligned size\n");
1839 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
1841 chan_err(chan, "Could not allocate lli\n");
1846 desc->lli_current = 0;
1847 desc->txd.flags = dma_flags;
1848 desc->txd.tx_submit = d40_tx_submit;
1850 dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
1855 d40_desc_free(chan, desc);
1860 d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
1862 struct stedma40_platform_data *plat = chan->base->plat_data;
1863 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1864 dma_addr_t addr = 0;
1866 if (chan->runtime_addr)
1867 return chan->runtime_addr;
1869 if (direction == DMA_FROM_DEVICE)
1870 addr = plat->dev_rx[cfg->src_dev_type];
1871 else if (direction == DMA_TO_DEVICE)
1872 addr = plat->dev_tx[cfg->dst_dev_type];
1877 static struct dma_async_tx_descriptor *
1878 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
1879 struct scatterlist *sg_dst, unsigned int sg_len,
1880 enum dma_data_direction direction, unsigned long dma_flags)
1882 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
1883 dma_addr_t src_dev_addr = 0;
1884 dma_addr_t dst_dev_addr = 0;
1885 struct d40_desc *desc;
1886 unsigned long flags;
1889 if (!chan->phy_chan) {
1890 chan_err(chan, "Cannot prepare unallocated channel\n");
1895 spin_lock_irqsave(&chan->lock, flags);
1897 desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
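/*
 * A scatterlist whose last entry chains back to its first entry (as built
 * by dma40_prep_dma_cyclic() below) marks a cyclic transfer.
 */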
1901 if (sg_next(&sg_src[sg_len - 1]) == sg_src)
1902 desc->cyclic = true;
1904 if (direction != DMA_NONE) {
1905 dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
1907 if (direction == DMA_FROM_DEVICE)
1908 src_dev_addr = dev_addr;
1909 else if (direction == DMA_TO_DEVICE)
1910 dst_dev_addr = dev_addr;
1913 if (chan_is_logical(chan))
1914 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
1915 sg_len, src_dev_addr, dst_dev_addr);
1917 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
1918 sg_len, src_dev_addr, dst_dev_addr);
1921 chan_err(chan, "Failed to prepare %s sg job: %d\n",
1922 chan_is_logical(chan) ? "log" : "phy", ret);
1927 * add descriptor to the prepare queue in order to be able
1928 * to free it later in terminate_all
1930 list_add_tail(&desc->node, &chan->prepare_queue);
1932 spin_unlock_irqrestore(&chan->lock, flags);
1938 d40_desc_free(chan, desc);
1939 spin_unlock_irqrestore(&chan->lock, flags);
1943 bool stedma40_filter(struct dma_chan *chan, void *data)
1945 struct stedma40_chan_cfg *info = data;
1946 struct d40_chan *d40c =
1947 container_of(chan, struct d40_chan, chan);
1951 err = d40_validate_conf(d40c, info);
1953 d40c->dma_cfg = *info;
1955 err = d40_config_memcpy(d40c);
1958 d40c->configured = true;
1962 EXPORT_SYMBOL(stedma40_filter);
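/*
 * Illustrative usage sketch (field values below are hypothetical and
 * platform specific): a slave client typically fills in a
 * struct stedma40_chan_cfg and hands it to dma_request_channel() together
 * with this filter function:
 *
 *	struct stedma40_chan_cfg cfg = {
 *		.dir = STEDMA40_PERIPH_TO_MEM,
 *		.src_dev_type = <platform specific event line>,
 *		.dst_dev_type = STEDMA40_DEV_DST_MEMORY,
 *		.mode = STEDMA40_MODE_LOGICAL,
 *	};
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */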
1964 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
1966 bool realtime = d40c->dma_cfg.realtime;
1967 bool highprio = d40c->dma_cfg.high_priority;
1968 u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
1969 u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
1970 u32 event = D40_TYPE_TO_EVENT(dev_type);
1971 u32 group = D40_TYPE_TO_GROUP(dev_type);
1972 u32 bit = 1 << event;
1974 /* Destination event lines are stored in the upper halfword */
1978 writel(bit, d40c->base->virtbase + prioreg + group * 4);
1979 writel(bit, d40c->base->virtbase + rtreg + group * 4);
1982 static void d40_set_prio_realtime(struct d40_chan *d40c)
1984 if (d40c->base->rev < 3)
1987 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
1988 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
1989 __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
1991 if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
1992 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
1993 __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
1996 /* DMA ENGINE functions */
1997 static int d40_alloc_chan_resources(struct dma_chan *chan)
2000 unsigned long flags;
2001 struct d40_chan *d40c =
2002 container_of(chan, struct d40_chan, chan);
2004 spin_lock_irqsave(&d40c->lock, flags);
2006 d40c->completed = chan->cookie = 1;
2008 /* If no dma configuration is set use default configuration (memcpy) */
2009 if (!d40c->configured) {
2010 err = d40_config_memcpy(d40c);
2012 chan_err(d40c, "Failed to configure memcpy channel\n");
2016 is_free_phy = (d40c->phy_chan == NULL);
2018 err = d40_allocate_channel(d40c);
2020 chan_err(d40c, "Failed to allocate channel\n");
2024 /* Fill in basic CFG register values */
2025 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
2026 &d40c->dst_def_cfg, chan_is_logical(d40c));
2028 d40_set_prio_realtime(d40c);
2030 if (chan_is_logical(d40c)) {
2031 d40_log_cfg(&d40c->dma_cfg,
2032 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2034 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
2035 d40c->lcpa = d40c->base->lcpa_base +
2036 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
2038 d40c->lcpa = d40c->base->lcpa_base +
2039 d40c->dma_cfg.dst_dev_type *
2040 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2044 * Only write channel configuration to the DMA if the physical
2045 * resource is free. In case of multiple logical channels
2046 * on the same physical resource, only the first write is necessary.
2049 d40_config_write(d40c);
2051 spin_unlock_irqrestore(&d40c->lock, flags);
2055 static void d40_free_chan_resources(struct dma_chan *chan)
2057 struct d40_chan *d40c =
2058 container_of(chan, struct d40_chan, chan);
2060 unsigned long flags;
2062 if (d40c->phy_chan == NULL) {
2063 chan_err(d40c, "Cannot free unallocated channel\n");
2068 spin_lock_irqsave(&d40c->lock, flags);
2070 err = d40_free_dma(d40c);
2073 chan_err(d40c, "Failed to free channel\n");
2074 spin_unlock_irqrestore(&d40c->lock, flags);
2077 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2081 unsigned long dma_flags)
2083 struct scatterlist dst_sg;
2084 struct scatterlist src_sg;
2086 sg_init_table(&dst_sg, 1);
2087 sg_init_table(&src_sg, 1);
2089 sg_dma_address(&dst_sg) = dst;
2090 sg_dma_address(&src_sg) = src;
2092 sg_dma_len(&dst_sg) = size;
2093 sg_dma_len(&src_sg) = size;
2095 return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
2098 static struct dma_async_tx_descriptor *
2099 d40_prep_memcpy_sg(struct dma_chan *chan,
2100 struct scatterlist *dst_sg, unsigned int dst_nents,
2101 struct scatterlist *src_sg, unsigned int src_nents,
2102 unsigned long dma_flags)
2104 if (dst_nents != src_nents)
2107 return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
2110 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2111 struct scatterlist *sgl,
2112 unsigned int sg_len,
2113 enum dma_data_direction direction,
2114 unsigned long dma_flags)
2116 if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
2119 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2122 static struct dma_async_tx_descriptor *
2123 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2124 size_t buf_len, size_t period_len,
2125 enum dma_data_direction direction)
2127 unsigned int periods = buf_len / period_len;
2128 struct dma_async_tx_descriptor *txd;
2129 struct scatterlist *sg;
2132 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
2133 for (i = 0; i < periods; i++) {
2134 sg_dma_address(&sg[i]) = dma_addr;
2135 sg_dma_len(&sg[i]) = period_len;
2136 dma_addr += period_len;
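/*
 * The extra entry after the last period is turned into a chain entry
 * pointing back to the start of the list, so that d40_prep_sg() sees
 * sg_next() of the last period wrap around and flags the descriptor as
 * cyclic.
 */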
2139 sg[periods].offset = 0;
2140 sg[periods].length = 0;
2141 sg[periods].page_link =
2142 ((unsigned long)sg | 0x01) & ~0x02;
2144 txd = d40_prep_sg(chan, sg, sg, periods, direction,
2145 DMA_PREP_INTERRUPT);
2152 static enum dma_status d40_tx_status(struct dma_chan *chan,
2153 dma_cookie_t cookie,
2154 struct dma_tx_state *txstate)
2156 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2157 dma_cookie_t last_used;
2158 dma_cookie_t last_complete;
2161 if (d40c->phy_chan == NULL) {
2162 chan_err(d40c, "Cannot read status of unallocated channel\n");
2166 last_complete = d40c->completed;
2167 last_used = chan->cookie;
2169 if (d40_is_paused(d40c))
2172 ret = dma_async_is_complete(cookie, last_complete, last_used);
2174 dma_set_tx_state(txstate, last_complete, last_used,
2175 stedma40_residue(chan));
2180 static void d40_issue_pending(struct dma_chan *chan)
2182 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2183 unsigned long flags;
2185 if (d40c->phy_chan == NULL) {
2186 chan_err(d40c, "Channel is not allocated!\n");
2190 spin_lock_irqsave(&d40c->lock, flags);
2192 list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2194 /* Busy means that queued jobs are already being processed */
2196 (void) d40_queue_start(d40c);
2198 spin_unlock_irqrestore(&d40c->lock, flags);
2202 dma40_config_to_halfchannel(struct d40_chan *d40c,
2203 struct stedma40_half_channel_info *info,
2204 enum dma_slave_buswidth width,
2207 enum stedma40_periph_data_width addr_width;
2211 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2212 addr_width = STEDMA40_BYTE_WIDTH;
2214 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2215 addr_width = STEDMA40_HALFWORD_WIDTH;
2217 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2218 addr_width = STEDMA40_WORD_WIDTH;
2220 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2221 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2224 dev_err(d40c->base->dev,
2225 "illegal peripheral address width "
2231 if (chan_is_logical(d40c)) {
2233 psize = STEDMA40_PSIZE_LOG_16;
2234 else if (maxburst >= 8)
2235 psize = STEDMA40_PSIZE_LOG_8;
2236 else if (maxburst >= 4)
2237 psize = STEDMA40_PSIZE_LOG_4;
2239 psize = STEDMA40_PSIZE_LOG_1;
2242 psize = STEDMA40_PSIZE_PHY_16;
2243 else if (maxburst >= 8)
2244 psize = STEDMA40_PSIZE_PHY_8;
2245 else if (maxburst >= 4)
2246 psize = STEDMA40_PSIZE_PHY_4;
2248 psize = STEDMA40_PSIZE_PHY_1;
2251 info->data_width = addr_width;
2252 info->psize = psize;
2253 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2258 /* Runtime reconfiguration extension */
2259 static int d40_set_runtime_config(struct dma_chan *chan,
2260 struct dma_slave_config *config)
2262 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2263 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2264 enum dma_slave_buswidth src_addr_width, dst_addr_width;
2265 dma_addr_t config_addr;
2266 u32 src_maxburst, dst_maxburst;
2269 src_addr_width = config->src_addr_width;
2270 src_maxburst = config->src_maxburst;
2271 dst_addr_width = config->dst_addr_width;
2272 dst_maxburst = config->dst_maxburst;
2274 if (config->direction == DMA_FROM_DEVICE) {
2275 dma_addr_t dev_addr_rx =
2276 d40c->base->plat_data->dev_rx[cfg->src_dev_type];
2278 config_addr = config->src_addr;
2280 dev_dbg(d40c->base->dev,
2281 "channel has a pre-wired RX address %08x "
2282 "overriding with %08x\n",
2283 dev_addr_rx, config_addr);
2284 if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
2285 dev_dbg(d40c->base->dev,
2286 "channel was not configured for peripheral "
2287 "to memory transfer (%d) overriding\n",
2289 cfg->dir = STEDMA40_PERIPH_TO_MEM;
2291 /* Configure the memory side */
2292 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2293 dst_addr_width = src_addr_width;
2294 if (dst_maxburst == 0)
2295 dst_maxburst = src_maxburst;
2297 } else if (config->direction == DMA_TO_DEVICE) {
2298 dma_addr_t dev_addr_tx =
2299 d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
2301 config_addr = config->dst_addr;
2303 dev_dbg(d40c->base->dev,
2304 "channel has a pre-wired TX address %08x "
2305 "overriding with %08x\n",
2306 dev_addr_tx, config_addr);
2307 if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
2308 dev_dbg(d40c->base->dev,
2309 "channel was not configured for memory "
2310 "to peripheral transfer (%d) overriding\n",
2312 cfg->dir = STEDMA40_MEM_TO_PERIPH;
2314 /* Configure the memory side */
2315 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2316 src_addr_width = dst_addr_width;
2317 if (src_maxburst == 0)
2318 src_maxburst = dst_maxburst;
2320 dev_err(d40c->base->dev,
2321 "unrecognized channel direction %d\n",
2326 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2327 dev_err(d40c->base->dev,
2328 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2336 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2342 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2348 /* Fill in register values */
2349 if (chan_is_logical(d40c))
2350 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2352 d40_phy_cfg(cfg, &d40c->src_def_cfg,
2353 &d40c->dst_def_cfg, false);
2355 /* These settings will take precedence later */
2356 d40c->runtime_addr = config_addr;
2357 d40c->runtime_direction = config->direction;
2358 dev_dbg(d40c->base->dev,
2359 "configured channel %s for %s, data width %d/%d, "
2360 "maxburst %d/%d elements, LE, no flow control\n",
2361 dma_chan_name(chan),
2362 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
2363 src_addr_width, dst_addr_width,
2364 src_maxburst, dst_maxburst);
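/*
 * Illustrative note: this path is normally reached through the generic
 * dmaengine_slave_config() helper, e.g. (values are hypothetical):
 *
 *	struct dma_slave_config conf = {
 *		.direction = DMA_FROM_DEVICE,
 *		.src_addr = <peripheral FIFO address>,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &conf);
 */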
2369 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2372 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2374 if (d40c->phy_chan == NULL) {
2375 chan_err(d40c, "Channel is not allocated!\n");
2380 case DMA_TERMINATE_ALL:
2381 return d40_terminate_all(d40c);
2383 return d40_pause(d40c);
2385 return d40_resume(d40c);
2386 case DMA_SLAVE_CONFIG:
2387 return d40_set_runtime_config(chan,
2388 (struct dma_slave_config *) arg);
2393 /* Other commands are unimplemented */
2397 /* Initialization functions */
2399 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2400 struct d40_chan *chans, int offset,
2404 struct d40_chan *d40c;
2406 INIT_LIST_HEAD(&dma->channels);
2408 for (i = offset; i < offset + num_chans; i++) {
2411 d40c->chan.device = dma;
2413 spin_lock_init(&d40c->lock);
2415 d40c->log_num = D40_PHY_CHAN;
2417 INIT_LIST_HEAD(&d40c->active);
2418 INIT_LIST_HEAD(&d40c->queue);
2419 INIT_LIST_HEAD(&d40c->pending_queue);
2420 INIT_LIST_HEAD(&d40c->client);
2421 INIT_LIST_HEAD(&d40c->prepare_queue);
2423 tasklet_init(&d40c->tasklet, dma_tasklet,
2424 (unsigned long) d40c);
2426 list_add_tail(&d40c->chan.device_node,
2431 static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2433 if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
2434 dev->device_prep_slave_sg = d40_prep_slave_sg;
2436 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2437 dev->device_prep_dma_memcpy = d40_prep_memcpy;
2440 * This controller can only access addresses at even
2441 * 32bit boundaries, i.e. 2^2.
2443 dev->copy_align = 2;
2446 if (dma_has_cap(DMA_SG, dev->cap_mask))
2447 dev->device_prep_dma_sg = d40_prep_memcpy_sg;
2449 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2450 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2452 dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2453 dev->device_free_chan_resources = d40_free_chan_resources;
2454 dev->device_issue_pending = d40_issue_pending;
2455 dev->device_tx_status = d40_tx_status;
2456 dev->device_control = d40_control;
2457 dev->dev = base->dev;
2460 static int __init d40_dmaengine_init(struct d40_base *base,
2461 int num_reserved_chans)
2465 d40_chan_init(base, &base->dma_slave, base->log_chans,
2466 0, base->num_log_chans);
2468 dma_cap_zero(base->dma_slave.cap_mask);
2469 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2470 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2472 d40_ops_init(base, &base->dma_slave);
2474 err = dma_async_device_register(&base->dma_slave);
2477 d40_err(base->dev, "Failed to register slave channels\n");
2481 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2482 base->num_log_chans, base->plat_data->memcpy_len);
2484 dma_cap_zero(base->dma_memcpy.cap_mask);
2485 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2486 dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
2488 d40_ops_init(base, &base->dma_memcpy);
2490 err = dma_async_device_register(&base->dma_memcpy);
2494 "Failed to regsiter memcpy only channels\n");
2498 d40_chan_init(base, &base->dma_both, base->phy_chans,
2499 0, num_reserved_chans);
2501 dma_cap_zero(base->dma_both.cap_mask);
2502 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2503 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2504 dma_cap_set(DMA_SG, base->dma_both.cap_mask);
2505 dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2507 d40_ops_init(base, &base->dma_both);
2508 err = dma_async_device_register(&base->dma_both);
2512 "Failed to register logical and physical capable channels\n");
2517 dma_async_device_unregister(&base->dma_memcpy);
2519 dma_async_device_unregister(&base->dma_slave);
2524 /* Initialization functions. */
2526 static int __init d40_phy_res_init(struct d40_base *base)
2529 int num_phy_chans_avail = 0;
2531 int odd_even_bit = -2;
2533 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2534 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
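/*
 * PRSME and PRSMO hold two bits of security mode per physical channel,
 * even-numbered channels in PRSME and odd-numbered ones in PRSMO. As
 * the loop below assumes, a field value of 1 marks a secure-only
 * channel that the kernel must leave alone.
 */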
2536 for (i = 0; i < base->num_phy_chans; i++) {
2537 base->phy_res[i].num = i;
2538 odd_even_bit += 2 * ((i % 2) == 0);
2539 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2540 /* Mark security only channels as occupied */
2541 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2542 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2544 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2545 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2546 num_phy_chans_avail++;
2548 spin_lock_init(&base->phy_res[i].lock);
2551 /* Mark disabled channels as occupied */
2552 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
2553 int chan = base->plat_data->disabled_channels[i];
2555 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
2556 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
2557 num_phy_chans_avail--;
2560 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2561 num_phy_chans_avail, base->num_phy_chans);
2563 /* Verify settings extended vs standard */
2564 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2566 for (i = 0; i < base->num_phy_chans; i++) {
2568 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2569 (val[0] & 0x3) != 1)
2571 "[%s] INFO: channel %d is misconfigured (%d)\n",
2572 __func__, i, val[0] & 0x3);
2574 val[0] = val[0] >> 2;
2577 return num_phy_chans_avail;
2580 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2582 struct stedma40_platform_data *plat_data;
2583 struct clk *clk = NULL;
2584 void __iomem *virtbase = NULL;
2585 struct resource *res = NULL;
2586 struct d40_base *base = NULL;
2587 int num_log_chans = 0;
2594 clk = clk_get(&pdev->dev, NULL);
2597 d40_err(&pdev->dev, "No matching clock found\n");
2603 /* Get IO for DMAC base address */
2604 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2608 if (request_mem_region(res->start, resource_size(res),
2609 D40_NAME " I/O base") == NULL)
2612 virtbase = ioremap(res->start, resource_size(res));
2616 /* This is just a regular AMBA PrimeCell ID actually */
2617 for (pid = 0, i = 0; i < 4; i++)
2618 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
2620 for (cid = 0, i = 0; i < 4; i++)
2621 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
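/*
 * The component and peripheral IDs are spread over byte-wide registers
 * at the top of the PrimeCell register window (0x20 and 0x10 below the
 * end of the resource), each register contributing one byte of the
 * assembled 32-bit value.
 */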
2624 if (cid != AMBA_CID) {
2625 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
2628 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
2629 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
2630 AMBA_MANF_BITS(pid),
2636 * DB8500ed has revision 0
2638 * DB8500v1 has revision 2
2639 * DB8500v2 has revision 3
2641 rev = AMBA_REV_BITS(pid);
2643 /* The number of physical channels on this HW */
2644 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
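/*
 * ICFG[2:0] encodes the channel count in steps of four, so a raw field
 * value of n corresponds to 4 * n + 4 physical channels.
 */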
2646 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2649 plat_data = pdev->dev.platform_data;
2651 /* Count the number of logical channels in use */
2652 for (i = 0; i < plat_data->dev_len; i++)
2653 if (plat_data->dev_rx[i] != 0)
2656 for (i = 0; i < plat_data->dev_len; i++)
2657 if (plat_data->dev_tx[i] != 0)
2660 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2661 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2662 sizeof(struct d40_chan), GFP_KERNEL);
2665 d40_err(&pdev->dev, "Out of memory\n");
2671 base->num_phy_chans = num_phy_chans;
2672 base->num_log_chans = num_log_chans;
2673 base->phy_start = res->start;
2674 base->phy_size = resource_size(res);
2675 base->virtbase = virtbase;
2676 base->plat_data = plat_data;
2677 base->dev = &pdev->dev;
2678 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2679 base->log_chans = &base->phy_chans[num_phy_chans];
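/*
 * The channel arrays live in the tail of the single allocation above:
 * phy_chans starts right after the aligned struct d40_base, log_chans
 * follows the physical channels, and the memcpy-only channels come
 * right after the logical ones in that same array (see
 * d40_dmaengine_init()).
 */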
2681 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2686 base->lookup_phy_chans = kzalloc(num_phy_chans *
2687 sizeof(struct d40_chan *),
2689 if (!base->lookup_phy_chans)
2692 if (num_log_chans + plat_data->memcpy_len) {
2694 * The maximum number of logical channels equals the number of
2695 * event lines for all src and dst devices
2697 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2698 sizeof(struct d40_chan *),
2700 if (!base->lookup_log_chans)
2704 base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
2705 sizeof(struct d40_desc *) *
2706 D40_LCLA_LINK_PER_EVENT_GRP,
2708 if (!base->lcla_pool.alloc_map)
2711 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2712 0, SLAB_HWCACHE_ALIGN,
2714 if (base->desc_slab == NULL)
2727 release_mem_region(res->start,
2728 resource_size(res));
2733 kfree(base->lcla_pool.alloc_map);
2734 kfree(base->lookup_log_chans);
2735 kfree(base->lookup_phy_chans);
2736 kfree(base->phy_res);
2743 static void __init d40_hw_init(struct d40_base *base)
2746 static const struct d40_reg_val dma_init_reg[] = {
2747 /* Clock every part of the DMA block from start */
2748 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2750 /* Interrupts on all logical channels */
2751 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2752 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2753 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2754 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2755 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2756 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2757 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2758 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2759 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2760 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2761 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2762 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2765 u32 prmseo[2] = {0, 0};
2766 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2770 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2771 writel(dma_init_reg[i].val,
2772 base->virtbase + dma_init_reg[i].reg);
2774 /* Configure all our dma channels to default settings */
2775 for (i = 0; i < base->num_phy_chans; i++) {
2777 activeo[i % 2] = activeo[i % 2] << 2;
2779 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2781 activeo[i % 2] |= 3;
2785 /* Enable the interrupt for this physical channel */
2786 pcmis = (pcmis << 1) | 1;
2788 /* Clear any pending interrupt for this physical channel */
2789 pcicr = (pcicr << 1) | 1;
2791 /* Set channel to physical mode */
2792 prmseo[i % 2] = prmseo[i % 2] << 2;
2797 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2798 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2799 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2800 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2802 /* Write which interrupt to enable */
2803 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2805 /* Write which interrupt to clear */
2806 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2810 static int __init d40_lcla_allocate(struct d40_base *base)
2812 struct d40_lcla_pool *pool = &base->lcla_pool;
2813 unsigned long *page_list;
2818 * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned.
2819 * To fulfill this hardware requirement without wasting 256 KiB,
2820 * we allocate pages until we get an aligned one.
2822 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
2830 /* Calculate how many pages are required */
2831 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
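/*
 * One KiB of LCLA link space is needed per physical channel. Note that
 * the value computed here is used as the order argument to
 * __get_free_pages()/free_pages() below, so the allocation is actually
 * 2^pages pages.
 */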
2833 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
2834 page_list[i] = __get_free_pages(GFP_KERNEL,
2835 base->lcla_pool.pages);
2836 if (!page_list[i]) {
2838 d40_err(base->dev, "Failed to allocate %d pages.\n",
2839 base->lcla_pool.pages);
2841 for (j = 0; j < i; j++)
2842 free_pages(page_list[j], base->lcla_pool.pages);
2846 if ((virt_to_phys((void *)page_list[i]) &
2847 (LCLA_ALIGNMENT - 1)) == 0)
2851 for (j = 0; j < i; j++)
2852 free_pages(page_list[j], base->lcla_pool.pages);
2854 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
2855 base->lcla_pool.base = (void *)page_list[i];
2858 * After many attempts with no success at finding the correct
2859 * alignment, fall back to allocating a bigger buffer.
2862 "[%s] Failed to get %d pages @ 18 bit align.\n",
2863 __func__, base->lcla_pool.pages);
2864 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
2865 base->num_phy_chans +
2868 if (!base->lcla_pool.base_unaligned) {
2873 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
2877 pool->dma_addr = dma_map_single(base->dev, pool->base,
2878 SZ_1K * base->num_phy_chans,
2880 if (dma_mapping_error(base->dev, pool->dma_addr)) {
2886 writel(virt_to_phys(base->lcla_pool.base),
2887 base->virtbase + D40_DREG_LCLA);
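/*
 * Point the controller at the physical base of the LCLA_ALIGNMENT
 * (256 KiB) aligned LCLA area, from which the hardware fetches the
 * logical channel link entries.
 */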
2893 static int __init d40_probe(struct platform_device *pdev)
2897 struct d40_base *base;
2898 struct resource *res = NULL;
2899 int num_reserved_chans;
2902 base = d40_hw_detect_init(pdev);
2907 num_reserved_chans = d40_phy_res_init(base);
2909 platform_set_drvdata(pdev, base);
2911 spin_lock_init(&base->interrupt_lock);
2912 spin_lock_init(&base->execmd_lock);
2914 /* Get IO for logical channel parameter address */
2915 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2918 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
2921 base->lcpa_size = resource_size(res);
2922 base->phy_lcpa = res->start;
2924 if (request_mem_region(res->start, resource_size(res),
2925 D40_NAME " I/O lcpa") == NULL) {
2928 "Failed to request LCPA region 0x%x-0x%x\n",
2929 res->start, res->end);
2933 /* We make use of ESRAM memory for this. */
2934 val = readl(base->virtbase + D40_DREG_LCPA);
2935 if (res->start != val && val != 0) {
2936 dev_warn(&pdev->dev,
2937 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2938 __func__, val, res->start);
2940 writel(res->start, base->virtbase + D40_DREG_LCPA);
2942 base->lcpa_base = ioremap(res->start, resource_size(res));
2943 if (!base->lcpa_base) {
2945 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
2949 ret = d40_lcla_allocate(base);
2951 d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
2955 spin_lock_init(&base->lcla_pool.lock);
2957 base->irq = platform_get_irq(pdev, 0);
2959 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2961 d40_err(&pdev->dev, "No IRQ defined\n");
2965 err = d40_dmaengine_init(base, num_reserved_chans);
2971 dev_info(base->dev, "initialized\n");
2976 if (base->desc_slab)
2977 kmem_cache_destroy(base->desc_slab);
2979 iounmap(base->virtbase);
2981 if (base->lcla_pool.dma_addr)
2982 dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
2983 SZ_1K * base->num_phy_chans,
2986 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
2987 free_pages((unsigned long)base->lcla_pool.base,
2988 base->lcla_pool.pages);
2990 kfree(base->lcla_pool.base_unaligned);
2993 release_mem_region(base->phy_lcpa,
2995 if (base->phy_start)
2996 release_mem_region(base->phy_start,
2999 clk_disable(base->clk);
3003 kfree(base->lcla_pool.alloc_map);
3004 kfree(base->lookup_log_chans);
3005 kfree(base->lookup_phy_chans);
3006 kfree(base->phy_res);
3010 d40_err(&pdev->dev, "probe failed\n");
3014 static struct platform_driver d40_driver = {
3016 .owner = THIS_MODULE,
3021 static int __init stedma40_init(void)
3023 return platform_driver_probe(&d40_driver, d40_probe);
3025 subsys_initcall(stedma40_init);