// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#define pr_fmt(fmt) "udma: " fmt

#include <asm/cache.h>
#include <asm/bitops.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/of_access.h>
#include <dma-uclass.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/printk.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/cppi5.h>
#include <linux/soc/ti/ti-udma.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "k3-udma-hwdef.h"
#include "k3-psil-priv.h"

#define K3_UDMA_MAX_RFLOWS 1024

static const char * const mmr_names[] = {
	[MMR_GCFG] = "gcfg",
	[MMR_BCHANRT] = "bchanrt",
	[MMR_RCHANRT] = "rchanrt",
	[MMR_TCHANRT] = "tchanrt",
	[MMR_RCHAN] = "rchan",
	[MMR_TCHAN] = "tchan",
	[MMR_RFLOW] = "rflow",
};

struct udma_tchan {
	void __iomem *reg_chan;
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *t_ring; /* Transmit ring */
	struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
	int tflow_id; /* applicable only for PKTDMA */
};

#define udma_bchan udma_tchan

struct udma_rflow {
	void __iomem *reg_rflow;
	int id;
	struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
	struct k3_nav_ring *r_ring; /* Receive ring */
};

struct udma_rchan {
	void __iomem *reg_chan;
	void __iomem *reg_rt;
	int id;
};

struct udma_oes_offsets {
	/* K3 UDMA Output Event Offset */
	u32 udma_rchan;

	/* BCDMA Output Event Offsets */
	u32 bcdma_bchan_data;
	u32 bcdma_bchan_ring;
	u32 bcdma_tchan_data;
	u32 bcdma_tchan_ring;
	u32 bcdma_rchan_data;
	u32 bcdma_rchan_ring;

	/* PKTDMA Output Event Offsets */
	u32 pktdma_tchan_flow;
	u32 pktdma_rchan_flow;
};

#define UDMA_FLAG_PDMA_ACC32	BIT(0)
#define UDMA_FLAG_PDMA_BURST	BIT(1)
#define UDMA_FLAG_TDTYPE	BIT(2)

struct udma_match_data {
	enum k3_dma_type type;
	u32 psil_base;
	bool enable_memcpy_support;
	u32 flags;
	u32 statictr_z_mask;
	struct udma_oes_offsets oes;

	u8 tpl_levels;
	u32 level_start_idx[];
};

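/*
 * For orientation (illustrative, derived from the match data at the end of
 * this file): level_start_idx maps a throughput level to the first channel
 * id of that level. E.g. the AM654 main NAVSS data uses [0] = 8 for normal
 * and [1] = 0 for high-throughput channels, i.e. channels 0-7 are the
 * high-throughput ones and everything from channel 8 up is normal.
 */
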
struct udma_tisci_rm {
	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
	u32 tisci_dev_id;

	/* tisci information for PSI-L thread pairing/unpairing */
	const struct ti_sci_rm_psil_ops *tisci_psil_ops;
	u32 tisci_navss_dev_id;

	struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
};

struct udma_dev {
	struct udevice *dev;
	void __iomem *mmrs[MMR_LAST];

	struct udma_tisci_rm tisci_rm;
	struct k3_nav_ringacc *ringacc;

	int bchan_cnt;
	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	int tflow_cnt;
	unsigned long *bchan_map;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_map;
	unsigned long *rflow_map_reserved;
	unsigned long *tflow_map;

	struct udma_bchan *bchans;
	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_match_data *match_data;

	struct udma_chan *channels;
	u32 psil_base;

	u32 ch_count;
};

struct udma_chan_config {
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
	int remote_thread_id;
	u32 src_thread;
	u32 dst_thread;
	enum psil_endpoint_type ep_type;
	enum udma_tp_level channel_tpl; /* Channel Throughput Level */

	/* PKTDMA mapped channel */
	int mapped_channel_id;
	/* PKTDMA default tflow or rflow for mapped channel */
	int default_flow_id;

	enum dma_direction dir;

	unsigned int pkt_mode:1; /* TR or packet */
	unsigned int needs_epib:1; /* EPIB is needed for the communication or not */
	unsigned int enable_acc32:1;
	unsigned int enable_burst:1;
	unsigned int notdpkt:1; /* Suppress sending TDC packet */
};

struct udma_chan {
	struct udma_dev *ud;
	char name[20];

	struct udma_bchan *bchan;
	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	struct ti_udma_drv_chan_cfg_data cfg_data;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	struct udma_chan_config config;

	u32 id;

	struct cppi5_host_desc_t *desc_tx;
	bool in_use;
	void *desc_rx;
	u32 num_rx_bufs;
	u32 desc_rx_cur;
};

#define UDMA_CH_1000(ch)	(ch * 0x1000)
#define UDMA_CH_100(ch)		(ch * 0x100)
#define UDMA_CH_40(ch)		(ch * 0x40)

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM	PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM	4
#endif

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	u32 v;

	v = __raw_readl(base + reg);
	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
	return v;
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
	__raw_writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = udma_read(base, reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		udma_write(base, reg, tmp);
}

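/*
 * For illustration, a read-modify-write of a single field through
 * udma_update_bits() might look like the sketch below. The pause bit
 * (UDMA_CHAN_RT_CTL_PAUSE) is used here only as a plausible example field
 * and is an assumption, not something this driver actually toggles:
 *
 *	// set the pause bit without disturbing the rest of the register
 *	udma_update_bits(tchan->reg_rt, UDMA_TCHAN_RT_CTL_REG,
 *			 UDMA_CHAN_RT_CTL_PAUSE, UDMA_CHAN_RT_CTL_PAUSE);
 *	// clear it again
 *	udma_update_bits(tchan->reg_rt, UDMA_TCHAN_RT_CTL_REG,
 *			 UDMA_CHAN_RT_CTL_PAUSE, 0);
 */
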
static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
{
	if (!tchan)
		return 0;
	return udma_read(tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_tchan *tchan,
				      int reg, u32 val)
{
	if (!tchan)
		return;
	udma_write(tchan->reg_rt, reg, val);
}

static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
{
	if (!rchan)
		return 0;
	return udma_read(rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_rchan *rchan,
				      int reg, u32 val)
{
	if (!rchan)
		return;
	udma_write(rchan->reg_rt, reg, val);
}

static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
				       u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
					 u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}

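/*
 * Note on PSI-L thread numbering (illustrative; the concrete values are
 * SoC specific): source threads are offsets from the DMA's psil_base
 * (e.g. ud->psil_base + tchan->id), while destination threads carry bit 15
 * set, which is what the OR with UDMA_PSIL_DST_THREAD_ID_OFFSET above does.
 * For example, pairing tchan 7 of a DMA with psil_base 0x1000 to a remote
 * thread 0x4003 would use src_thread = 0x1007 and
 * dst_thread = 0x4003 | 0x8000 = 0xc003.
 */
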
static inline char *udma_get_dir_text(enum dma_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM: return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV: return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM: return "MEM_TO_MEM";
	default: return "invalid";
	}
}

#include "k3-udma-u-boot.c"

static void udma_reset_uchan(struct udma_chan *uc)
{
	memset(&uc->config, 0, sizeof(uc->config));
	uc->config.remote_thread_id = -1;
	uc->config.mapped_channel_id = -1;
	uc->config.default_flow_id = -1;
}

static inline bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, rrt_ctl,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, trt_ctl,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		break;
	default:
		break;
	}

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_nav_ring *ring = NULL;
	int ret = -ENOENT;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring && k3_nav_ringacc_ring_get_occ(ring))
		ret = k3_nav_ringacc_ring_pop(ring, addr);

	return ret;
}

static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_nav_ring *ring1 = NULL;
	struct k3_nav_ring *ring2 = NULL;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring1 = uc->rflow->fd_ring;
		ring2 = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring1)
		k3_nav_ringacc_ring_reset_dma(ring1, k3_nav_ringacc_ring_get_occ(ring1));
	if (ring2)
		k3_nav_ringacc_ring_reset(ring2);
}

static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

		if (!uc->bchan) {
			val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
			udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
		}
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
	}

	uc->bcnt = 0;
}

static inline int udma_stop_hard(struct udma_chan *uc)
{
	pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int udma_start(struct udma_chan *uc)
{
	/* Channel is already running, no need to proceed further */
	if (udma_is_chan_running(uc))
		goto out;

	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->config.dir));

	/* Make sure that we clear the teardown bit, if it is set */
	udma_stop_hard(uc);

	/* Reset all counters */
	udma_reset_counters(uc);

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_CTL_REG),
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		/* Enable remote */
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_CTL_REG),
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		break;
	default:
		return -EINVAL;
	}

	pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
out:
	return 0;
}

static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
			   UDMA_CHAN_RT_CTL_EN |
			   UDMA_CHAN_RT_CTL_TDOWN);

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf(" %s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
			   UDMA_PEER_RT_EN_ENABLE |
			   UDMA_PEER_RT_EN_TEARDOWN);

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf("%s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline int udma_stop(struct udma_chan *uc)
{
	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->config.dir));

	udma_reset_counters(uc);
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_stop_dev2mem(uc, true);
		break;
	case DMA_MEM_TO_DEV:
		udma_stop_mem2dev(uc, true);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
{
	int i = 1;

	while (udma_pop_from_ring(uc, paddr)) {
		udelay(1);
		if (!(i % 1000000))
			printf(".");
		i++;
	}
}

static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
{
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	if (id >= 0) {
		if (test_bit(id, ud->rflow_map)) {
			dev_err(ud->dev, "rflow%d is in use\n", id);
			return ERR_PTR(-ENOENT);
		}
	} else {
		bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
			  ud->rflow_cnt);

		id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
		if (id >= ud->rflow_cnt)
			return ERR_PTR(-ENOENT);
	}

	__set_bit(id, ud->rflow_map);
	return &ud->rflows[id];
}

#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, #res "%d is in use\n", id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	__set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}

UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);

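/*
 * For reference, UDMA_RESERVE_RESOURCE(tchan) above expands to roughly the
 * following (sketch, with the ## token pasting already applied):
 *
 *	static struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud,
 *						       int id)
 *	{
 *		if (id >= 0) {
 *			if (test_bit(id, ud->tchan_map)) {
 *				dev_err(ud->dev, "tchan%d is in use\n", id);
 *				return ERR_PTR(-ENOENT);
 *			}
 *		} else {
 *			id = find_first_zero_bit(ud->tchan_map, ud->tchan_cnt);
 *			if (id == ud->tchan_cnt)
 *				return ERR_PTR(-ENOENT);
 *		}
 *
 *		__set_bit(id, ud->tchan_map);
 *		return &ud->tchans[id];
 *	}
 */
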
static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	uc->tchan = __udma_reserve_tchan(ud, uc->config.mapped_channel_id);
	if (IS_ERR(uc->tchan))
		return PTR_ERR(uc->tchan);

	if (ud->tflow_cnt) {
		int tflow_id;

		/* Only PKTDMA have support for tx flows */
		if (uc->config.default_flow_id >= 0)
			tflow_id = uc->config.default_flow_id;
		else
			tflow_id = uc->tchan->id;

		if (test_bit(tflow_id, ud->tflow_map)) {
			dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
			__clear_bit(uc->tchan->id, ud->tchan_map);
			uc->tchan = NULL;
			return -ENOENT;
		}

		uc->tchan->tflow_id = tflow_id;
		__set_bit(tflow_id, ud->tflow_map);
	} else {
		uc->tchan->tflow_id = -1;
	}

	pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	uc->rchan = __udma_reserve_rchan(ud, uc->config.mapped_channel_id);
	if (IS_ERR(uc->rchan))
		return PTR_ERR(uc->rchan);

	pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);

	return 0;
}

static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	for (chan_id = 0; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	__set_bit(chan_id, ud->tchan_map);
	__set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);

	return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	if (!uc->rchan)
		dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);

	uc->rflow = __udma_reserve_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow))
		return PTR_ERR(uc->rflow);

	pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);

	return 0;
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		__clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		__clear_bit(uc->tchan->id, ud->tchan_map);
		if (uc->tchan->tflow_id >= 0)
			__clear_bit(uc->tchan->tflow_id, ud->tflow_map);
		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__clear_bit(uc->rflow->id, ud->rflow_map);
		uc->rflow = NULL;
	}
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
						&uc->tchan->t_ring,
						&uc->tchan->tc_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_tx_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_tx_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	if (uc->rflow) {
		k3_nav_ringacc_ring_free(uc->rflow->fd_ring);
		k3_nav_ringacc_ring_free(uc->rflow->r_ring);
		uc->rflow->fd_ring = NULL;
		uc->rflow->r_ring = NULL;

		udma_put_rflow(uc);
	}

	udma_put_rchan(uc);
}

static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	struct udma_rflow *rflow;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return 0;

	if (uc->config.default_flow_id >= 0)
		ret = udma_get_rflow(uc, uc->config.default_flow_id);
	else
		ret = udma_get_rflow(uc, uc->rchan->id);
	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	rflow = uc->rflow;
	if (ud->tflow_cnt)
		fd_ring_id = ud->tflow_cnt + rflow->id;
	else
		fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
			     uc->rchan->id;

	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
						&rflow->fd_ring, &rflow->r_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_rx_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(rflow->r_ring);
	rflow->r_ring = NULL;
	k3_nav_ringacc_ring_free(rflow->fd_ring);
	rflow->fd_ring = NULL;
err_rx_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}

static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->config.pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->tchan->id;
	req.tx_chan_type = mode;
	if (uc->config.dir == DMA_MEM_TO_MEM)
		req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	else
		req.tx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
							  uc->config.psd_size,
							  0) >> 2;
	req.txcq_qnum = tc_ring;

	ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);
		return ret;
	}

	/*
	 * The above TI SCI call handles the firewall configuration; the cfg
	 * register configuration still has to be done locally in the
	 * absence of RM services.
	 */
	if (IS_ENABLED(CONFIG_K3_DM_FW))
		udma_alloc_tchan_raw(uc);

	return 0;
}

static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int fd_ring = k3_nav_ringacc_get_ring_id(uc->rflow->fd_ring);
	int rx_ring = k3_nav_ringacc_get_ring_id(uc->rflow->r_ring);
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->config.pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->rchan->id;
	req.rx_chan_type = mode;
	if (uc->config.dir == DMA_MEM_TO_MEM) {
		req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		req.rxcq_qnum = tc_ring;
	} else {
		req.rx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
							  uc->config.psd_size,
							  0) >> 2;
		req.rxcq_qnum = rx_ring;
	}
	if (ud->match_data->type == DMA_TYPE_UDMA &&
	    uc->rflow->id != uc->rchan->id &&
	    uc->config.dir != DMA_MEM_TO_MEM) {
		req.flowid_start = uc->rflow->id;
		req.flowid_cnt = 1;
		req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
				    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
	}

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
			uc->rchan->id, ret);
		return ret;
	}
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return ret;

	flow_req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->config.needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;

	if (uc->config.psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;

	flow_req.rx_error_handling = 0;
	flow_req.rx_desc_type = 0;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = 2;
	flow_req.rx_src_tag_lo_sel = 4;
	flow_req.rx_dest_tag_hi_sel = 5;
	flow_req.rx_dest_tag_lo_sel = 4;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;
	flow_req.rx_ps_location = 0;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
						     &flow_req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
			uc->rchan->id, uc->rflow->id, ret);
		return ret;
	}

	/*
	 * The above TI SCI call handles the firewall configuration; the cfg
	 * register configuration still has to be done locally in the
	 * absence of RM services.
	 */
	if (IS_ENABLED(CONFIG_K3_DM_FW))
		udma_alloc_rchan_raw(uc);

	return 0;
}

static int udma_alloc_chan_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	pr_debug("%s: chan:%d as %s\n",
		 __func__, uc->id, udma_get_dir_text(uc->config.dir));

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		uc->config.pkt_mode = false;
		ret = udma_get_chan_pair(uc);
		if (ret)
			return ret;

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= 0x8000;
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	default:
		/* Can not happen */
		pr_debug("%s: chan:%d invalid direction (%u)\n",
			 __func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* We have channel indexes and rings */
	if (uc->config.dir == DMA_MEM_TO_MEM) {
		ret = udma_alloc_tchan_sci_req(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rchan_sci_req(uc);
		if (ret)
			goto err_free_res;
	} else {
		/* Slave transfer */
		if (uc->config.dir == DMA_MEM_TO_DEV) {
			ret = udma_alloc_tchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		} else {
			ret = udma_alloc_rchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		}
	}

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			goto err_free_res;
		}
	}

	/* PSI-L pairing */
	ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
	if (ret) {
		dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
		goto err_free_res;
	}

	return 0;

err_free_res:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	uc->config.remote_thread_id = -1;
	return ret;
}

static void udma_free_chan_resources(struct udma_chan *uc)
{
	/* Hard reset UDMA channel */
	udma_stop_hard(uc);
	udma_reset_counters(uc);

	/* Release PSI-L pairing */
	udma_navss_psil_unpair(uc->ud, uc->config.src_thread, uc->config.dst_thread);

	/* Reset the rings for a new start */
	udma_reset_rings(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	uc->config.remote_thread_id = -1;
	uc->config.dir = DMA_MEM_TO_MEM;
}

static const char * const range_names[] = {
	[RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
	[RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
	[RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
	[RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
	[RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
};

static int udma_get_mmrs(struct udevice *dev)
{
	struct udma_dev *ud = dev_get_priv(dev);
	u32 cap2, cap3, cap4;
	int i;

	ud->mmrs[MMR_GCFG] = dev_read_addr_name_ptr(dev, mmr_names[MMR_GCFG]);
	if (!ud->mmrs[MMR_GCFG])
		return -EINVAL;

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ud->rflow_cnt = cap3 & 0x3fff;
		ud->tchan_cnt = cap2 & 0x1ff;
		ud->echan_cnt = (cap2 >> 9) & 0x1ff;
		ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
		break;
	case DMA_TYPE_BCDMA:
		ud->bchan_cnt = cap2 & 0x1ff;
		ud->tchan_cnt = (cap2 >> 9) & 0x1ff;
		ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
		break;
	case DMA_TYPE_PKTDMA:
		cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
		ud->tchan_cnt = cap2 & 0x1ff;
		ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
		ud->rflow_cnt = cap3 & 0x3fff;
		ud->tflow_cnt = cap4 & 0x3fff;
		break;
	default:
		return -EINVAL;
	}

	for (i = 1; i < MMR_LAST; i++) {
		if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
			continue;
		if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
			continue;
		if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
			continue;

		ud->mmrs[i] = dev_read_addr_name_ptr(dev, mmr_names[i]);
		if (!ud->mmrs[i])
			return -EINVAL;
	}

	return 0;
}

static int udma_setup_resources(struct udma_dev *ud)
{
	struct udevice *dev = ud->dev;
	int i;
	struct ti_sci_resource_desc *rm_desc;
	struct ti_sci_resource *rm_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					      sizeof(unsigned long),
					      GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
	    !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/*
	 * RX flows with the same Ids as RX channels are reserved to be used
	 * as default flows if remote HW can't generate flow_ids. Those
	 * RX flows can be requested only explicitly by id.
	 */
	bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		bitmap_zero(ud->rflow_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		bitmap_fill(ud->rflow_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* GP rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		bitmap_clear(ud->rflow_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		bitmap_set(ud->rflow_map, ud->rchan_cnt,
			   ud->rflow_cnt - ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	return 0;
}

static int bcdma_setup_resources(struct udma_dev *ud)
{
	int i;
	struct udevice *dev = ud->dev;
	struct ti_sci_resource_desc *rm_desc;
	struct ti_sci_resource *rm_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
				  GFP_KERNEL);
	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
	    !ud->bchans || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* bchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->bchan_map, ud->bchan_cnt);
	} else {
		bitmap_fill(ud->bchan_map, ud->bchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->bchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: bchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* rchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	return 0;
}

static int pktdma_setup_resources(struct udma_dev *ud)
{
	int i;
	struct udevice *dev = ud->dev;
	struct ti_sci_resource *rm_res;
	struct ti_sci_resource_desc *rm_desc;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
				     sizeof(unsigned long),
				     GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);
	ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
	    !ud->rchans || !ud->rflows || !ud->rflow_map)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_BCHAN)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* rchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all rflows are assigned exclusively to Linux */
		bitmap_zero(ud->rflow_map, ud->rflow_cnt);
	} else {
		bitmap_fill(ud->rflow_map, ud->rflow_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* tflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		/* all tflows are assigned exclusively to Linux */
		bitmap_zero(ud->tflow_map, ud->tflow_cnt);
	} else {
		bitmap_fill(ud->tflow_map, ud->tflow_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tflow_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: tflow: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	return 0;
}

static int setup_resources(struct udma_dev *ud)
{
	struct udevice *dev = ud->dev;
	int ch_count, ret;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ret = udma_setup_resources(ud);
		break;
	case DMA_TYPE_BCDMA:
		ret = bcdma_setup_resources(ud);
		break;
	case DMA_TYPE_PKTDMA:
		ret = pktdma_setup_resources(ud);
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
	if (ud->bchan_cnt)
		ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		dev_dbg(dev,
			"Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
			ch_count,
			ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						      ud->tchan_cnt),
			ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						      ud->rchan_cnt),
			ud->rflow_cnt - bitmap_weight(ud->rflow_map,
						      ud->rflow_cnt));
		break;
	case DMA_TYPE_BCDMA:
		dev_dbg(dev,
			"Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
			ch_count,
			ud->bchan_cnt - bitmap_weight(ud->bchan_map,
						      ud->bchan_cnt),
			ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						      ud->tchan_cnt),
			ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						      ud->rchan_cnt));
		break;
	case DMA_TYPE_PKTDMA:
		dev_dbg(dev,
			"Channels: %d (tchan: %u, rchan: %u)\n",
			ch_count,
			ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						      ud->tchan_cnt),
			ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						      ud->rchan_cnt));
		break;
	default:
		break;
	}

	return ch_count;
}

static int udma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct udma_dev *ud = dev_get_priv(dev);
	int i, ret;
	struct udevice *tmp;
	struct udevice *tisci_dev = NULL;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));

	ud->match_data = (void *)dev_get_driver_data(dev);
	ret = udma_get_mmrs(dev);
	if (ret)
		return ret;

	ud->psil_base = ud->match_data->psil_base;

	ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
					   "ti,sci", &tisci_dev);
	if (ret) {
		debug("Failed to get TISCI phandle (%d)\n", ret);
		tisci_rm->tisci = NULL;
		return -EINVAL;
	}
	tisci_rm->tisci = (struct ti_sci_handle *)
			  (ti_sci_get_handle_from_sysfw(tisci_dev));

	tisci_rm->tisci_dev_id = -1;
	ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_navss_dev_id = -1;
	ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
			      &tisci_rm->tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
	tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;

	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
						   "ti,ringacc", &tmp);
		ud->ringacc = dev_get_priv(tmp);
	} else {
		struct k3_ringacc_init_data ring_init_data;

		ring_init_data.tisci = ud->tisci_rm.tisci;
		ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
		if (ud->match_data->type == DMA_TYPE_BCDMA) {
			ring_init_data.num_rings = ud->bchan_cnt +
						   ud->tchan_cnt +
						   ud->rchan_cnt;
		} else {
			ring_init_data.num_rings = ud->rflow_cnt +
						   ud->tflow_cnt;
		}

		ud->ringacc = k3_ringacc_dmarings_init(dev, &ring_init_data);
	}
	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	ud->dev = dev;
	ud->ch_count = setup_resources(ud);
	if (ud->ch_count <= 0)
		return ud->ch_count;

	for (i = 0; i < ud->bchan_cnt; i++) {
		struct udma_bchan *bchan = &ud->bchans[i];

		bchan->id = i;
		bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_chan = ud->mmrs[MMR_TCHAN] + UDMA_CH_100(i);
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_chan = ud->mmrs[MMR_RCHAN] + UDMA_CH_100(i);
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
		rflow->reg_rflow = ud->mmrs[MMR_RFLOW] + UDMA_CH_40(i);
	}

	for (i = 0; i < ud->ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->id = i;
		uc->config.remote_thread_id = -1;
		uc->bchan = NULL;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->config.mapped_channel_id = -1;
		uc->config.default_flow_id = -1;
		uc->config.dir = DMA_MEM_TO_MEM;
		sprintf(uc->name, "UDMA chan%d", i);
		if (!i)
			uc->in_use = true;
	}

	pr_debug("%s(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
		 dev->name,
		 udma_read(ud->mmrs[MMR_GCFG], 0),
		 udma_read(ud->mmrs[MMR_GCFG], 0x20),
		 udma_read(ud->mmrs[MMR_GCFG], 0x24),
		 udma_read(ud->mmrs[MMR_GCFG], 0x28),
		 udma_read(ud->mmrs[MMR_GCFG], 0x2c));

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;

	return 0;
}

static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
{
	u64 addr = 0;

	memcpy(&addr, &elem, sizeof(elem));
	return k3_nav_ringacc_ring_push(ring, &addr);
}

static int udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
				dma_addr_t src, size_t len)
{
	u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct cppi5_tr_type15_t *tr_req;
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned long dummy;
	void *tr_desc;
	size_t desc_size;

	if (len < SZ_64K) {
		num_tr = 1;
		tr0_cnt0 = len;
		tr0_cnt1 = 1;
	} else {
		unsigned long align_to = __ffs(src | dest);

		if (align_to > 3)
			align_to = 3;
		/*
		 * Keep simple: tr0: SZ_64K-alignment blocks,
		 * tr1: the remaining
		 */
		num_tr = 2;
		tr0_cnt0 = (SZ_64K - BIT(align_to));
		if (len / tr0_cnt0 >= SZ_64K) {
			dev_err(uc->ud->dev, "size %zu is not supported\n",
				len);
			return -EINVAL;
		}

		tr0_cnt1 = len / tr0_cnt0;
		tr1_cnt0 = len % tr0_cnt0;
	}

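	/*
	 * Worked example of the split above (illustrative numbers): for a
	 * maximally unaligned copy (align_to == 0) of len = 200000 bytes,
	 * tr0_cnt0 = SZ_64K - 1 = 65535, so tr0 moves
	 * tr0_cnt1 = 200000 / 65535 = 3 blocks of 65535 bytes, and tr1 moves
	 * the remaining tr1_cnt0 = 200000 % 65535 = 3395 bytes.
	 */
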
	desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
	tr_desc = dma_alloc_coherent(desc_size, &dummy);
	if (!tr_desc)
		return -ENOMEM;
	memset(tr_desc, 0, desc_size);

	cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);

	tr_req = tr_desc + tr_size;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;
	tr_req[0].icnt1 = tr0_cnt1;
	tr_req[0].icnt2 = 1;
	tr_req[0].icnt3 = 1;
	tr_req[0].dim1 = tr0_cnt0;

	tr_req[0].daddr = dest;
	tr_req[0].dicnt0 = tr0_cnt0;
	tr_req[0].dicnt1 = tr0_cnt1;
	tr_req[0].dicnt2 = 1;
	tr_req[0].dicnt3 = 1;
	tr_req[0].ddim1 = tr0_cnt0;

	if (num_tr == 2) {
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;
		tr_req[1].icnt1 = 1;
		tr_req[1].icnt2 = 1;
		tr_req[1].icnt3 = 1;

		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
		tr_req[1].dicnt0 = tr1_cnt0;
		tr_req[1].dicnt1 = 1;
		tr_req[1].dicnt2 = 1;
		tr_req[1].dicnt3 = 1;
	}

	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);

	flush_dcache_range((unsigned long)tr_desc,
			   ALIGN((unsigned long)tr_desc + desc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->tchan->t_ring, tr_desc);

	return 0;
}

#define TISCI_BCDMA_BCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)

#define TISCI_BCDMA_TCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)

#define TISCI_BCDMA_RCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)

#define TISCI_UDMA_TCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

#define TISCI_UDMA_RCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	struct udma_bchan *bchan = uc->bchan;
	int ret;

	req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
	req_tx.index = bchan->id;

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);

	return ret;
}

static struct udma_bchan *__bcdma_reserve_bchan(struct udma_dev *ud, int id)
{
	if (id >= 0) {
		if (test_bit(id, ud->bchan_map)) {
			dev_err(ud->dev, "bchan%d is in use\n", id);
			return ERR_PTR(-ENOENT);
		}
	} else {
		id = find_next_zero_bit(ud->bchan_map, ud->bchan_cnt, 0);
		if (id == ud->bchan_cnt)
			return ERR_PTR(-ENOENT);
	}

	__set_bit(id, ud->bchan_map);
	return &ud->bchans[id];
}

static int bcdma_get_bchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->bchan) {
		dev_err(ud->dev, "chan%d: already have bchan%d allocated\n",
			uc->id, uc->bchan->id);
		return 0;
	}

	uc->bchan = __bcdma_reserve_bchan(ud, -1);
	if (IS_ERR(uc->bchan))
		return PTR_ERR(uc->bchan);

	uc->tchan = uc->bchan;

	return 0;
}

static void bcdma_put_bchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->bchan) {
		dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
			uc->bchan->id);
		__clear_bit(uc->bchan->id, ud->bchan_map);
		uc->bchan = NULL;
		uc->tchan = NULL;
	}
}

static void bcdma_free_bchan_resources(struct udma_chan *uc)
{
	if (!uc->bchan)
		return;

	k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
	k3_nav_ringacc_ring_free(uc->bchan->t_ring);
	uc->bchan->tc_ring = NULL;
	uc->bchan->t_ring = NULL;

	bcdma_put_bchan(uc);
}

static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = bcdma_get_bchan(uc);
	if (ret)
		return ret;

	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
						&uc->bchan->t_ring,
						&uc->bchan->tc_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
	uc->bchan->tc_ring = NULL;
	k3_nav_ringacc_ring_free(uc->bchan->t_ring);
	uc->bchan->t_ring = NULL;
err_ring:
	bcdma_put_bchan(uc);

	return ret;
}

static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	int ret;

	req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
	if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
	    ud->match_data->flags & UDMA_FLAG_TDTYPE) {
		/* wait for peer to complete the teardown for PDMAs */
		req_tx.valid_params |=
				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
		req_tx.tx_tdtype = 1;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);

	return ret;
}

#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config

static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	int ret;

	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = uc->rchan->id;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret) {
		dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
		return ret;
	}

	flow_req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->config.needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;
	if (uc->config.psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;
	flow_req.rx_error_handling = 0;

	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
	if (ret)
		dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
			ret);

	return ret;
}

static int bcdma_alloc_chan_resources(struct udma_chan *uc)
{
	int ret;

	uc->config.pkt_mode = false;

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
			uc->id);

		ret = bcdma_alloc_bchan_resources(uc);
		if (ret)
			return ret;

		ret = bcdma_tisci_m2m_channel_config(uc);
		break;
	default:
		/* Can not happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	if (udma_is_chan_running(uc)) {
		dev_warn(uc->ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(uc->ud->dev, "chan%d: won't stop!\n", uc->id);
			goto err_res_free;
		}
	}

	udma_reset_rings(uc);

	return 0;

err_res_free:
	bcdma_free_bchan_resources(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	udma_reset_uchan(uc);

	return ret;
}

static int pktdma_alloc_chan_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	switch (uc->config.dir) {
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
			uc->id);

		ret = udma_alloc_tx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

		ret = pktdma_tisci_tx_channel_config(uc);
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
			uc->id);

		ret = udma_alloc_rx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		ret = pktdma_tisci_rx_channel_config(uc);
		break;
	default:
		/* Can not happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	/* PSI-L pairing */
	ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
	if (ret) {
		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
			uc->config.src_thread, uc->config.dst_thread);
		goto err_res_free;
	}

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			goto err_res_free;
		}
	}

	udma_reset_rings(uc);

	if (uc->tchan)
		dev_dbg(ud->dev,
			"chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
			uc->id, uc->tchan->id, uc->tchan->tflow_id,
			uc->config.remote_thread_id);
	else if (uc->rchan)
		dev_dbg(ud->dev,
			"chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
			uc->id, uc->rchan->id, uc->rflow->id,
			uc->config.remote_thread_id);

	return 0;

err_res_free:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	udma_reset_uchan(uc);

	return ret;
}

static int udma_transfer(struct udevice *dev, int direction,
			 dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct udma_dev *ud = dev_get_priv(dev);
	/* Channel 0 is reserved for memcpy */
	struct udma_chan *uc = &ud->channels[0];
	dma_addr_t paddr = 0;
	int ret;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ret = udma_alloc_chan_resources(uc);
		break;
	case DMA_TYPE_BCDMA:
		ret = bcdma_alloc_chan_resources(uc);
		break;
	default:
		return -EINVAL;
	}
	if (ret)
		return ret;

	udma_prep_dma_memcpy(uc, dst, src, len);
	udma_start(uc);
	udma_poll_completion(uc, &paddr);
	udma_stop(uc);

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		udma_free_chan_resources(uc);
		break;
	case DMA_TYPE_BCDMA:
		bcdma_free_bchan_resources(uc);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int udma_request(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan_config *ucc;
	struct udma_chan *uc;
	unsigned long dummy;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	uc = &ud->channels[dma->id];
	ucc = &uc->config;
	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ret = udma_alloc_chan_resources(uc);
		break;
	case DMA_TYPE_BCDMA:
		ret = bcdma_alloc_chan_resources(uc);
		break;
	case DMA_TYPE_PKTDMA:
		ret = pktdma_alloc_chan_resources(uc);
		break;
	default:
		return -EINVAL;
	}
	if (ret) {
		dev_err(dma->dev, "alloc dma res failed %d\n", ret);
		return -EINVAL;
	}

	if (uc->config.dir == DMA_MEM_TO_DEV) {
		uc->desc_tx = dma_alloc_coherent(ucc->hdesc_size, &dummy);
		memset(uc->desc_tx, 0, ucc->hdesc_size);
	} else {
		uc->desc_rx = dma_alloc_coherent(
				ucc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
		memset(uc->desc_rx, 0, ucc->hdesc_size * UDMA_RX_DESC_NUM);
	}

	uc->in_use = true;
	uc->desc_rx_cur = 0;
	uc->num_rx_bufs = 0;

	if (uc->config.dir == DMA_DEV_TO_MEM) {
		uc->cfg_data.flow_id_base = uc->rflow->id;
		uc->cfg_data.flow_id_cnt = 1;
	}

	return 0;
}

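/*
 * A consumer (e.g. an Ethernet driver) typically exercises a channel set up
 * by udma_request() through the generic U-Boot DMA uclass, roughly as in
 * this sketch (the "tx" channel name and buffer handling are illustrative,
 * not mandated by this driver):
 *
 *	struct dma dma_tx;
 *	int ret;
 *
 *	ret = dma_get_by_name(dev, "tx", &dma_tx); // of_xlate + request
 *	if (!ret)
 *		ret = dma_enable(&dma_tx);
 *	if (!ret)
 *		ret = dma_send(&dma_tx, buf, len, &packet_data);
 */
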
static int udma_rfree(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		udma_stop(uc);

	udma_navss_psil_unpair(ud, uc->config.src_thread,
			       uc->config.dst_thread);

	bcdma_free_bchan_resources(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	udma_reset_uchan(uc);

	uc->in_use = false;

	return 0;
}

static int udma_enable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	ret = udma_start(uc);

	return ret;
}

static int udma_disable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret = 0;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		ret = udma_stop(uc);
	else
		dev_err(dma->dev, "%s not running\n", __func__);

	return ret;
}

static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_tx;
	dma_addr_t dma_src = (dma_addr_t)src;
	struct ti_udma_drv_packet_data packet_data = { 0 };
	dma_addr_t paddr;
	struct udma_chan *uc;
	u32 tc_ring_id;
	int ret;

	if (metadata)
		packet_data = *((struct ti_udma_drv_packet_data *)metadata);

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->config.dir != DMA_MEM_TO_DEV)
		return -EINVAL;

	tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);

	desc_tx = uc->desc_tx;

	cppi5_hdesc_reset_hbdesc(desc_tx);

	cppi5_hdesc_init(desc_tx,
			 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->config.psd_size);
	cppi5_hdesc_set_pktlen(desc_tx, len);
	cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
	cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
	/* pass below information from caller */
	cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
	cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);

	flush_dcache_range((unsigned long)dma_src,
			   ALIGN((unsigned long)dma_src + len,
				 ARCH_DMA_MINALIGN));
	flush_dcache_range((unsigned long)desc_tx,
			   ALIGN((unsigned long)desc_tx + uc->config.hdesc_size,
				 ARCH_DMA_MINALIGN));

	ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
	if (ret) {
		dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
			dma->id, ret);
		return ret;
	}

	udma_poll_completion(uc, &paddr);

	return 0;
}

static int udma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan_config *ucc;
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t buf_dma;
	struct udma_chan *uc;
	u32 buf_dma_len, pkt_len;
	u32 port_id = 0;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];
	ucc = &uc->config;

	if (uc->config.dir != DMA_DEV_TO_MEM)
		return -EINVAL;
	if (!uc->num_rx_bufs)
		return -EINVAL;

	ret = k3_nav_ringacc_ring_pop(uc->rflow->r_ring, &desc_rx);
	if (ret && ret != -ENODATA) {
		dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
		return ret;
	} else if (ret == -ENODATA) {
		return 0;
	}

	/* invalidate cache data */
	invalidate_dcache_range((ulong)desc_rx,
				(ulong)desc_rx + ucc->hdesc_size);

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);

	/* invalidate cache data */
	invalidate_dcache_range((ulong)buf_dma,
				(ulong)(buf_dma + buf_dma_len));

	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	*dst = (void *)buf_dma;
	uc->num_rx_bufs--;

	return pkt_len;
}

static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
{
	struct udma_chan_config *ucc;
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc = &ud->channels[0];
	struct psil_endpoint_config *ep_config;
	u32 val;

	for (val = 0; val < ud->ch_count; val++) {
		uc = &ud->channels[val];
		if (!uc->in_use)
			break;
	}

	if (val == ud->ch_count)
		return -EBUSY;

	ucc = &uc->config;
	ucc->remote_thread_id = args->args[0];
	if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
		ucc->dir = DMA_MEM_TO_DEV;
	else
		ucc->dir = DMA_DEV_TO_MEM;

	ep_config = psil_get_ep_config(ucc->remote_thread_id);
	if (IS_ERR(ep_config)) {
		dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
			uc->config.remote_thread_id);
		ucc->dir = DMA_MEM_TO_MEM;
		ucc->remote_thread_id = -1;
		return -EINVAL;
	}

	ucc->pkt_mode = ep_config->pkt_mode;
	ucc->channel_tpl = ep_config->channel_tpl;
	ucc->notdpkt = ep_config->notdpkt;
	ucc->ep_type = ep_config->ep_type;

	if (ud->match_data->type == DMA_TYPE_PKTDMA &&
	    ep_config->mapped_channel_id >= 0) {
		ucc->mapped_channel_id = ep_config->mapped_channel_id;
		ucc->default_flow_id = ep_config->default_flow_id;
	} else {
		ucc->mapped_channel_id = -1;
		ucc->default_flow_id = -1;
	}

	ucc->needs_epib = ep_config->needs_epib;
	ucc->psd_size = ep_config->psd_size;
	ucc->metadata_size = (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
			     ucc->psd_size;

	ucc->hdesc_size = cppi5_hdesc_calc_size(ucc->needs_epib,
						ucc->psd_size, 0);
	ucc->hdesc_size = ALIGN(ucc->hdesc_size, ARCH_DMA_MINALIGN);

	dma->id = uc->id;
	pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
		 dma->id, ucc->needs_epib,
		 ucc->psd_size, ucc->metadata_size,
		 ucc->remote_thread_id);

	return 0;
}

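/*
 * For reference, a client node typically references this controller with a
 * single PSI-L thread ID cell, e.g. (illustrative sketch; the node name and
 * thread IDs are SoC specific):
 *
 *	ethernet@... {
 *		dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * Thread IDs with bit 15 set (K3_PSIL_DST_THREAD_ID_OFFSET) are destination
 * threads, hence udma_of_xlate() maps them to MEM_TO_DEV above.
 */
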
int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t dma_dst;
	struct udma_chan *uc;
	u32 desc_num;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->config.dir != DMA_DEV_TO_MEM)
		return -EINVAL;

	if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
		return -EINVAL;

	desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
	desc_rx = uc->desc_rx + (desc_num * uc->config.hdesc_size);
	dma_dst = (dma_addr_t)dst;

	cppi5_hdesc_reset_hbdesc(desc_rx);

	cppi5_hdesc_init(desc_rx,
			 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->config.psd_size);
	cppi5_hdesc_set_pktlen(desc_rx, size);
	cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);

	flush_dcache_range((unsigned long)desc_rx,
			   ALIGN((unsigned long)desc_rx + uc->config.hdesc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->rflow->fd_ring, desc_rx);

	uc->num_rx_bufs++;
	uc->desc_rx_cur++;

	return 0;
}

static int udma_get_cfg(struct dma *dma, u32 id, void **data)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	switch (id) {
	case TI_UDMA_CHAN_PRIV_INFO:
		uc = &ud->channels[dma->id];
		*data = &uc->cfg_data;
		return 0;
	}

	return -EINVAL;
}

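/*
 * Clients needing the RX flow range (e.g. to program a peer such as an
 * Ethernet switch) can fetch it through the generic get_cfg hook; a minimal
 * sketch, assuming an already requested RX channel in dma_rx:
 *
 *	struct ti_udma_drv_chan_cfg_data *cfg;
 *
 *	if (!dma_get_cfg(&dma_rx, TI_UDMA_CHAN_PRIV_INFO, (void **)&cfg))
 *		flow_id = cfg->flow_id_base;
 */
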
static const struct dma_ops udma_ops = {
	.transfer	= udma_transfer,
	.of_xlate	= udma_of_xlate,
	.request	= udma_request,
	.rfree		= udma_rfree,
	.enable		= udma_enable,
	.disable	= udma_disable,
	.send		= udma_send,
	.receive	= udma_receive,
	.prepare_rcv_buf = udma_prepare_rcv_buf,
	.get_cfg	= udma_get_cfg,
};

static struct udma_match_data am654_main_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.statictr_z_mask = GENMASK(11, 0),
	.oes = {
		.udma_rchan = 0x200,
	},
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 8, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static struct udma_match_data am654_mcu_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x6000,
	.enable_memcpy_support = true,
	.statictr_z_mask = GENMASK(11, 0),
	.oes = {
		.udma_rchan = 0x200,
	},
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 2, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static struct udma_match_data j721e_main_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
	.oes = {
		.udma_rchan = 0x400,
	},
	.tpl_levels = 3,
	.level_start_idx = {
		[0] = 16, /* Normal channels */
		[1] = 4, /* High Throughput channels */
		[2] = 0, /* Ultra High Throughput channels */
	},
};

static struct udma_match_data j721e_mcu_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x6000,
	.enable_memcpy_support = true,
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
	.oes = {
		.udma_rchan = 0x400,
	},
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 2, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static struct udma_match_data am64_bcdma_data = {
	.type = DMA_TYPE_BCDMA,
	.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
	.enable_memcpy_support = true, /* Supported via bchan */
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
	.oes = {
		.bcdma_bchan_data = 0x2200,
		.bcdma_bchan_ring = 0x2400,
		.bcdma_tchan_data = 0x2800,
		.bcdma_tchan_ring = 0x2a00,
		.bcdma_rchan_data = 0x2e00,
		.bcdma_rchan_ring = 0x3000,
	},
	/* No throughput levels */
};

static struct udma_match_data am64_pktdma_data = {
	.type = DMA_TYPE_PKTDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = false,
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
	.oes = {
		.pktdma_tchan_flow = 0x1200,
		.pktdma_rchan_flow = 0x1600,
	},
	/* No throughput levels */
};

static const struct udevice_id udma_ids[] = {
	{
		.compatible = "ti,am654-navss-main-udmap",
		.data = (ulong)&am654_main_data,
	}, {
		.compatible = "ti,am654-navss-mcu-udmap",
		.data = (ulong)&am654_mcu_data,
	}, {
		.compatible = "ti,j721e-navss-main-udmap",
		.data = (ulong)&j721e_main_data,
	}, {
		.compatible = "ti,j721e-navss-mcu-udmap",
		.data = (ulong)&j721e_mcu_data,
	}, {
		.compatible = "ti,am64-dmss-bcdma",
		.data = (ulong)&am64_bcdma_data,
	}, {
		.compatible = "ti,am64-dmss-pktdma",
		.data = (ulong)&am64_pktdma_data,
	},
	{ /* Sentinel */ },
};

U_BOOT_DRIVER(ti_edma3) = {
	.name	= "ti-udma",
	.id	= UCLASS_DMA,
	.of_match = udma_ids,
	.ops	= &udma_ops,
	.probe	= udma_probe,
	.priv_auto = sizeof(struct udma_dev),
};