// SPDX-License-Identifier: GPL-2.0+
/*
 *  Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com
 *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#define pr_fmt(fmt) "udma: " fmt

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <malloc.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <dm/of_access.h>
#include <dma.h>
#include <dma-uclass.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/printk.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/cppi5.h>
#include <linux/soc/ti/ti-udma.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "k3-udma-hwdef.h"
#include "k3-psil-priv.h"

#define K3_UDMA_MAX_RFLOWS 1024

struct udma_chan;

enum k3_dma_type {
        DMA_TYPE_UDMA = 0,
        DMA_TYPE_BCDMA,
        DMA_TYPE_PKTDMA,
};

enum udma_mmr {
        MMR_GCFG = 0,
        MMR_BCHANRT,
        MMR_RCHANRT,
        MMR_TCHANRT,
        MMR_RCHAN,
        MMR_TCHAN,
        MMR_RFLOW,
        MMR_LAST,
};

static const char * const mmr_names[] = {
        [MMR_GCFG] = "gcfg",
        [MMR_BCHANRT] = "bchanrt",
        [MMR_RCHANRT] = "rchanrt",
        [MMR_TCHANRT] = "tchanrt",
        [MMR_RCHAN] = "rchan",
        [MMR_TCHAN] = "tchan",
        [MMR_RFLOW] = "rflow",
};

struct udma_tchan {
        void __iomem *reg_chan;
        void __iomem *reg_rt;

        int id;
        struct k3_nav_ring *t_ring; /* Transmit ring */
        struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
        int tflow_id; /* applicable only for PKTDMA */
};

#define udma_bchan udma_tchan

struct udma_rflow {
        void __iomem *reg_rflow;
        int id;
        struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
        struct k3_nav_ring *r_ring; /* Receive ring */
};

struct udma_rchan {
        void __iomem *reg_chan;
        void __iomem *reg_rt;

        int id;
};

struct udma_oes_offsets {
        /* K3 UDMA Output Event Offset */
        u32 udma_rchan;

        /* BCDMA Output Event Offsets */
        u32 bcdma_bchan_data;
        u32 bcdma_bchan_ring;
        u32 bcdma_tchan_data;
        u32 bcdma_tchan_ring;
        u32 bcdma_rchan_data;
        u32 bcdma_rchan_ring;

        /* PKTDMA Output Event Offsets */
        u32 pktdma_tchan_flow;
        u32 pktdma_rchan_flow;
};

#define UDMA_FLAG_PDMA_ACC32            BIT(0)
#define UDMA_FLAG_PDMA_BURST            BIT(1)
#define UDMA_FLAG_TDTYPE                BIT(2)

struct udma_match_data {
        enum k3_dma_type type;
        u32 psil_base;
        bool enable_memcpy_support;
        u32 flags;
        u32 statictr_z_mask;
        struct udma_oes_offsets oes;

        u8 tpl_levels;
        u32 level_start_idx[];
};

enum udma_rm_range {
        RM_RANGE_BCHAN = 0,
        RM_RANGE_TCHAN,
        RM_RANGE_RCHAN,
        RM_RANGE_RFLOW,
        RM_RANGE_TFLOW,
        RM_RANGE_LAST,
};

struct udma_tisci_rm {
        const struct ti_sci_handle *tisci;
        const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
        u32  tisci_dev_id;

        /* tisci information for PSI-L thread pairing/unpairing */
        const struct ti_sci_rm_psil_ops *tisci_psil_ops;
        u32  tisci_navss_dev_id;

        struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
};

struct udma_dev {
        struct udevice *dev;
        void __iomem *mmrs[MMR_LAST];

        struct udma_tisci_rm tisci_rm;
        struct k3_nav_ringacc *ringacc;

        u32 features;

        int bchan_cnt;
        int tchan_cnt;
        int echan_cnt;
        int rchan_cnt;
        int rflow_cnt;
        int tflow_cnt;
        unsigned long *bchan_map;
        unsigned long *tchan_map;
        unsigned long *rchan_map;
        unsigned long *rflow_map;
        unsigned long *rflow_map_reserved;
        unsigned long *tflow_map;

        struct udma_bchan *bchans;
        struct udma_tchan *tchans;
        struct udma_rchan *rchans;
        struct udma_rflow *rflows;

        struct udma_match_data *match_data;

        struct udma_chan *channels;
        u32 psil_base;

        u32 ch_count;
};

struct udma_chan_config {
        u32 psd_size; /* size of Protocol Specific Data */
        u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
        u32 hdesc_size; /* Size of a packet descriptor in packet mode */
        int remote_thread_id;
        u32 atype;
        u32 src_thread;
        u32 dst_thread;
        enum psil_endpoint_type ep_type;
        enum udma_tp_level channel_tpl; /* Channel Throughput Level */

        /* PKTDMA mapped channel */
        int mapped_channel_id;
        /* PKTDMA default tflow or rflow for mapped channel */
        int default_flow_id;

        enum dma_direction dir;

        unsigned int pkt_mode:1; /* TR or packet mode */
        unsigned int needs_epib:1; /* whether EPIB is needed for the communication */
        unsigned int enable_acc32:1;
        unsigned int enable_burst:1;
        unsigned int notdpkt:1; /* Suppress sending TDC packet */
};

struct udma_chan {
        struct udma_dev *ud;
        char name[20];

        struct udma_bchan *bchan;
        struct udma_tchan *tchan;
        struct udma_rchan *rchan;
        struct udma_rflow *rflow;

        struct ti_udma_drv_chan_cfg_data cfg_data;

        u32 bcnt; /* number of bytes completed since the start of the channel */

        struct udma_chan_config config;

        u32 id;

        struct cppi5_host_desc_t *desc_tx;
        bool in_use;
        void    *desc_rx;
        u32     num_rx_bufs;
        u32     desc_rx_cur;
};

#define UDMA_CH_1000(ch)                ((ch) * 0x1000)
#define UDMA_CH_100(ch)                 ((ch) * 0x100)
#define UDMA_CH_40(ch)                  ((ch) * 0x40)

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM 4
#endif

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
        u32 v;

        v = __raw_readl(base + reg);
        pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
        return v;
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
        pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
        __raw_writel(val, base + reg);
}

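/*
 * Read-modify-write helper: only the bits selected by @mask are
 * updated, and the register write is skipped entirely when the value
 * would not change.
 */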
static inline void udma_update_bits(void __iomem *base, int reg,
                                    u32 mask, u32 val)
{
        u32 tmp, orig;

        orig = udma_read(base, reg);
        tmp = orig & ~mask;
        tmp |= (val & mask);

        if (tmp != orig)
                udma_write(base, reg, tmp);
}

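/*
 * Per-channel realtime (RT) register accessors. They tolerate a NULL
 * channel (reads return 0, writes are dropped) so callers do not have
 * to special-case half-configured directions.
 */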
/* TCHANRT */
static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
{
        if (!tchan)
                return 0;
        return udma_read(tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_tchan *tchan,
                                      int reg, u32 val)
{
        if (!tchan)
                return;
        udma_write(tchan->reg_rt, reg, val);
}

/* RCHANRT */
static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
{
        if (!rchan)
                return 0;
        return udma_read(rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_rchan *rchan,
                                      int reg, u32 val)
{
        if (!rchan)
                return;
        udma_write(rchan->reg_rt, reg, val);
}

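/*
 * PSI-L thread pairing/unpairing via TI SCI. The destination thread
 * sits on the remote end of the link, so the destination thread-ID
 * offset bit is set before the request is handed to System Firmware.
 */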
static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
                                       u32 dst_thread)
{
        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

        dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

        return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
                                              tisci_rm->tisci_navss_dev_id,
                                              src_thread, dst_thread);
}

static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
                                         u32 dst_thread)
{
        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

        dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

        return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
                                                tisci_rm->tisci_navss_dev_id,
                                                src_thread, dst_thread);
}

static inline const char *udma_get_dir_text(enum dma_direction dir)
{
        switch (dir) {
        case DMA_DEV_TO_MEM:
                return "DEV_TO_MEM";
        case DMA_MEM_TO_DEV:
                return "MEM_TO_DEV";
        case DMA_MEM_TO_MEM:
                return "MEM_TO_MEM";
        case DMA_DEV_TO_DEV:
                return "DEV_TO_DEV";
        default:
                break;
        }

        return "invalid";
}

#include "k3-udma-u-boot.c"

static void udma_reset_uchan(struct udma_chan *uc)
{
        memset(&uc->config, 0, sizeof(uc->config));
        uc->config.remote_thread_id = -1;
        uc->config.mapped_channel_id = -1;
        uc->config.default_flow_id = -1;
}

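/*
 * A channel counts as running when the RT enable bit is set on either
 * half used by the configured direction.
 */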
static inline bool udma_is_chan_running(struct udma_chan *uc)
{
        u32 trt_ctl = 0;
        u32 rrt_ctl = 0;

        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
                pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
                         __func__, rrt_ctl,
                         udma_rchanrt_read(uc->rchan,
                                           UDMA_RCHAN_RT_PEER_RT_EN_REG));
                break;
        case DMA_MEM_TO_DEV:
                trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
                pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
                         __func__, trt_ctl,
                         udma_tchanrt_read(uc->tchan,
                                           UDMA_TCHAN_RT_PEER_RT_EN_REG));
                break;
        case DMA_MEM_TO_MEM:
                trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
                rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
                break;
        default:
                break;
        }

        if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
                return true;

        return false;
}

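/*
 * Pop one completed descriptor address from the channel's completion
 * ring (the receive ring for DEV_TO_MEM, the TX completion ring
 * otherwise). Returns -ENOENT while the ring is empty.
 */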
static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
        struct k3_nav_ring *ring = NULL;
        int ret = -ENOENT;

        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                ring = uc->rflow->r_ring;
                break;
        case DMA_MEM_TO_DEV:
        case DMA_MEM_TO_MEM:
                ring = uc->tchan->tc_ring;
                break;
        default:
                break;
        }

        if (ring && k3_nav_ringacc_ring_get_occ(ring))
                ret = k3_nav_ringacc_ring_pop(ring, addr);

        return ret;
}

static void udma_reset_rings(struct udma_chan *uc)
{
        struct k3_nav_ring *ring1 = NULL;
        struct k3_nav_ring *ring2 = NULL;

        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                ring1 = uc->rflow->fd_ring;
                ring2 = uc->rflow->r_ring;
                break;
        case DMA_MEM_TO_DEV:
        case DMA_MEM_TO_MEM:
                ring1 = uc->tchan->t_ring;
                ring2 = uc->tchan->tc_ring;
                break;
        default:
                break;
        }

        if (ring1)
                k3_nav_ringacc_ring_reset_dma(ring1, k3_nav_ringacc_ring_get_occ(ring1));
        if (ring2)
                k3_nav_ringacc_ring_reset(ring2);
}

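/*
 * Reset the RT byte/packet counters: writing back the value just read
 * clears each counter, so the channel starts counting from zero again.
 */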
static void udma_reset_counters(struct udma_chan *uc)
{
        u32 val;

        if (uc->tchan) {
                val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);

                val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);

                val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

                if (!uc->bchan) {
                        val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
                        udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
                }
        }

        if (uc->rchan) {
                val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);

                val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);

                val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);

                val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
        }

        uc->bcnt = 0;
}

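/* Disable both channel halves immediately, without a graceful teardown. */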
static inline int udma_stop_hard(struct udma_chan *uc)
{
        pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);

        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
                break;
        case DMA_MEM_TO_DEV:
                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
                break;
        case DMA_MEM_TO_MEM:
                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

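/*
 * Enable the channel for the configured direction. For the slave
 * directions the peer (remote PSI-L thread) is enabled as well; note
 * the ordering: RX enables the local end before the peer, TX enables
 * the peer before the local end, so data is not pushed at a side that
 * is not yet ready.
 */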
static int udma_start(struct udma_chan *uc)
{
        /* Channel is already running, no need to proceed further */
        if (udma_is_chan_running(uc))
                goto out;

        pr_debug("%s: chan:%d dir:%s\n",
                 __func__, uc->id, udma_get_dir_text(uc->config.dir));

        /* Make sure that we clear the teardown bit, if it is set */
        udma_stop_hard(uc);

        /* Reset all counters */
        udma_reset_counters(uc);

        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
                                   UDMA_CHAN_RT_CTL_EN);

                /* Enable remote */
                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
                                   UDMA_PEER_RT_EN_ENABLE);

                pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
                         __func__,
                         udma_rchanrt_read(uc->rchan,
                                           UDMA_RCHAN_RT_CTL_REG),
                         udma_rchanrt_read(uc->rchan,
                                           UDMA_RCHAN_RT_PEER_RT_EN_REG));
                break;
        case DMA_MEM_TO_DEV:
                /* Enable remote */
                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
                                   UDMA_PEER_RT_EN_ENABLE);

                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
                                   UDMA_CHAN_RT_CTL_EN);

                pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
                         __func__,
                         udma_tchanrt_read(uc->tchan,
                                           UDMA_TCHAN_RT_CTL_REG),
                         udma_tchanrt_read(uc->tchan,
                                           UDMA_TCHAN_RT_PEER_RT_EN_REG));
                break;
        case DMA_MEM_TO_MEM:
                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
                                   UDMA_CHAN_RT_CTL_EN);
                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
                                   UDMA_CHAN_RT_CTL_EN);
                break;
        default:
                return -EINVAL;
        }

        pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
out:
        return 0;
}

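/*
 * Graceful teardown helpers. The channel is asked to tear itself down
 * and, when @sync is set, the caller busy-waits (roughly 1 ms worth of
 * udelay(1) polls) for the enable bit to clear, then verifies that the
 * peer stopped as well.
 */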
static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
{
        int i = 0;
        u32 val;

        udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
                           UDMA_CHAN_RT_CTL_EN |
                           UDMA_CHAN_RT_CTL_TDOWN);

        val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);

        while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
                val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
                udelay(1);
                if (i > 1000) {
                        printf("%s TIMEOUT !\n", __func__);
                        break;
                }
                i++;
        }

        val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
        if (val & UDMA_PEER_RT_EN_ENABLE)
                printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
{
        int i = 0;
        u32 val;

        udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
                           UDMA_PEER_RT_EN_ENABLE |
                           UDMA_PEER_RT_EN_TEARDOWN);

        val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);

        while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
                val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
                udelay(1);
                if (i > 1000) {
                        printf("%s TIMEOUT !\n", __func__);
                        break;
                }
                i++;
        }

        val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
        if (val & UDMA_PEER_RT_EN_ENABLE)
                printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline int udma_stop(struct udma_chan *uc)
{
        pr_debug("%s: chan:%d dir:%s\n",
                 __func__, uc->id, udma_get_dir_text(uc->config.dir));

        udma_reset_counters(uc);
        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                udma_stop_dev2mem(uc, true);
                break;
        case DMA_MEM_TO_DEV:
                udma_stop_mem2dev(uc, true);
                break;
        case DMA_MEM_TO_MEM:
                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
{
        int i = 1;

        while (udma_pop_from_ring(uc, paddr)) {
                udelay(1);
                if (!(i % 1000000))
                        printf(".");
                i++;
        }
}

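/*
 * Reserve an rflow by explicit id, or, when id < 0, pick the first
 * free general-purpose flow above the per-rchan default range while
 * honouring the reserved-flow bitmap.
 */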
static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
{
        DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

        if (id >= 0) {
                if (test_bit(id, ud->rflow_map)) {
                        dev_err(ud->dev, "rflow%d is in use\n", id);
                        return ERR_PTR(-ENOENT);
                }
        } else {
                bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
                          ud->rflow_cnt);

                id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
                if (id >= ud->rflow_cnt)
                        return ERR_PTR(-ENOENT);
        }

        __set_bit(id, ud->rflow_map);
        return &ud->rflows[id];
}

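/*
 * Generates __udma_reserve_tchan()/__udma_reserve_rchan(): reserve a
 * channel by explicit id, or the first free one when id < 0.
 */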
#define UDMA_RESERVE_RESOURCE(res)                                      \
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,     \
                                               int id)                  \
{                                                                       \
        if (id >= 0) {                                                  \
                if (test_bit(id, ud->res##_map)) {                      \
                        dev_err(ud->dev, #res "%d is in use\n", id);    \
                        return ERR_PTR(-ENOENT);                        \
                }                                                       \
        } else {                                                        \
                id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \
                if (id == ud->res##_cnt) {                              \
                        return ERR_PTR(-ENOENT);                        \
                }                                                       \
        }                                                               \
                                                                        \
        __set_bit(id, ud->res##_map);                                   \
        return &ud->res##s[id];                                         \
}

UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);

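/*
 * On PKTDMA every tchan is paired with a TX flow (the mapped default
 * flow id when one is set, the tchan id otherwise); for the other DMA
 * types tflow_id stays -1.
 */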
static int udma_get_tchan(struct udma_chan *uc)
{
        struct udma_dev *ud = uc->ud;

        if (uc->tchan) {
                dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
                        uc->id, uc->tchan->id);
                return 0;
        }

        uc->tchan = __udma_reserve_tchan(ud, uc->config.mapped_channel_id);
        if (IS_ERR(uc->tchan))
                return PTR_ERR(uc->tchan);

        if (ud->tflow_cnt) {
                int tflow_id;

                /* Only PKTDMA has support for tx flows */
                if (uc->config.default_flow_id >= 0)
                        tflow_id = uc->config.default_flow_id;
                else
                        tflow_id = uc->tchan->id;

                if (test_bit(tflow_id, ud->tflow_map)) {
                        dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
                        __clear_bit(uc->tchan->id, ud->tchan_map);
                        uc->tchan = NULL;
                        return -ENOENT;
                }

                uc->tchan->tflow_id = tflow_id;
                __set_bit(tflow_id, ud->tflow_map);
        } else {
                uc->tchan->tflow_id = -1;
        }

        pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);

        return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
        struct udma_dev *ud = uc->ud;

        if (uc->rchan) {
                dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
                        uc->id, uc->rchan->id);
                return 0;
        }

        uc->rchan = __udma_reserve_rchan(ud, uc->config.mapped_channel_id);
        if (IS_ERR(uc->rchan))
                return PTR_ERR(uc->rchan);

        pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);

        return 0;
}

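/*
 * MEM_TO_MEM needs a tchan/rchan pair with matching ids so the two
 * halves can be paired back-to-back over PSI-L.
 */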
static int udma_get_chan_pair(struct udma_chan *uc)
{
        struct udma_dev *ud = uc->ud;
        int chan_id, end;

        if (uc->tchan && uc->rchan && uc->tchan->id == uc->rchan->id) {
                dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
                         uc->id, uc->tchan->id);
                return 0;
        }

        if (uc->tchan) {
                dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
                        uc->id, uc->tchan->id);
                return -EBUSY;
        } else if (uc->rchan) {
                dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
                        uc->id, uc->rchan->id);
                return -EBUSY;
        }

        /* Can be optimized, but let's have it like this for now */
        end = min(ud->tchan_cnt, ud->rchan_cnt);
        for (chan_id = 0; chan_id < end; chan_id++) {
                if (!test_bit(chan_id, ud->tchan_map) &&
                    !test_bit(chan_id, ud->rchan_map))
                        break;
        }

        if (chan_id == end)
                return -ENOENT;

        __set_bit(chan_id, ud->tchan_map);
        __set_bit(chan_id, ud->rchan_map);
        uc->tchan = &ud->tchans[chan_id];
        uc->rchan = &ud->rchans[chan_id];

        pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);

        return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
        struct udma_dev *ud = uc->ud;

        if (uc->rflow) {
                dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
                        uc->id, uc->rflow->id);
                return 0;
        }

        if (!uc->rchan)
                dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);

        uc->rflow = __udma_reserve_rflow(ud, flow_id);
        if (IS_ERR(uc->rflow))
                return PTR_ERR(uc->rflow);

        pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
        return 0;
}

static void udma_put_rchan(struct udma_chan *uc)
{
        struct udma_dev *ud = uc->ud;

        if (uc->rchan) {
                dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
                        uc->rchan->id);
                __clear_bit(uc->rchan->id, ud->rchan_map);
                uc->rchan = NULL;
        }
}

static void udma_put_tchan(struct udma_chan *uc)
{
        struct udma_dev *ud = uc->ud;

        if (uc->tchan) {
                dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
                        uc->tchan->id);
                __clear_bit(uc->tchan->id, ud->tchan_map);
                if (uc->tchan->tflow_id >= 0)
                        __clear_bit(uc->tchan->tflow_id, ud->tflow_map);
                uc->tchan = NULL;
        }
}

static void udma_put_rflow(struct udma_chan *uc)
{
        struct udma_dev *ud = uc->ud;

        if (uc->rflow) {
                dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
                        uc->rflow->id);
                __clear_bit(uc->rflow->id, ud->rflow_map);
                uc->rflow = NULL;
        }
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
        if (!uc->tchan)
                return;

        k3_nav_ringacc_ring_free(uc->tchan->t_ring);
        k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
        uc->tchan->t_ring = NULL;
        uc->tchan->tc_ring = NULL;

        udma_put_tchan(uc);
}

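/*
 * The TX path needs a ring pair: t_ring feeds descriptors to the
 * channel, tc_ring returns them on completion. Both are configured as
 * 16 elements of 8 bytes in ring mode.
 */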
static int udma_alloc_tx_resources(struct udma_chan *uc)
{
        struct k3_nav_ring_cfg ring_cfg;
        struct udma_dev *ud = uc->ud;
        int ret;

        ret = udma_get_tchan(uc);
        if (ret)
                return ret;

        ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
                                                &uc->tchan->t_ring,
                                                &uc->tchan->tc_ring);
        if (ret) {
                ret = -EBUSY;
                goto err_tx_ring;
        }

        memset(&ring_cfg, 0, sizeof(ring_cfg));
        ring_cfg.size = 16;
        ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
        ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

        ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
        ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
        if (ret)
                goto err_ringcfg;

        return 0;

err_ringcfg:
        k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
        uc->tchan->tc_ring = NULL;
        k3_nav_ringacc_ring_free(uc->tchan->t_ring);
        uc->tchan->t_ring = NULL;
err_tx_ring:
        udma_put_tchan(uc);

        return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
        if (!uc->rchan)
                return;

        if (uc->rflow) {
                k3_nav_ringacc_ring_free(uc->rflow->fd_ring);
                k3_nav_ringacc_ring_free(uc->rflow->r_ring);
                uc->rflow->fd_ring = NULL;
                uc->rflow->r_ring = NULL;

                udma_put_rflow(uc);
        }

        udma_put_rchan(uc);
}

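/*
 * The RX path mirrors TX but goes through an rflow: fd_ring supplies
 * free descriptors, r_ring collects completed ones. The fd ring index
 * is derived differently for PKTDMA (tflow based) and UDMA/BCDMA.
 */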
static int udma_alloc_rx_resources(struct udma_chan *uc)
{
        struct k3_nav_ring_cfg ring_cfg;
        struct udma_dev *ud = uc->ud;
        struct udma_rflow *rflow;
        int fd_ring_id;
        int ret;

        ret = udma_get_rchan(uc);
        if (ret)
                return ret;

        /* For MEM_TO_MEM we don't need rflow or rings */
        if (uc->config.dir == DMA_MEM_TO_MEM)
                return 0;

        if (uc->config.default_flow_id >= 0)
                ret = udma_get_rflow(uc, uc->config.default_flow_id);
        else
                ret = udma_get_rflow(uc, uc->rchan->id);

        if (ret) {
                ret = -EBUSY;
                goto err_rflow;
        }

        rflow = uc->rflow;
        if (ud->tflow_cnt) {
                fd_ring_id = ud->tflow_cnt + rflow->id;
        } else {
                fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
                        uc->rchan->id;
        }

        ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
                                                &rflow->fd_ring, &rflow->r_ring);
        if (ret) {
                ret = -EBUSY;
                goto err_rx_ring;
        }

        memset(&ring_cfg, 0, sizeof(ring_cfg));
        ring_cfg.size = 16;
        ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
        ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

        ret = k3_nav_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
        ret |= k3_nav_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
        if (ret)
                goto err_ringcfg;

        return 0;

err_ringcfg:
        k3_nav_ringacc_ring_free(rflow->r_ring);
        rflow->r_ring = NULL;
        k3_nav_ringacc_ring_free(rflow->fd_ring);
        rflow->fd_ring = NULL;
err_rx_ring:
        udma_put_rflow(uc);
err_rflow:
        udma_put_rchan(uc);

        return ret;
}

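/*
 * TX channel configuration goes through the TI SCI resource manager,
 * which owns the config registers. Note that tx_fetch_size is in
 * 32-bit words, hence the >> 2 conversions below.
 */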
static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
{
        struct udma_dev *ud = uc->ud;
        int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
        struct ti_sci_msg_rm_udmap_tx_ch_cfg req = { 0 };
        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
        u32 mode;
        int ret;

        if (uc->config.pkt_mode)
                mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
        else
                mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

        req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.index = uc->tchan->id;
        req.tx_chan_type = mode;
        if (uc->config.dir == DMA_MEM_TO_MEM)
                req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
        else
                req.tx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
                                                          uc->config.psd_size,
                                                          0) >> 2;
        req.txcq_qnum = tc_ring;

        ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
        if (ret) {
                dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);
                return ret;
        }

        /*
         * The TI SCI call above handles the firewall configuration; the
         * cfg register configuration still has to be done locally in the
         * absence of RM services.
         */
        if (IS_ENABLED(CONFIG_K3_DM_FW))
                udma_alloc_tchan_raw(uc);

        return 0;
}

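/*
 * RX needs two TI SCI calls: one for the rchan itself and one for the
 * rflow that routes completions to r_ring and pulls free descriptors
 * from fd_ring (all four FDQ selections point at the same ring here).
 */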
static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
{
        struct udma_dev *ud = uc->ud;
        int fd_ring = k3_nav_ringacc_get_ring_id(uc->rflow->fd_ring);
        int rx_ring = k3_nav_ringacc_get_ring_id(uc->rflow->r_ring);
        int tc_ring = -1;
        struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
        struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
        u32 mode;
        int ret;

        /* The TX completion ring exists only for MEM_TO_MEM (no tchan otherwise) */
        if (uc->config.dir == DMA_MEM_TO_MEM)
                tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);

        if (uc->config.pkt_mode)
                mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
        else
                mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

        req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.index = uc->rchan->id;
        req.rx_chan_type = mode;
        if (uc->config.dir == DMA_MEM_TO_MEM) {
                req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
                req.rxcq_qnum = tc_ring;
        } else {
                req.rx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
                                                          uc->config.psd_size,
                                                          0) >> 2;
                req.rxcq_qnum = rx_ring;
        }
        if (ud->match_data->type == DMA_TYPE_UDMA &&
            uc->rflow->id != uc->rchan->id &&
            uc->config.dir != DMA_MEM_TO_MEM) {
                req.flowid_start = uc->rflow->id;
                req.flowid_cnt = 1;
                req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
                                    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
        }

        ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
        if (ret) {
                dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
                        uc->rchan->id, ret);
                return ret;
        }
        if (uc->config.dir == DMA_MEM_TO_MEM)
                return 0;

        flow_req.valid_params =
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;

        flow_req.nav_id = tisci_rm->tisci_dev_id;
        flow_req.flow_index = uc->rflow->id;

        if (uc->config.needs_epib)
                flow_req.rx_einfo_present = 1;
        else
                flow_req.rx_einfo_present = 0;

        if (uc->config.psd_size)
                flow_req.rx_psinfo_present = 1;
        else
                flow_req.rx_psinfo_present = 0;

        flow_req.rx_error_handling = 0;
        flow_req.rx_desc_type = 0;
        flow_req.rx_dest_qnum = rx_ring;
        flow_req.rx_src_tag_hi_sel = 2;
        flow_req.rx_src_tag_lo_sel = 4;
        flow_req.rx_dest_tag_hi_sel = 5;
        flow_req.rx_dest_tag_lo_sel = 4;
        flow_req.rx_fdq0_sz0_qnum = fd_ring;
        flow_req.rx_fdq1_qnum = fd_ring;
        flow_req.rx_fdq2_qnum = fd_ring;
        flow_req.rx_fdq3_qnum = fd_ring;
        flow_req.rx_ps_location = 0;

        ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
                                                     &flow_req);
        if (ret) {
                dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
                        uc->rchan->id, uc->rflow->id, ret);
                return ret;
        }

        /*
         * The TI SCI call above handles the firewall configuration; the
         * cfg register configuration still has to be done locally in the
         * absence of RM services.
         */
        if (IS_ENABLED(CONFIG_K3_DM_FW))
                udma_alloc_rchan_raw(uc);

        return 0;
}

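/*
 * Top-level channel setup: reserve channels, flows and rings for the
 * requested direction, derive the PSI-L thread ids (bit 15 marks the
 * destination thread), configure everything via TI SCI and finally
 * pair the PSI-L threads.
 */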
static int udma_alloc_chan_resources(struct udma_chan *uc)
{
        struct udma_dev *ud = uc->ud;
        int ret;

        pr_debug("%s: chan:%d as %s\n",
                 __func__, uc->id, udma_get_dir_text(uc->config.dir));

        switch (uc->config.dir) {
        case DMA_MEM_TO_MEM:
                /* Non synchronized - mem to mem type of transfer */
                uc->config.pkt_mode = false;
                ret = udma_get_chan_pair(uc);
                if (ret)
                        return ret;

                ret = udma_alloc_tx_resources(uc);
                if (ret)
                        goto err_free_res;

                ret = udma_alloc_rx_resources(uc);
                if (ret)
                        goto err_free_res;

                uc->config.src_thread = ud->psil_base + uc->tchan->id;
                uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
                break;
        case DMA_MEM_TO_DEV:
                /* Slave transfer synchronized - mem to dev (TX) transfer */
                ret = udma_alloc_tx_resources(uc);
                if (ret)
                        goto err_free_res;

                uc->config.src_thread = ud->psil_base + uc->tchan->id;
                uc->config.dst_thread = uc->config.remote_thread_id;
                uc->config.dst_thread |= 0x8000;

                break;
        case DMA_DEV_TO_MEM:
                /* Slave transfer synchronized - dev to mem (RX) transfer */
                ret = udma_alloc_rx_resources(uc);
                if (ret)
                        goto err_free_res;

                uc->config.src_thread = uc->config.remote_thread_id;
                uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;

                break;
        default:
                /* Cannot happen */
                pr_debug("%s: chan:%d invalid direction (%u)\n",
                         __func__, uc->id, uc->config.dir);
                return -EINVAL;
        }

        /* We have channel indexes and rings */
        if (uc->config.dir == DMA_MEM_TO_MEM) {
                ret = udma_alloc_tchan_sci_req(uc);
                if (ret)
                        goto err_free_res;

                ret = udma_alloc_rchan_sci_req(uc);
                if (ret)
                        goto err_free_res;
        } else {
                /* Slave transfer */
                if (uc->config.dir == DMA_MEM_TO_DEV) {
                        ret = udma_alloc_tchan_sci_req(uc);
                        if (ret)
                                goto err_free_res;
                } else {
                        ret = udma_alloc_rchan_sci_req(uc);
                        if (ret)
                                goto err_free_res;
                }
        }

        if (udma_is_chan_running(uc)) {
                dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
                udma_stop(uc);
                if (udma_is_chan_running(uc)) {
                        dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
                        ret = -EBUSY;
                        goto err_free_res;
                }
        }

        /* PSI-L pairing */
        ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
        if (ret) {
                dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
                goto err_free_res;
        }

        return 0;

err_free_res:
        udma_free_tx_resources(uc);
        udma_free_rx_resources(uc);
        uc->config.remote_thread_id = -1;
        return ret;
}

static void udma_free_chan_resources(struct udma_chan *uc)
{
        /* Hard reset UDMA channel */
        udma_stop_hard(uc);
        udma_reset_counters(uc);

        /* Release PSI-L pairing */
        udma_navss_psil_unpair(uc->ud, uc->config.src_thread, uc->config.dst_thread);

        /* Reset the rings for a new start */
        udma_reset_rings(uc);
        udma_free_tx_resources(uc);
        udma_free_rx_resources(uc);

        uc->config.remote_thread_id = -1;
        uc->config.dir = DMA_MEM_TO_MEM;
}

static const char * const range_names[] = {
        [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
        [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
        [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
        [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
        [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
};

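/*
 * Discover the instance capabilities from the GCFG CAP registers
 * (CAP2/CAP3/CAP4 at offsets 0x28/0x2c/0x30) and map only the
 * register regions that actually exist for this DMA type.
 */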
static int udma_get_mmrs(struct udevice *dev)
{
        struct udma_dev *ud = dev_get_priv(dev);
        u32 cap2, cap3, cap4;
        int i;

        ud->mmrs[MMR_GCFG] = dev_read_addr_name_ptr(dev, mmr_names[MMR_GCFG]);
        if (!ud->mmrs[MMR_GCFG])
                return -EINVAL;

        cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
        cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

        switch (ud->match_data->type) {
        case DMA_TYPE_UDMA:
                ud->rflow_cnt = cap3 & 0x3fff;
                ud->tchan_cnt = cap2 & 0x1ff;
                ud->echan_cnt = (cap2 >> 9) & 0x1ff;
                ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
                break;
        case DMA_TYPE_BCDMA:
                ud->bchan_cnt = cap2 & 0x1ff;
                ud->tchan_cnt = (cap2 >> 9) & 0x1ff;
                ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
                break;
        case DMA_TYPE_PKTDMA:
                cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
                ud->tchan_cnt = cap2 & 0x1ff;
                ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
                ud->rflow_cnt = cap3 & 0x3fff;
                ud->tflow_cnt = cap4 & 0x3fff;
                break;
        default:
                return -EINVAL;
        }

        for (i = 1; i < MMR_LAST; i++) {
                if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
                        continue;
                if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
                        continue;
                if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
                        continue;

                ud->mmrs[i] = dev_read_addr_name_ptr(dev, mmr_names[i]);
                if (!ud->mmrs[i])
                        return -EINVAL;
        }

        return 0;
}

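/*
 * Build the allocation bitmaps and seed them from the
 * ti,sci-rm-range-* DT properties: ranges assigned to this host are
 * cleared (available), everything else stays set (in use elsewhere).
 */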
static int udma_setup_resources(struct udma_dev *ud)
{
        struct udevice *dev = ud->dev;
        int i;
        struct ti_sci_resource_desc *rm_desc;
        struct ti_sci_resource *rm_res;
        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

        ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
                                           sizeof(unsigned long), GFP_KERNEL);
        ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
                                  GFP_KERNEL);
        ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
                                           sizeof(unsigned long), GFP_KERNEL);
        ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
                                  GFP_KERNEL);
        ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
                                           sizeof(unsigned long), GFP_KERNEL);
        ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
                                              sizeof(unsigned long),
                                              GFP_KERNEL);
        ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
                                  GFP_KERNEL);

        if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
            !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
            !ud->rflows)
                return -ENOMEM;

        /*
         * RX flows with the same IDs as RX channels are reserved to be used
         * as default flows if remote HW can't generate flow_ids. Those
         * RX flows can be requested only explicitly by id.
         */
        bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);

        /* Get resource ranges from tisci */
        for (i = 0; i < RM_RANGE_LAST; i++) {
                if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
                        continue;

                tisci_rm->rm_ranges[i] =
                        devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
                                                    tisci_rm->tisci_dev_id,
                                                    (char *)range_names[i]);
        }

        /* tchan ranges */
        rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
        if (IS_ERR(rm_res)) {
                bitmap_zero(ud->tchan_map, ud->tchan_cnt);
        } else {
                bitmap_fill(ud->tchan_map, ud->tchan_cnt);
                for (i = 0; i < rm_res->sets; i++) {
                        rm_desc = &rm_res->desc[i];
                        bitmap_clear(ud->tchan_map, rm_desc->start,
                                     rm_desc->num);
                }
        }

        /* rchan and matching default flow ranges */
        rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
        if (IS_ERR(rm_res)) {
                bitmap_zero(ud->rchan_map, ud->rchan_cnt);
                bitmap_zero(ud->rflow_map, ud->rchan_cnt);
        } else {
                bitmap_fill(ud->rchan_map, ud->rchan_cnt);
                bitmap_fill(ud->rflow_map, ud->rchan_cnt);
                for (i = 0; i < rm_res->sets; i++) {
                        rm_desc = &rm_res->desc[i];
                        bitmap_clear(ud->rchan_map, rm_desc->start,
                                     rm_desc->num);
                        bitmap_clear(ud->rflow_map, rm_desc->start,
                                     rm_desc->num);
                }
        }

        /* GP rflow ranges */
        rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
        if (IS_ERR(rm_res)) {
                bitmap_clear(ud->rflow_map, ud->rchan_cnt,
                             ud->rflow_cnt - ud->rchan_cnt);
        } else {
                bitmap_set(ud->rflow_map, ud->rchan_cnt,
                           ud->rflow_cnt - ud->rchan_cnt);
                for (i = 0; i < rm_res->sets; i++) {
                        rm_desc = &rm_res->desc[i];
                        bitmap_clear(ud->rflow_map, rm_desc->start,
                                     rm_desc->num);
                }
        }

        return 0;
}

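/* BCDMA variant: bchan/tchan/rchan ranges; rflow/tflow ranges do not apply. */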
1431 static int bcdma_setup_resources(struct udma_dev *ud)
1432 {
1433         int i;
1434         struct udevice *dev = ud->dev;
1435         struct ti_sci_resource_desc *rm_desc;
1436         struct ti_sci_resource *rm_res;
1437         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1438
1439         ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
1440                                            sizeof(unsigned long), GFP_KERNEL);
	ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
				  GFP_KERNEL);
	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
	    !ud->bchans || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* bchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->bchan_map, ud->bchan_cnt);
	} else {
		bitmap_fill(ud->bchan_map, ud->bchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->bchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: bchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* rchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	return 0;
}

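/*
 * Set up the PKTDMA channel/flow bookkeeping. The bitmaps below treat a
 * set bit as "reserved": when TISCI provides resource ranges for this
 * host, the maps are first filled and our ranges are then cleared; when
 * no range is available the maps are zeroed and everything is usable.
 */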
static int pktdma_setup_resources(struct udma_dev *ud)
{
	int i;
	struct udevice *dev = ud->dev;
	struct ti_sci_resource *rm_res;
	struct ti_sci_resource_desc *rm_desc;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);
	ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
	    !ud->rchans || !ud->rflows || !ud->rflow_map)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_BCHAN)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* rchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all rflows are assigned exclusively to us */
		bitmap_zero(ud->rflow_map, ud->rflow_cnt);
	} else {
		bitmap_fill(ud->rflow_map, ud->rflow_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* tflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		/* all tflows are assigned exclusively to us */
		bitmap_zero(ud->tflow_map, ud->tflow_cnt);
	} else {
		bitmap_fill(ud->tflow_map, ud->tflow_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tflow_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: tflow: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	return 0;
}

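/*
 * Type-specific resource setup, followed by the usable channel count:
 * total channels minus the bits still set (i.e. reserved for other
 * hosts) in each map. As a worked example, tchan_cnt = 32 with 28 bits
 * left set would contribute 4 usable tchans to ch_count.
 */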
static int setup_resources(struct udma_dev *ud)
{
	struct udevice *dev = ud->dev;
	int ch_count, ret;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ret = udma_setup_resources(ud);
		break;
	case DMA_TYPE_BCDMA:
		ret = bcdma_setup_resources(ud);
		break;
	case DMA_TYPE_PKTDMA:
		ret = pktdma_setup_resources(ud);
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
	if (ud->bchan_cnt)
		ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		dev_dbg(dev,
			"Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
			ch_count,
			ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						      ud->tchan_cnt),
			ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						      ud->rchan_cnt),
			ud->rflow_cnt - bitmap_weight(ud->rflow_map,
						      ud->rflow_cnt));
		break;
	case DMA_TYPE_BCDMA:
		dev_dbg(dev,
			"Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
			ch_count,
			ud->bchan_cnt - bitmap_weight(ud->bchan_map,
						      ud->bchan_cnt),
			ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						      ud->tchan_cnt),
			ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						      ud->rchan_cnt));
		break;
	case DMA_TYPE_PKTDMA:
		dev_dbg(dev,
			"Channels: %d (tchan: %u, rchan: %u)\n",
			ch_count,
			ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						      ud->tchan_cnt),
			ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						      ud->rchan_cnt));
		break;
	default:
		break;
	}

	return ch_count;
}

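/*
 * Probe sequence: map MMRs, resolve the TISCI handle and the device IDs
 * from DT, set up (UDMA) or create (BCDMA/PKTDMA) the ring accelerator,
 * partition resources via setup_resources() and pre-initialize every
 * channel. Channel 0 is kept in_use as the dedicated memcpy channel
 * consumed by udma_transfer().
 */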
static int udma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct udma_dev *ud = dev_get_priv(dev);
	int i, ret;
	struct udevice *tmp;
	struct udevice *tisci_dev = NULL;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));

	ud->match_data = (void *)dev_get_driver_data(dev);
	ret = udma_get_mmrs(dev);
	if (ret)
		return ret;

	ud->psil_base = ud->match_data->psil_base;

	ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
					   "ti,sci", &tisci_dev);
	if (ret) {
		debug("Failed to get TISCI phandle (%d)\n", ret);
		tisci_rm->tisci = NULL;
		return -EINVAL;
	}
	tisci_rm->tisci = (struct ti_sci_handle *)
			  (ti_sci_get_handle_from_sysfw(tisci_dev));

	tisci_rm->tisci_dev_id = -1;
	ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_navss_dev_id = -1;
	ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
			      &tisci_rm->tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
	tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;

	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
						   "ti,ringacc", &tmp);
		if (ret) {
			dev_err(dev, "Failed to get ringacc (%d)\n", ret);
			return ret;
		}
		ud->ringacc = dev_get_priv(tmp);
	} else {
		struct k3_ringacc_init_data ring_init_data;

		ring_init_data.tisci = ud->tisci_rm.tisci;
		ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
		if (ud->match_data->type == DMA_TYPE_BCDMA) {
			ring_init_data.num_rings = ud->bchan_cnt +
						   ud->tchan_cnt +
						   ud->rchan_cnt;
		} else {
			ring_init_data.num_rings = ud->rflow_cnt +
						   ud->tflow_cnt;
		}

		ud->ringacc = k3_ringacc_dmarings_init(dev, &ring_init_data);
	}
	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	ud->dev = dev;
	ud->ch_count = setup_resources(ud);
	if (ud->ch_count <= 0)
		return ud->ch_count;

	for (i = 0; i < ud->bchan_cnt; i++) {
		struct udma_bchan *bchan = &ud->bchans[i];

		bchan->id = i;
		bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_chan = ud->mmrs[MMR_TCHAN] + UDMA_CH_100(i);
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_chan = ud->mmrs[MMR_RCHAN] + UDMA_CH_100(i);
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
		rflow->reg_rflow = ud->mmrs[MMR_RFLOW] + UDMA_CH_40(i);
	}

	for (i = 0; i < ud->ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->id = i;
		uc->config.remote_thread_id = -1;
		uc->bchan = NULL;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->config.mapped_channel_id = -1;
		uc->config.default_flow_id = -1;
		uc->config.dir = DMA_MEM_TO_MEM;
		sprintf(uc->name, "UDMA chan%d", i);
		if (!i)
			uc->in_use = true;
	}

	pr_debug("%s(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
		 dev->name,
		 udma_read(ud->mmrs[MMR_GCFG], 0),
		 udma_read(ud->mmrs[MMR_GCFG], 0x20),
		 udma_read(ud->mmrs[MMR_GCFG], 0x24),
		 udma_read(ud->mmrs[MMR_GCFG], 0x28),
		 udma_read(ud->mmrs[MMR_GCFG], 0x2c));

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;

	return 0;
}

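/*
 * Ring elements are 8 bytes (K3_NAV_RINGACC_RING_ELSIZE_8) and carry
 * the 64-bit bus address of a descriptor; memcpy() is used to widen the
 * CPU pointer to the u64 ring element without aliasing games.
 */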
static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
{
	u64 addr = 0;

	memcpy(&addr, &elem, sizeof(elem));
	return k3_nav_ringacc_ring_push(ring, &addr);
}

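/*
 * A type 15 TR uses 16-bit element counts, so one TR moves at most
 * 64K - 1 bytes. Longer copies are split in two TRs: TR0 moves tr0_cnt1
 * blocks of tr0_cnt0 bytes (tr0_cnt0 is kept a multiple of the common
 * src/dest alignment, capped at 8 bytes), TR1 moves the remainder.
 * Worked example with assumed inputs: len = 200000 on 8-byte aligned
 * buffers gives tr0_cnt0 = 65536 - 8 = 65528, tr0_cnt1 = 200000 / 65528
 * = 3 and tr1_cnt0 = 200000 % 65528 = 3416.
 */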
static int udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
				dma_addr_t src, size_t len)
{
	u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct cppi5_tr_type15_t *tr_req;
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned long dummy;
	void *tr_desc;
	size_t desc_size;

	if (len < SZ_64K) {
		num_tr = 1;
		tr0_cnt0 = len;
		tr0_cnt1 = 1;
	} else {
		unsigned long align_to = __ffs(src | dest);

		if (align_to > 3)
			align_to = 3;
		/*
		 * Keep simple: tr0: SZ_64K-alignment blocks,
		 *              tr1: the remaining
		 */
		num_tr = 2;
		tr0_cnt0 = (SZ_64K - BIT(align_to));
		if (len / tr0_cnt0 >= SZ_64K) {
			dev_err(uc->ud->dev, "size %zu is not supported\n",
				len);
			return -EINVAL;
		}

		tr0_cnt1 = len / tr0_cnt0;
		tr1_cnt0 = len % tr0_cnt0;
	}

	desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
	tr_desc = dma_alloc_coherent(desc_size, &dummy);
	if (!tr_desc)
		return -ENOMEM;
	memset(tr_desc, 0, desc_size);

	cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);

	tr_req = tr_desc + tr_size;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;
	tr_req[0].icnt1 = tr0_cnt1;
	tr_req[0].icnt2 = 1;
	tr_req[0].icnt3 = 1;
	tr_req[0].dim1 = tr0_cnt0;

	tr_req[0].daddr = dest;
	tr_req[0].dicnt0 = tr0_cnt0;
	tr_req[0].dicnt1 = tr0_cnt1;
	tr_req[0].dicnt2 = 1;
	tr_req[0].dicnt3 = 1;
	tr_req[0].ddim1 = tr0_cnt0;

	if (num_tr == 2) {
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;
		tr_req[1].icnt1 = 1;
		tr_req[1].icnt2 = 1;
		tr_req[1].icnt3 = 1;

		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
		tr_req[1].dicnt0 = tr1_cnt0;
		tr_req[1].dicnt1 = 1;
		tr_req[1].dicnt2 = 1;
		tr_req[1].dicnt3 = 1;
	}

	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);

	flush_dcache_range((unsigned long)tr_desc,
			   ALIGN((unsigned long)tr_desc + desc_size,
				 ARCH_DMA_MINALIGN));

	return udma_push_to_ring(uc->tchan->t_ring, tr_desc);
}

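/*
 * valid_params masks for the TISCI channel configuration requests
 * below; only the fields flagged here are acted upon by the firmware,
 * the rest keep their firmware defaults.
 */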
#define TISCI_BCDMA_BCHAN_VALID_PARAMS (                        \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)

#define TISCI_BCDMA_TCHAN_VALID_PARAMS (                        \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)

#define TISCI_BCDMA_RCHAN_VALID_PARAMS (                        \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)

#define TISCI_UDMA_TCHAN_VALID_PARAMS (                         \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |      \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |    \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |          \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |      \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |         \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |            \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

#define TISCI_UDMA_RCHAN_VALID_PARAMS (                         \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |         \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |            \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |          \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |    \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |     \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |    \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |      \
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	struct udma_bchan *bchan = uc->bchan;
	int ret = 0;

	req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
	req_tx.index = bchan->id;

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);

	return ret;
}

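/*
 * Reserve a block-copy channel: a specific one when id >= 0, otherwise
 * the first clear bit in bchan_map (a set bit means "in use", matching
 * the TISCI range handling in bcdma_setup_resources()).
 */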
static struct udma_bchan *__bcdma_reserve_bchan(struct udma_dev *ud, int id)
{
	if (id >= 0) {
		if (test_bit(id, ud->bchan_map)) {
			dev_err(ud->dev, "bchan%d is in use\n", id);
			return ERR_PTR(-ENOENT);
		}
	} else {
		id = find_next_zero_bit(ud->bchan_map, ud->bchan_cnt, 0);
		if (id == ud->bchan_cnt)
			return ERR_PTR(-ENOENT);
	}
	__set_bit(id, ud->bchan_map);
	return &ud->bchans[id];
}

static int bcdma_get_bchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->bchan) {
		dev_err(ud->dev, "chan%d: already have bchan%d allocated\n",
			uc->id, uc->bchan->id);
		return 0;
	}

	uc->bchan = __bcdma_reserve_bchan(ud, -1);
	if (IS_ERR(uc->bchan))
		return PTR_ERR(uc->bchan);

	uc->tchan = uc->bchan;

	return 0;
}

static void bcdma_put_bchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->bchan) {
		dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
			uc->bchan->id);
		__clear_bit(uc->bchan->id, ud->bchan_map);
		uc->bchan = NULL;
		uc->tchan = NULL;
	}
}

static void bcdma_free_bchan_resources(struct udma_chan *uc)
{
	if (!uc->bchan)
		return;

	k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
	k3_nav_ringacc_ring_free(uc->bchan->t_ring);
	uc->bchan->tc_ring = NULL;
	uc->bchan->t_ring = NULL;

	bcdma_put_bchan(uc);
}

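/*
 * A bchan uses a t_ring/tc_ring pair. Ring uc->bchan->id is requested
 * explicitly, presumably because the DMSS rings are mapped 1:1 to the
 * channels; only the forward t_ring needs an explicit configuration.
 */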
static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = bcdma_get_bchan(uc);
	if (ret)
		return ret;

	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
						&uc->bchan->t_ring,
						&uc->bchan->tc_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
	uc->bchan->tc_ring = NULL;
	k3_nav_ringacc_ring_free(uc->bchan->t_ring);
	uc->bchan->t_ring = NULL;
err_ring:
	bcdma_put_bchan(uc);

	return ret;
}

static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	int ret = 0;

	req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
	if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
	    ud->match_data->flags & UDMA_FLAG_TDTYPE) {
		/* wait for peer to complete the teardown for PDMAs */
		req_tx.valid_params |=
				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
		req_tx.tx_tdtype = 1;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);

	return ret;
}

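/*
 * PKTDMA TX channels take the same TISCI configuration as BCDMA split
 * TX channels, so the BCDMA helper is reused verbatim.
 */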
#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config

static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	int ret = 0;

	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = uc->rchan->id;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret) {
		dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
		return ret;
	}

	flow_req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;
	flow_req.rx_einfo_present = uc->config.needs_epib ? 1 : 0;
	flow_req.rx_psinfo_present = uc->config.psd_size ? 1 : 0;
	flow_req.rx_error_handling = 0;

	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
	if (ret)
		dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
			ret);

	return ret;
}

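/*
 * BCDMA channels in U-Boot are only used for MEM_TO_MEM block copies
 * through a bchan; pkt_mode is forced off since the copies are done
 * with type 15 TRs rather than host packet descriptors.
 */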
static int bcdma_alloc_chan_resources(struct udma_chan *uc)
{
	int ret;

	uc->config.pkt_mode = false;

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
			uc->id);

		ret = bcdma_alloc_bchan_resources(uc);
		if (ret)
			return ret;

		ret = bcdma_tisci_m2m_channel_config(uc);
		break;
	default:
		/* Cannot happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	if (udma_is_chan_running(uc)) {
		dev_warn(uc->ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(uc->ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_res_free;
		}
	}

	udma_reset_rings(uc);

	return 0;

err_res_free:
	bcdma_free_bchan_resources(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	udma_reset_uchan(uc);

	return ret;
}

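/*
 * PKTDMA channels are always slave channels: allocate the tchan or
 * rchan with its rings, configure it through TISCI, then pair the
 * PSI-L source and destination threads before first use.
 */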
static int pktdma_alloc_chan_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	switch (uc->config.dir) {
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
			uc->id);

		ret = udma_alloc_tx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

		ret = pktdma_tisci_tx_channel_config(uc);
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
			uc->id);

		ret = udma_alloc_rx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		ret = pktdma_tisci_rx_channel_config(uc);
		break;
	default:
		/* Cannot happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	/* PSI-L pairing */
	ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
	if (ret) {
		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
			uc->config.src_thread, uc->config.dst_thread);
		goto err_res_free;
	}

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_res_free;
		}
	}

	udma_reset_rings(uc);

	if (uc->tchan)
		dev_dbg(ud->dev,
			"chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
			uc->id, uc->tchan->id, uc->tchan->tflow_id,
			uc->config.remote_thread_id);
	else if (uc->rchan)
		dev_dbg(ud->dev,
			"chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
			uc->id, uc->rchan->id, uc->rflow->id,
			uc->config.remote_thread_id);
	return 0;

err_res_free:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	udma_reset_uchan(uc);

	return ret;
}

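/*
 * One-shot blocking memcpy on the reserved channel 0. PKTDMA has no
 * memcpy capability (enable_memcpy_support is false in
 * am64_pktdma_data), hence the -EINVAL default cases below.
 */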
static int udma_transfer(struct udevice *dev, int direction,
			 dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct udma_dev *ud = dev_get_priv(dev);
	/* Channel0 is reserved for memcpy */
	struct udma_chan *uc = &ud->channels[0];
	dma_addr_t paddr = 0;
	int ret;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ret = udma_alloc_chan_resources(uc);
		break;
	case DMA_TYPE_BCDMA:
		ret = bcdma_alloc_chan_resources(uc);
		break;
	default:
		return -EINVAL;
	}
	if (ret)
		return ret;

	ret = udma_prep_dma_memcpy(uc, dst, src, len);
	if (ret)
		return ret;

	udma_start(uc);
	udma_poll_completion(uc, &paddr);
	udma_stop(uc);

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		udma_free_chan_resources(uc);
		break;
	case DMA_TYPE_BCDMA:
		bcdma_free_bchan_resources(uc);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int udma_request(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan_config *ucc;
	struct udma_chan *uc;
	unsigned long dummy;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	uc = &ud->channels[dma->id];
	ucc = &uc->config;
	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ret = udma_alloc_chan_resources(uc);
		break;
	case DMA_TYPE_BCDMA:
		ret = bcdma_alloc_chan_resources(uc);
		break;
	case DMA_TYPE_PKTDMA:
		ret = pktdma_alloc_chan_resources(uc);
		break;
	default:
		return -EINVAL;
	}
	if (ret) {
		dev_err(dma->dev, "alloc dma res failed %d\n", ret);
		return -EINVAL;
	}

	if (uc->config.dir == DMA_MEM_TO_DEV) {
		uc->desc_tx = dma_alloc_coherent(ucc->hdesc_size, &dummy);
		if (!uc->desc_tx)
			return -ENOMEM;
		memset(uc->desc_tx, 0, ucc->hdesc_size);
	} else {
		uc->desc_rx = dma_alloc_coherent(
				ucc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
		if (!uc->desc_rx)
			return -ENOMEM;
		memset(uc->desc_rx, 0, ucc->hdesc_size * UDMA_RX_DESC_NUM);
	}

	uc->in_use = true;
	uc->desc_rx_cur = 0;
	uc->num_rx_bufs = 0;

	if (uc->config.dir == DMA_DEV_TO_MEM) {
		uc->cfg_data.flow_id_base = uc->rflow->id;
		uc->cfg_data.flow_id_cnt = 1;
	}

	return 0;
}

static int udma_rfree(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		udma_stop(uc);

	udma_navss_psil_unpair(ud, uc->config.src_thread,
			       uc->config.dst_thread);

	bcdma_free_bchan_resources(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	udma_reset_uchan(uc);

	uc->in_use = false;

	return 0;
}

static int udma_enable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	return udma_start(uc);
}

static int udma_disable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret = 0;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		ret = udma_stop(uc);
	else
		dev_err(dma->dev, "%s not running\n", __func__);

	return ret;
}

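/*
 * Synchronous TX: wrap the caller's buffer in the single preallocated
 * host descriptor, flush both buffer and descriptor so the (non
 * snooping) DMA sees them, push to the t_ring and busy-wait for the
 * completion on the tc_ring via udma_poll_completion().
 */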
static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_tx;
	dma_addr_t dma_src = (dma_addr_t)src;
	struct ti_udma_drv_packet_data packet_data = { 0 };
	dma_addr_t paddr;
	struct udma_chan *uc;
	u32 tc_ring_id;
	int ret;

	if (metadata)
		packet_data = *((struct ti_udma_drv_packet_data *)metadata);

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->config.dir != DMA_MEM_TO_DEV)
		return -EINVAL;

	tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);

	desc_tx = uc->desc_tx;

	cppi5_hdesc_reset_hbdesc(desc_tx);

	cppi5_hdesc_init(desc_tx,
			 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->config.psd_size);
	cppi5_hdesc_set_pktlen(desc_tx, len);
	cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
	cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
	/* pkt_type and dest_tag are provided by the caller via metadata */
	cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
	cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);

	flush_dcache_range((unsigned long)dma_src,
			   ALIGN((unsigned long)dma_src + len,
				 ARCH_DMA_MINALIGN));
	flush_dcache_range((unsigned long)desc_tx,
			   ALIGN((unsigned long)desc_tx + uc->config.hdesc_size,
				 ARCH_DMA_MINALIGN));

	ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
	if (ret) {
		dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
			dma->id, ret);
		return ret;
	}

	udma_poll_completion(uc, &paddr);

	return 0;
}

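/*
 * Non-blocking RX: pop one host descriptor from the receive ring
 * (returns 0 if nothing is pending), invalidate the descriptor and
 * payload cache lines and hand the original buffer back to the caller.
 * Returns the packet length on success.
 */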
static int udma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan_config *ucc;
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t buf_dma;
	struct udma_chan *uc;
	u32 buf_dma_len, pkt_len;
	u32 port_id = 0;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];
	ucc = &uc->config;

	if (uc->config.dir != DMA_DEV_TO_MEM)
		return -EINVAL;
	if (!uc->num_rx_bufs)
		return -EINVAL;

	ret = k3_nav_ringacc_ring_pop(uc->rflow->r_ring, &desc_rx);
	if (ret && ret != -ENODATA) {
		dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
		return ret;
	} else if (ret == -ENODATA) {
		return 0;
	}

	/*
	 * invalidate cache data; note the byte-based range: desc_rx is a
	 * struct pointer, so the arithmetic must not be pointer-scaled
	 */
	invalidate_dcache_range((ulong)desc_rx,
				(ulong)desc_rx + ucc->hdesc_size);

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);

	/* invalidate cache data */
	invalidate_dcache_range((ulong)buf_dma,
				(ulong)(buf_dma + buf_dma_len));

	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	*dst = (void *)buf_dma;
	uc->num_rx_bufs--;

	return pkt_len;
}

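/*
 * DT translation: args->args[0] carries the remote PSI-L thread ID.
 * The destination-thread bit (K3_PSIL_DST_THREAD_ID_OFFSET) doubles as
 * the direction flag (set: MEM_TO_DEV, clear: DEV_TO_MEM); the rest of
 * the channel configuration comes from the static PSI-L endpoint
 * tables via psil_get_ep_config().
 */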
static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
{
	struct udma_chan_config *ucc;
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc = &ud->channels[0];
	struct psil_endpoint_config *ep_config;
	u32 val;

	for (val = 0; val < ud->ch_count; val++) {
		uc = &ud->channels[val];
		if (!uc->in_use)
			break;
	}

	if (val == ud->ch_count)
		return -EBUSY;

	ucc = &uc->config;
	ucc->remote_thread_id = args->args[0];
	if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
		ucc->dir = DMA_MEM_TO_DEV;
	else
		ucc->dir = DMA_DEV_TO_MEM;

	ep_config = psil_get_ep_config(ucc->remote_thread_id);
	if (IS_ERR(ep_config)) {
		dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
			uc->config.remote_thread_id);
		ucc->dir = DMA_MEM_TO_MEM;
		ucc->remote_thread_id = -1;
		return -EINVAL;
	}

	ucc->pkt_mode = ep_config->pkt_mode;
	ucc->channel_tpl = ep_config->channel_tpl;
	ucc->notdpkt = ep_config->notdpkt;
	ucc->ep_type = ep_config->ep_type;

	if (ud->match_data->type == DMA_TYPE_PKTDMA &&
	    ep_config->mapped_channel_id >= 0) {
		ucc->mapped_channel_id = ep_config->mapped_channel_id;
		ucc->default_flow_id = ep_config->default_flow_id;
	} else {
		ucc->mapped_channel_id = -1;
		ucc->default_flow_id = -1;
	}

	ucc->needs_epib = ep_config->needs_epib;
	ucc->psd_size = ep_config->psd_size;
	ucc->metadata_size = (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
			     ucc->psd_size;

	ucc->hdesc_size = cppi5_hdesc_calc_size(ucc->needs_epib,
						ucc->psd_size, 0);
	ucc->hdesc_size = ALIGN(ucc->hdesc_size, ARCH_DMA_MINALIGN);

	dma->id = uc->id;
	pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
		 dma->id, ucc->needs_epib,
		 ucc->psd_size, ucc->metadata_size,
		 ucc->remote_thread_id);

	return 0;
}

int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t dma_dst;
	struct udma_chan *uc;
	u32 desc_num;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->config.dir != DMA_DEV_TO_MEM)
		return -EINVAL;

	if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
		return -EINVAL;

	desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
	desc_rx = uc->desc_rx + (desc_num * uc->config.hdesc_size);
	dma_dst = (dma_addr_t)dst;

	cppi5_hdesc_reset_hbdesc(desc_rx);

	cppi5_hdesc_init(desc_rx,
			 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->config.psd_size);
	cppi5_hdesc_set_pktlen(desc_rx, size);
	cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);

	flush_dcache_range((unsigned long)desc_rx,
			   ALIGN((unsigned long)desc_rx + uc->config.hdesc_size,
				 ARCH_DMA_MINALIGN));

	ret = udma_push_to_ring(uc->rflow->fd_ring, desc_rx);
	if (ret)
		return ret;

	uc->num_rx_bufs++;
	uc->desc_rx_cur++;

	return 0;
}

static int udma_get_cfg(struct dma *dma, u32 id, void **data)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	switch (id) {
	case TI_UDMA_CHAN_PRIV_INFO:
		uc = &ud->channels[dma->id];
		*data = &uc->cfg_data;
		return 0;
	}

	return -EINVAL;
}

static const struct dma_ops udma_ops = {
	.transfer	= udma_transfer,
	.of_xlate	= udma_of_xlate,
	.request	= udma_request,
	.rfree		= udma_rfree,
	.enable		= udma_enable,
	.disable	= udma_disable,
	.send		= udma_send,
	.receive	= udma_receive,
	.prepare_rcv_buf = udma_prepare_rcv_buf,
	.get_cfg	= udma_get_cfg,
};

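/*
 * Per-SoC match data: PSI-L thread base, static TR Z-dimension mask,
 * output event offsets and, for UDMA instances, the throughput-level
 * table (level_start_idx appears to give the first channel index of
 * each tier).
 */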
static struct udma_match_data am654_main_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.statictr_z_mask = GENMASK(11, 0),
	.oes = {
		.udma_rchan = 0x200,
	},
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 8, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static struct udma_match_data am654_mcu_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x6000,
	.enable_memcpy_support = true,
	.statictr_z_mask = GENMASK(11, 0),
	.oes = {
		.udma_rchan = 0x200,
	},
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 2, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static struct udma_match_data j721e_main_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
	.oes = {
		.udma_rchan = 0x400,
	},
	.tpl_levels = 3,
	.level_start_idx = {
		[0] = 16, /* Normal channels */
		[1] = 4, /* High Throughput channels */
		[2] = 0, /* Ultra High Throughput channels */
	},
};

static struct udma_match_data j721e_mcu_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x6000,
	.enable_memcpy_support = true,
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
	.oes = {
		.udma_rchan = 0x400,
	},
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 2, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static struct udma_match_data am64_bcdma_data = {
	.type = DMA_TYPE_BCDMA,
	.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
	.enable_memcpy_support = true, /* Supported via bchan */
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
	.oes = {
		.bcdma_bchan_data = 0x2200,
		.bcdma_bchan_ring = 0x2400,
		.bcdma_tchan_data = 0x2800,
		.bcdma_tchan_ring = 0x2a00,
		.bcdma_rchan_data = 0x2e00,
		.bcdma_rchan_ring = 0x3000,
	},
	/* No throughput levels */
};

static struct udma_match_data am64_pktdma_data = {
	.type = DMA_TYPE_PKTDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = false,
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
	.statictr_z_mask = GENMASK(23, 0),
	.oes = {
		.pktdma_tchan_flow = 0x1200,
		.pktdma_rchan_flow = 0x1600,
	},
	/* No throughput levels */
};

static const struct udevice_id udma_ids[] = {
	{
		.compatible = "ti,am654-navss-main-udmap",
		.data = (ulong)&am654_main_data,
	}, {
		.compatible = "ti,am654-navss-mcu-udmap",
		.data = (ulong)&am654_mcu_data,
	}, {
		.compatible = "ti,j721e-navss-main-udmap",
		.data = (ulong)&j721e_main_data,
	}, {
		.compatible = "ti,j721e-navss-mcu-udmap",
		.data = (ulong)&j721e_mcu_data,
	}, {
		.compatible = "ti,am64-dmss-bcdma",
		.data = (ulong)&am64_bcdma_data,
	}, {
		.compatible = "ti,am64-dmss-pktdma",
		.data = (ulong)&am64_pktdma_data,
	},
	{ /* Sentinel */ },
};

U_BOOT_DRIVER(ti_edma3) = {
	.name	= "ti-udma",
	.id	= UCLASS_DMA,
	.of_match = udma_ids,
	.ops	= &udma_ops,
	.probe	= udma_probe,
	.priv_auto	= sizeof(struct udma_dev),
};