usb: musb: musb_cppi41: Handle ISOCH differently and not use the hrtimer.
[pandora-kernel.git] / drivers / usb / musb / musb_cppi41.c
1 #include <linux/device.h>
2 #include <linux/dma-mapping.h>
3 #include <linux/dmaengine.h>
4 #include <linux/sizes.h>
5 #include <linux/platform_device.h>
6 #include <linux/of.h>
7
8 #include "musb_core.h"
9
/* Per-endpoint RNDIS size register; EP1 is at offset 0x80, 4 bytes apart. */
#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

/* Auto-request modes written to USB_CTRL_AUTOREQ (2 bits per endpoint). */
#define EP_MODE_AUTOREG_NONE            0
#define EP_MODE_AUTOREG_ALL_NEOP        1
#define EP_MODE_AUTOREG_ALWAYS          3

/* DMA modes written to USB_CTRL_TX_MODE / USB_CTRL_RX_MODE (2 bits per EP). */
#define EP_MODE_DMA_TRANSPARENT         0
#define EP_MODE_DMA_RNDIS               1
#define EP_MODE_DMA_GEN_RNDIS           3

/* Register offsets in the MUSB wrapper (musb->ctrl_base) address space. */
#define USB_CTRL_TX_MODE        0x70
#define USB_CTRL_RX_MODE        0x74
#define USB_CTRL_AUTOREQ        0xd0
#define USB_TDOWN               0xd8
24
/*
 * Per-direction, per-endpoint DMA channel state.
 */
struct cppi41_dma_channel {
        struct dma_channel channel;     /* generic musb DMA channel */
        struct cppi41_dma_controller *controller;
        struct musb_hw_ep *hw_ep;       /* endpoint served by this channel */
        struct dma_chan *dc;            /* underlying dmaengine channel */
        dma_cookie_t cookie;            /* cookie of the in-flight descriptor */
        u8 port_num;                    /* 1-based CPPI port / endpoint number */
        u8 is_tx;
        u8 is_allocated;
        u8 usb_toggle;                  /* saved RX data toggle (erratum w/a) */

        dma_addr_t buf_addr;            /* DMA address of the current chunk */
        u32 total_len;                  /* length of the whole request */
        u32 prog_len;                   /* length programmed for this chunk */
        u32 transferred;                /* bytes completed so far */
        u32 packet_sz;
        struct list_head tx_check;      /* link on controller->early_tx_list */
        struct work_struct dma_completion; /* deferred (ISOCH) completion */
};
44
/* Channels per direction; matches the fixed-size channel arrays below. */
#define MUSB_DMA_NUM_CHANNELS 15

struct cppi41_dma_controller {
        struct dma_controller controller;       /* generic musb interface */
        struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
        struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
        struct musb *musb;
        struct hrtimer early_tx;                /* polls TX FIFO drain on FS */
        struct list_head early_tx_list;         /* channels awaiting drain */
        u32 rx_mode;                            /* cached USB_CTRL_RX_MODE */
        u32 tx_mode;                            /* cached USB_CTRL_TX_MODE */
        u32 auto_req;                           /* cached USB_CTRL_AUTOREQ */
};
58
59 static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
60 {
61         u16 csr;
62         u8 toggle;
63
64         if (cppi41_channel->is_tx)
65                 return;
66         if (!is_host_active(cppi41_channel->controller->musb))
67                 return;
68
69         csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
70         toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;
71
72         cppi41_channel->usb_toggle = toggle;
73 }
74
75 static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
76 {
77         u16 csr;
78         u8 toggle;
79
80         if (cppi41_channel->is_tx)
81                 return;
82         if (!is_host_active(cppi41_channel->controller->musb))
83                 return;
84
85         csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
86         toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;
87
88         /*
89          * AM335x Advisory 1.0.13: Due to internal synchronisation error the
90          * data toggle may reset from DATA1 to DATA0 during receiving data from
91          * more than one endpoint.
92          */
93         if (!toggle && toggle == cppi41_channel->usb_toggle) {
94                 csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
95                 musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
96                 dev_dbg(cppi41_channel->controller->musb->controller,
97                                 "Restoring DATA1 toggle.\n");
98         }
99
100         cppi41_channel->usb_toggle = toggle;
101 }
102
103 static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
104 {
105         u8              epnum = hw_ep->epnum;
106         struct musb     *musb = hw_ep->musb;
107         void __iomem    *epio = musb->endpoints[epnum].regs;
108         u16             csr;
109
110         csr = musb_readw(epio, MUSB_TXCSR);
111         if (csr & MUSB_TXCSR_TXPKTRDY)
112                 return false;
113         return true;
114 }
115
116 static bool is_isoc(struct musb_hw_ep *hw_ep, bool in)
117 {
118         if (in && hw_ep->in_qh) {
119                 if (hw_ep->in_qh->type == USB_ENDPOINT_XFER_ISOC)
120                         return true;
121         } else if (hw_ep->out_qh) {
122                 if (hw_ep->out_qh->type == USB_ENDPOINT_XFER_ISOC)
123                         return true;
124         }
125         return false;
126 }
127
128 static void cppi41_dma_callback(void *private_data);
129
/*
 * Complete the current request or program the next chunk of it.
 *
 * Called with musb->lock held (by the DMA callback, the workqueue handler
 * and the hrtimer handler).  When prog_len is zero the whole request has
 * been moved and the musb core is notified; otherwise one more
 * packet-sized slave transfer is queued, since AM335x Advisory 1.0.13
 * limits transparent-mode transfers to a single packet at a time.
 */
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;

	if (!cppi41_channel->prog_len) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u16 csr;
		u32 remain_bytes;
		void __iomem *epio = cppi41_channel->hw_ep->regs;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		/* Next chunk: whatever is left, capped at one max packet. */
		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
			: DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc))
			return;

		dma_desc->callback = cppi41_dma_callback;
		dma_desc->callback_param = &cppi41_channel->channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			/* Host RX: re-arm REQPKT so the next IN token goes out. */
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
}
180
181 static void cppi_trans_done_work(struct work_struct *work)
182 {
183         unsigned long flags;
184         struct cppi41_dma_channel *cppi41_channel =
185                 container_of(work, struct cppi41_dma_channel, dma_completion);
186         struct cppi41_dma_controller *controller = cppi41_channel->controller;
187         struct musb *musb = controller->musb;
188         struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
189         bool empty;
190
191         if (!cppi41_channel->is_tx && is_isoc(hw_ep, 1)) {
192                 spin_lock_irqsave(&musb->lock, flags);
193                 cppi41_trans_done(cppi41_channel);
194                 spin_unlock_irqrestore(&musb->lock, flags);
195         } else {
196                 empty = musb_is_tx_fifo_empty(hw_ep);
197                 if (empty) {
198                         spin_lock_irqsave(&musb->lock, flags);
199                         cppi41_trans_done(cppi41_channel);
200                         spin_unlock_irqrestore(&musb->lock, flags);
201                 } else {
202                         schedule_work(&cppi41_channel->dma_completion);
203                 }
204         }
205 }
206
/*
 * hrtimer handler: walk early_tx_list and complete every TX transfer
 * whose FIFO has drained in the meantime.  Re-arms itself at 150us
 * intervals while any channel is still waiting.
 */
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
	struct cppi41_dma_controller *controller;
	struct cppi41_dma_channel *cppi41_channel, *n;
	struct musb *musb;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	controller = container_of(timer, struct cppi41_dma_controller,
			early_tx);
	musb = controller->musb;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
			tx_check) {
		bool empty;
		struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			/* Drained: drop it from the poll list and finish. */
			list_del_init(&cppi41_channel->tx_check);
			cppi41_trans_done(cppi41_channel);
		}
	}

	/* Someone is still waiting: fire again in 150us. */
	if (!list_empty(&controller->early_tx_list)) {
		ret = HRTIMER_RESTART;
		hrtimer_forward_now(&controller->early_tx,
				ktime_set(0, 150 * NSEC_PER_USEC));
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}
241
/*
 * dmaengine completion callback.
 *
 * Accounts the bytes moved in this chunk, restores the RX data toggle if
 * the AM335x erratum hit, and then either completes the transfer or
 * defers completion: ISOCH endpoints finish from a workqueue, TX
 * endpoints must additionally wait for the MUSB TX FIFO to drain.
 */
static void cppi41_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;
	bool empty;

	spin_lock_irqsave(&musb->lock, flags);

	/* residue = bytes of the programmed chunk that did NOT transfer */
	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
		hw_ep->epnum, cppi41_channel->transferred,
		cppi41_channel->total_len);

	update_rx_toggle(cppi41_channel);

	/* A short packet, or reaching total_len, ends the whole request. */
	if (cppi41_channel->transferred == cppi41_channel->total_len ||
			transferred < cppi41_channel->packet_sz)
		cppi41_channel->prog_len = 0;

	if (!cppi41_channel->is_tx) {
		/* RX ISOCH completes from process context; the rest here. */
		if (is_isoc(hw_ep, 1))
			schedule_work(&cppi41_channel->dma_completion);
		else
			cppi41_trans_done(cppi41_channel);
		goto out;
	}

	empty = musb_is_tx_fifo_empty(hw_ep);
	if (empty) {
		cppi41_trans_done(cppi41_channel);
	} else {
		struct cppi41_dma_controller *controller;
		/*
		 * On AM335x it has been observed that the TX interrupt fires
		 * too early that means the TXFIFO is not yet empty but the DMA
		 * engine says that it is done with the transfer. We don't
		 * receive a FIFO empty interrupt so the only thing we can do is
		 * to poll for the bit. On HS it usually takes 2us, on FS around
		 * 110us - 150us depending on the transfer size.
		 * We spin on HS (no longer than 25us) and setup a timer on
		 * FS to check for the bit and complete the transfer.
		 */
		controller = cppi41_channel->controller;

		/* NOTE(review): this reads the gadget speed (musb->g.speed)
		 * even though the driver also serves host mode - confirm.
		 */
		if (musb->g.speed == USB_SPEED_HIGH) {
			unsigned wait = 25;

			do {
				empty = musb_is_tx_fifo_empty(hw_ep);
				if (empty)
					break;
				wait--;
				if (!wait)
					break;
				udelay(1);
			} while (1);

			empty = musb_is_tx_fifo_empty(hw_ep);
			if (empty) {
				cppi41_trans_done(cppi41_channel);
				goto out;
			}
		}
		if (is_isoc(hw_ep, 0)) {
			/* TX ISOCH: poll the FIFO from a workqueue instead
			 * of the hrtimer (see file title).
			 */
			schedule_work(&cppi41_channel->dma_completion);
			goto out;
		}
		/* FS: queue on the hrtimer poll list and start it if idle. */
		list_add_tail(&cppi41_channel->tx_check,
				&controller->early_tx_list);
		if (!hrtimer_active(&controller->early_tx)) {
			hrtimer_start_range_ns(&controller->early_tx,
				ktime_set(0, 140 * NSEC_PER_USEC),
				40 * NSEC_PER_USEC,
				HRTIMER_MODE_REL);
		}
	}
out:
	spin_unlock_irqrestore(&musb->lock, flags);
}
330
/*
 * Replace the 2-bit mode field of endpoint @ep inside the packed
 * register value @old and return the updated value.
 */
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
	unsigned shift = (ep - 1) * 2;

	return (old & ~(3u << shift)) | ((u32)mode << shift);
}
340
341 static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
342                 unsigned mode)
343 {
344         struct cppi41_dma_controller *controller = cppi41_channel->controller;
345         u32 port;
346         u32 new_mode;
347         u32 old_mode;
348
349         if (cppi41_channel->is_tx)
350                 old_mode = controller->tx_mode;
351         else
352                 old_mode = controller->rx_mode;
353         port = cppi41_channel->port_num;
354         new_mode = update_ep_mode(port, mode, old_mode);
355
356         if (new_mode == old_mode)
357                 return;
358         if (cppi41_channel->is_tx) {
359                 controller->tx_mode = new_mode;
360                 musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
361                                 new_mode);
362         } else {
363                 controller->rx_mode = new_mode;
364                 musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
365                                 new_mode);
366         }
367 }
368
369 static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
370                 unsigned mode)
371 {
372         struct cppi41_dma_controller *controller = cppi41_channel->controller;
373         u32 port;
374         u32 new_mode;
375         u32 old_mode;
376
377         old_mode = controller->auto_req;
378         port = cppi41_channel->port_num;
379         new_mode = update_ep_mode(port, mode, old_mode);
380
381         if (new_mode == old_mode)
382                 return;
383         controller->auto_req = new_mode;
384         musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
385 }
386
/*
 * Program one transfer on the channel and submit it to the dmaengine.
 *
 * TX requests longer than one packet use generic RNDIS mode so the
 * hardware segments them.  Everything else - short TX and, because of
 * AM335x Advisory 1.0.13, all RX - runs in transparent mode limited to
 * one packet per descriptor; cppi41_trans_done() reloads the remaining
 * chunks.
 *
 * Returns true when the descriptor was queued, false if prep failed.
 */
static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->musb;
	unsigned use_gen_rndis = 0;

	dev_dbg(musb->controller,
		"configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
		cppi41_channel->port_num, RNDIS_REG(cppi41_channel->port_num),
		packet_sz, mode, (unsigned long long) dma_addr,
		len, cppi41_channel->is_tx);

	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;

	/*
	 * Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more
	 * than max packet size at a time.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			/* Tell the wrapper the total RNDIS payload size. */
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREG_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
					RNDIS_REG(cppi41_channel->port_num), 0);
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREG_NONE);
		}
	} else {
		/* fallback mode: transparent, one packet at a time */
		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREG_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);

	/* Snapshot the RX data toggle before the hardware starts. */
	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);
	return true;
}
457
458 static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
459                                 struct musb_hw_ep *hw_ep, u8 is_tx)
460 {
461         struct cppi41_dma_controller *controller = container_of(c,
462                         struct cppi41_dma_controller, controller);
463         struct cppi41_dma_channel *cppi41_channel = NULL;
464         u8 ch_num = hw_ep->epnum - 1;
465
466         if (ch_num >= MUSB_DMA_NUM_CHANNELS)
467                 return NULL;
468
469         if (is_tx)
470                 cppi41_channel = &controller->tx_channel[ch_num];
471         else
472                 cppi41_channel = &controller->rx_channel[ch_num];
473
474         if (!cppi41_channel->dc)
475                 return NULL;
476
477         if (cppi41_channel->is_allocated)
478                 return NULL;
479
480         cppi41_channel->hw_ep = hw_ep;
481         cppi41_channel->is_allocated = 1;
482
483         return &cppi41_channel->channel;
484 }
485
486 static void cppi41_dma_channel_release(struct dma_channel *channel)
487 {
488         struct cppi41_dma_channel *cppi41_channel = channel->private_data;
489
490         if (cppi41_channel->is_allocated) {
491                 cppi41_channel->is_allocated = 0;
492                 channel->status = MUSB_DMA_STATUS_FREE;
493                 channel->actual_len = 0;
494         }
495 }
496
497 static int cppi41_dma_channel_program(struct dma_channel *channel,
498                                 u16 packet_sz, u8 mode,
499                                 dma_addr_t dma_addr, u32 len)
500 {
501         int ret;
502         struct cppi41_dma_channel *cppi41_channel = channel->private_data;
503         int hb_mult = 0;
504
505         BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
506                 channel->status == MUSB_DMA_STATUS_BUSY);
507
508         if (is_host_active(cppi41_channel->controller->musb)) {
509                 if (cppi41_channel->is_tx)
510                         hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
511                 else
512                         hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
513         }
514
515         channel->status = MUSB_DMA_STATUS_BUSY;
516         channel->actual_len = 0;
517
518         if (hb_mult)
519                 packet_sz = hb_mult * (packet_sz & 0x7FF);
520
521         ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
522         if (!ret)
523                 channel->status = MUSB_DMA_STATUS_FREE;
524
525         return ret;
526 }
527
528 static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
529                 void *buf, u32 length)
530 {
531         struct cppi41_dma_channel *cppi41_channel = channel->private_data;
532         struct cppi41_dma_controller *controller = cppi41_channel->controller;
533         struct musb *musb = controller->musb;
534
535         if (is_host_active(musb)) {
536                 WARN_ON(1);
537                 return 1;
538         }
539         if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
540                 return 0;
541         if (cppi41_channel->is_tx)
542                 return 1;
543         /* AM335x Advisory 1.0.13. No workaround for device RX mode */
544         return 0;
545 }
546
/*
 * Abort a programmed transfer.
 *
 * Sequence: detach the endpoint from the DMA request line, flush any
 * pending packet out of the FIFO, then keep hitting the teardown
 * register until the dmaengine driver reports the descriptor is really
 * gone (it returns -EAGAIN while teardown is still in progress).
 */
static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	dev_dbg(musb->controller, "abort channel=%d, is_tx=%d\n",
			cppi41_channel->port_num, is_tx);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	/* Make sure the early-TX hrtimer no longer sees this channel. */
	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
			/* NOTE(review): FLUSHFIFO written twice - presumably
			 * once per buffered packet for double-buffered FIFOs;
			 * confirm against the MUSB programming guide.
			 */
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* TX teardown bits live in the upper 16 bits of USB_TDOWN. */
	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	do {
		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

	if (is_tx) {
		/* Drop any packet the abort left sitting in the TX FIFO. */
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
	return 0;
}
605
606 static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
607 {
608         struct dma_chan *dc;
609         int i;
610
611         for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
612                 dc = ctrl->tx_channel[i].dc;
613                 if (dc)
614                         dma_release_channel(dc);
615                 dc = ctrl->rx_channel[i].dc;
616                 if (dc)
617                         dma_release_channel(dc);
618         }
619 }
620
/* Undo cppi41_dma_controller_start(): release all dmaengine channels. */
static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}
625
626 static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
627 {
628         struct musb *musb = controller->musb;
629         struct device *dev = musb->controller;
630         struct device_node *np = dev->of_node;
631         struct cppi41_dma_channel *cppi41_channel;
632         int count;
633         int i;
634         int ret;
635
636         count = of_property_count_strings(np, "dma-names");
637         if (count < 0)
638                 return count;
639
640         for (i = 0; i < count; i++) {
641                 struct dma_chan *dc;
642                 struct dma_channel *musb_dma;
643                 const char *str;
644                 unsigned is_tx;
645                 unsigned int port;
646
647                 ret = of_property_read_string_index(np, "dma-names", i, &str);
648                 if (ret)
649                         goto err;
650                 if (!strncmp(str, "tx", 2))
651                         is_tx = 1;
652                 else if (!strncmp(str, "rx", 2))
653                         is_tx = 0;
654                 else {
655                         dev_err(dev, "Wrong dmatype %s\n", str);
656                         goto err;
657                 }
658                 ret = kstrtouint(str + 2, 0, &port);
659                 if (ret)
660                         goto err;
661
662                 ret = -EINVAL;
663                 if (port > MUSB_DMA_NUM_CHANNELS || !port)
664                         goto err;
665                 if (is_tx)
666                         cppi41_channel = &controller->tx_channel[port - 1];
667                 else
668                         cppi41_channel = &controller->rx_channel[port - 1];
669
670                 cppi41_channel->controller = controller;
671                 cppi41_channel->port_num = port;
672                 cppi41_channel->is_tx = is_tx;
673                 INIT_LIST_HEAD(&cppi41_channel->tx_check);
674                 INIT_WORK(&cppi41_channel->dma_completion,
675                           cppi_trans_done_work);
676
677                 musb_dma = &cppi41_channel->channel;
678                 musb_dma->private_data = cppi41_channel;
679                 musb_dma->status = MUSB_DMA_STATUS_FREE;
680                 musb_dma->max_len = SZ_4M;
681
682                 dc = dma_request_slave_channel(dev, str);
683                 if (!dc) {
684                         dev_err(dev, "Failed to request %s.\n", str);
685                         ret = -EPROBE_DEFER;
686                         goto err;
687                 }
688                 cppi41_channel->dc = dc;
689         }
690         return 0;
691 err:
692         cppi41_release_all_dma_chans(controller);
693         return ret;
694 }
695
/*
 * Tear down the controller: cancel the early-TX poll timer, release the
 * dmaengine channels and free the glue structure.
 */
void dma_controller_destroy(struct dma_controller *c)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);

	hrtimer_cancel(&controller->early_tx);
	cppi41_dma_controller_stop(controller);
	kfree(controller);
}
705
/*
 * Allocate and set up the CPPI 4.1 DMA controller glue.
 *
 * Returns the generic controller on success, ERR_PTR(-EPROBE_DEFER)
 * when the dmaengine channels are not available yet, and NULL on any
 * other failure (missing DT node, allocation failure, bad DT data).
 */
struct dma_controller *dma_controller_create(struct musb *musb,
					void __iomem *base)
{
	struct cppi41_dma_controller *controller;
	int ret = 0;

	/* Channels are looked up via DT "dma-names"; no DT, no DMA. */
	if (!musb->controller->of_node) {
		dev_err(musb->controller, "Need DT for the DMA engine.\n");
		return NULL;
	}

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	/* Timer that polls for "TX FIFO empty" on full-speed links. */
	hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	controller->early_tx.function = cppi41_recheck_tx_req;
	INIT_LIST_HEAD(&controller->early_tx_list);
	controller->musb = musb;

	controller->controller.channel_alloc = cppi41_dma_channel_allocate;
	controller->controller.channel_release = cppi41_dma_channel_release;
	controller->controller.channel_program = cppi41_dma_channel_program;
	controller->controller.channel_abort = cppi41_dma_channel_abort;
	controller->controller.is_compatible = cppi41_is_compatible;

	ret = cppi41_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller);
kzalloc_fail:
	/* ret is still 0 on allocation failure, so that path yields NULL. */
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}