2 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
3 * Author: Chao Xie <chao.xie@marvell.com>
4 * Neil Zhang <zhangwm@marvell.com>
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/dmapool.h>
16 #include <linux/kernel.h>
17 #include <linux/delay.h>
18 #include <linux/ioport.h>
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/errno.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/timer.h>
25 #include <linux/list.h>
26 #include <linux/interrupt.h>
27 #include <linux/moduleparam.h>
28 #include <linux/device.h>
29 #include <linux/usb/ch9.h>
30 #include <linux/usb/gadget.h>
31 #include <linux/usb/otg.h>
34 #include <linux/irq.h>
35 #include <linux/platform_device.h>
36 #include <linux/clk.h>
37 #include <linux/platform_data/mv_usb.h>
38 #include <asm/unaligned.h>
42 #define DRIVER_DESC "Marvell PXA USB Device Controller driver"
43 #define DRIVER_VERSION "8 Nov 2010"
45 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
46 ((ep)->udc->ep0_dir) : ((ep)->direction))
48 /* timeout value -- usec */
49 #define RESET_TIMEOUT 10000
50 #define FLUSH_TIMEOUT 10000
51 #define EPSTATUS_TIMEOUT 10000
52 #define PRIME_TIMEOUT 10000
53 #define READSAFE_TIMEOUT 1000
55 #define LOOPS_USEC_SHIFT 1
56 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
57 #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
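/*
 * Note: the register polling loops below are assumed to delay LOOPS_USEC
 * microseconds per iteration (e.g. via udelay()), so LOOPS(timeout)
 * converts a timeout expressed in microseconds into a loop count.
 */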
59 static DECLARE_COMPLETION(release_done);
61 static const char driver_name[] = "mv_udc";
62 static const char driver_desc[] = DRIVER_DESC;
64 /* controller device global variable */
65 static struct mv_udc *the_controller;
67 static void nuke(struct mv_ep *ep, int status);
68 static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
70 /* for endpoint 0 operations */
71 static const struct usb_endpoint_descriptor mv_ep0_desc = {
72 .bLength = USB_DT_ENDPOINT_SIZE,
73 .bDescriptorType = USB_DT_ENDPOINT,
74 .bEndpointAddress = 0,
75 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
76 .wMaxPacketSize = EP0_MAX_PKT_SIZE,
79 static void ep0_reset(struct mv_udc *udc)
86 for (i = 0; i < 2; i++) {
91 ep->dqh = &udc->ep_dqh[i];
93 /* configure ep0 endpoint capabilities in dQH */
94 ep->dqh->max_packet_length =
95 (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
98 ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
100 epctrlx = readl(&udc->op_regs->epctrlx[0]);
102 epctrlx |= EPCTRL_TX_ENABLE
103 | (USB_ENDPOINT_XFER_CONTROL
104 << EPCTRL_TX_EP_TYPE_SHIFT);
107 epctrlx |= EPCTRL_RX_ENABLE
108 | (USB_ENDPOINT_XFER_CONTROL
109 << EPCTRL_RX_EP_TYPE_SHIFT);
112 writel(epctrlx, &udc->op_regs->epctrlx[0]);
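/*
 * Each hardware endpoint has one device queue head (dQH) per direction in
 * udc->ep_dqh[]: the entry index is ep_num * 2 + direction, so ep0 uses
 * entries 0 (OUT) and 1 (IN), as set up in the loop above.
 */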
116 /* protocol ep0 stall, will automatically be cleared on new transaction */
117 static void ep0_stall(struct mv_udc *udc)
121 /* set TX and RX to stall */
122 epctrlx = readl(&udc->op_regs->epctrlx[0]);
123 epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
124 writel(epctrlx, &udc->op_regs->epctrlx[0]);
126 /* update ep0 state */
127 udc->ep0_state = WAIT_FOR_SETUP;
128 udc->ep0_dir = EP_DIR_OUT;
131 static int process_ep_req(struct mv_udc *udc, int index,
132 struct mv_req *curr_req)
134 struct mv_dtd *curr_dtd;
135 struct mv_dqh *curr_dqh;
136 int td_complete, actual, remaining_length;
142 curr_dqh = &udc->ep_dqh[index];
143 direction = index % 2;
145 curr_dtd = curr_req->head;
147 actual = curr_req->req.length;
149 for (i = 0; i < curr_req->dtd_count; i++) {
150 if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
151 dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
152 udc->eps[index].name);
156 errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
159 (curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
160 >> DTD_LENGTH_BIT_POS;
161 actual -= remaining_length;
163 if (remaining_length) {
165 dev_dbg(&udc->dev->dev,
166 "TX dTD remains data\n");
173 dev_info(&udc->dev->dev,
174 "complete_tr error: ep=%d %s: error = 0x%x\n",
175 index >> 1, direction ? "SEND" : "RECV",
177 if (errors & DTD_STATUS_HALTED) {
178 /* Clear the errors and Halt condition */
179 curr_dqh->size_ioc_int_sts &= ~errors;
181 } else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
183 } else if (errors & DTD_STATUS_TRANSACTION_ERR) {
187 if (i != curr_req->dtd_count - 1)
188 curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
193 if (direction == EP_DIR_OUT)
194 bit_pos = 1 << curr_req->ep->ep_num;
196 bit_pos = 1 << (16 + curr_req->ep->ep_num);
198 while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
199 if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
200 while (readl(&udc->op_regs->epstatus) & bit_pos)
207 curr_req->req.actual = actual;
213 * done() - retire a request; the caller has blocked irqs
214 * @status : request status to be set, only works when
215 * request is still in progress.
217 static void done(struct mv_ep *ep, struct mv_req *req, int status)
219 struct mv_udc *udc = NULL;
220 unsigned char stopped = ep->stopped;
221 struct mv_dtd *curr_td, *next_td;
224 udc = (struct mv_udc *)ep->udc;
225 /* Remove the req from ep->queue */
226 list_del_init(&req->queue);
228 /* req.status should be set as -EINPROGRESS in ep_queue() */
229 if (req->req.status == -EINPROGRESS)
230 req->req.status = status;
232 status = req->req.status;
234 /* Free dtd for the request */
236 for (j = 0; j < req->dtd_count; j++) {
238 if (j != req->dtd_count - 1)
239 next_td = curr_td->next_dtd_virt;
240 dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
244 dma_unmap_single(ep->udc->gadget.dev.parent,
245 req->req.dma, req->req.length,
246 ((ep_dir(ep) == EP_DIR_IN) ?
247 DMA_TO_DEVICE : DMA_FROM_DEVICE));
248 req->req.dma = DMA_ADDR_INVALID;
251 dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
252 req->req.dma, req->req.length,
253 ((ep_dir(ep) == EP_DIR_IN) ?
254 DMA_TO_DEVICE : DMA_FROM_DEVICE));
256 if (status && (status != -ESHUTDOWN))
257 dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
258 ep->ep.name, &req->req, status,
259 req->req.actual, req->req.length);
263 spin_unlock(&ep->udc->lock);
265 * complete() is from gadget layer,
266 * eg fsg->bulk_in_complete()
268 if (req->req.complete)
269 req->req.complete(&ep->ep, &req->req);
271 spin_lock(&ep->udc->lock);
272 ep->stopped = stopped;
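/*
 * queue_dtd() hands a request to the hardware.  If other requests are
 * already pending, the new dTD chain is appended to the last pending dTD
 * and the ATDTW tripwire is used to sample ENDPTSTATUS safely; if the
 * queue was empty, the dQH next pointer is rewritten and the endpoint is
 * primed directly.
 */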
275 static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
279 u32 bit_pos, direction;
280 u32 usbcmd, epstatus;
285 direction = ep_dir(ep);
286 dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
287 bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
289 /* check if the pipe is empty */
290 if (!(list_empty(&ep->queue))) {
291 struct mv_req *lastreq;
292 lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
293 lastreq->tail->dtd_next =
294 req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
298 if (readl(&udc->op_regs->epprime) & bit_pos)
301 loops = LOOPS(READSAFE_TIMEOUT);
303 /* start with setting the semaphores */
304 usbcmd = readl(&udc->op_regs->usbcmd);
305 usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
306 writel(usbcmd, &udc->op_regs->usbcmd);
308 /* read the endpoint status */
309 epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
312 * Reread the ATDTW semaphore bit to check if it is
313 * cleared. When the hardware sees a hazard, it clears
314 * the bit; otherwise it stays set to 1 and we can
315 * proceed with priming the endpoint if it is not already
318 if (readl(&udc->op_regs->usbcmd)
319 & USBCMD_ATDTW_TRIPWIRE_SET)
324 dev_err(&udc->dev->dev,
325 "Timeout for ATDTW_TRIPWIRE...\n");
332 /* Clear the semaphore */
333 usbcmd = readl(&udc->op_regs->usbcmd);
334 usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
335 writel(usbcmd, &udc->op_regs->usbcmd);
341 /* Write dQH next pointer and terminate bit to 0 */
342 dqh->next_dtd_ptr = req->head->td_dma
343 & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
345 /* clear active and halt bit, in case set from a previous error */
346 dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
348 /* Ensure that updates to the QH will occur before priming. */
351 /* Prime the Endpoint */
352 writel(bit_pos, &udc->op_regs->epprime);
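/*
 * build_dtd() fills in one transfer descriptor.  The five buffer pointers
 * below are the request's DMA address plus 4 KB increments, so a single
 * dTD can describe up to EP_MAX_LENGTH_TRANSFER bytes of a contiguous
 * buffer; larger requests are split across several dTDs by req_to_dtd().
 */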
358 static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
359 dma_addr_t *dma, int *is_last)
366 /* how big will this transfer be? */
367 if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
369 mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS)
371 *length = min(req->req.length - req->req.actual,
372 (unsigned)(mult * req->ep->ep.maxpacket));
374 *length = min(req->req.length - req->req.actual,
375 (unsigned)EP_MAX_LENGTH_TRANSFER);
380 * Be careful that no _GFP_HIGHMEM is set,
381 * or we can not use dma_to_virt
383 dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
388 /* initialize buffer page pointers */
389 temp = (u32)(req->req.dma + req->req.actual);
390 dtd->buff_ptr0 = cpu_to_le32(temp);
392 dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
393 dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
394 dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
395 dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);
397 req->req.actual += *length;
399 /* zlp is needed if req->req.zero is set */
401 if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
405 } else if (req->req.length == req->req.actual)
410 /* Fill in the transfer size; set active bit */
411 temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
413 /* Enable interrupt for the last dtd of a request */
414 if (*is_last && !req->req.no_interrupt)
419 dtd->size_ioc_sts = temp;
426 /* generate dTD linked list for a request */
427 static int req_to_dtd(struct mv_req *req)
430 int is_last, is_first = 1;
431 struct mv_dtd *dtd, *last_dtd = NULL;
438 dtd = build_dtd(req, &count, &dma, &is_last);
446 last_dtd->dtd_next = dma;
447 last_dtd->next_dtd_virt = dtd;
453 /* set terminate bit to 1 for the last dTD */
454 dtd->dtd_next = DTD_NEXT_TERMINATE;
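/*
 * Each new dTD is linked to the previous one both by its DMA address
 * (dtd_next, followed by the controller) and by its virtual address
 * (next_dtd_virt, walked by the driver); only the final dTD carries the
 * terminate bit set above.
 */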
461 static int mv_ep_enable(struct usb_ep *_ep,
462 const struct usb_endpoint_descriptor *desc)
468 u32 bit_pos, epctrlx, direction;
469 unsigned char zlt = 0, ios = 0, mult = 0;
472 ep = container_of(_ep, struct mv_ep, ep);
476 || desc->bDescriptorType != USB_DT_ENDPOINT)
479 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
482 direction = ep_dir(ep);
483 max = usb_endpoint_maxp(desc);
486 * disable HW zero length termination select
487 * driver handles zero length packet through req->req.zero
491 bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
493 /* Check if the Endpoint is Primed */
494 if ((readl(&udc->op_regs->epprime) & bit_pos)
495 || (readl(&udc->op_regs->epstatus) & bit_pos)) {
496 dev_info(&udc->dev->dev,
497 "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
498 " ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
499 (unsigned)ep->ep_num, direction ? "SEND" : "RECV",
500 (unsigned)readl(&udc->op_regs->epprime),
501 (unsigned)readl(&udc->op_regs->epstatus),
505 /* Set the max packet length, interrupt on Setup and Mult fields */
506 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
507 case USB_ENDPOINT_XFER_BULK:
511 case USB_ENDPOINT_XFER_CONTROL:
513 case USB_ENDPOINT_XFER_INT:
516 case USB_ENDPOINT_XFER_ISOC:
517 /* Calculate transactions needed for high bandwidth iso */
518 mult = (unsigned char)(1 + ((max >> 11) & 0x03));
519 max = max & 0x7ff; /* bit 0~10 */
520 /* 3 transactions at most */
528 spin_lock_irqsave(&udc->lock, flags);
529 /* Get the endpoint queue head address */
531 dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
532 | (mult << EP_QUEUE_HEAD_MULT_POS)
533 | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
534 | (ios ? EP_QUEUE_HEAD_IOS : 0);
535 dqh->next_dtd_ptr = 1;
536 dqh->size_ioc_int_sts = 0;
538 ep->ep.maxpacket = max;
542 /* Enable the endpoint for Rx or Tx and set the endpoint type */
543 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
544 if (direction == EP_DIR_IN) {
545 epctrlx &= ~EPCTRL_TX_ALL_MASK;
546 epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
547 | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
548 << EPCTRL_TX_EP_TYPE_SHIFT);
550 epctrlx &= ~EPCTRL_RX_ALL_MASK;
551 epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
552 | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
553 << EPCTRL_RX_EP_TYPE_SHIFT);
555 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
558 * Implement Guideline (GL# USB-7) The unused endpoint type must
559 * be programmed to bulk.
561 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
562 if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
563 epctrlx |= (USB_ENDPOINT_XFER_BULK
564 << EPCTRL_RX_EP_TYPE_SHIFT);
565 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
568 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
569 if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
570 epctrlx |= (USB_ENDPOINT_XFER_BULK
571 << EPCTRL_TX_EP_TYPE_SHIFT);
572 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
575 spin_unlock_irqrestore(&udc->lock, flags);
582 static int mv_ep_disable(struct usb_ep *_ep)
587 u32 bit_pos, epctrlx, direction;
590 ep = container_of(_ep, struct mv_ep, ep);
591 if ((_ep == NULL) || !ep->ep.desc)
596 /* Get the endpoint queue head address */
599 spin_lock_irqsave(&udc->lock, flags);
601 direction = ep_dir(ep);
602 bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
604 /* Reset the max packet length and the interrupt on Setup */
605 dqh->max_packet_length = 0;
607 /* Disable the endpoint for Rx or Tx and reset the endpoint type */
608 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
609 epctrlx &= ~((direction == EP_DIR_IN)
610 ? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
611 : (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
612 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
614 /* nuke all pending requests (does flush) */
615 nuke(ep, -ESHUTDOWN);
620 spin_unlock_irqrestore(&udc->lock, flags);
625 static struct usb_request *
626 mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
628 struct mv_req *req = NULL;
630 req = kzalloc(sizeof *req, gfp_flags);
634 req->req.dma = DMA_ADDR_INVALID;
635 INIT_LIST_HEAD(&req->queue);
640 static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
642 struct mv_req *req = NULL;
644 req = container_of(_req, struct mv_req, req);
650 static void mv_ep_fifo_flush(struct usb_ep *_ep)
653 u32 bit_pos, direction;
660 ep = container_of(_ep, struct mv_ep, ep);
665 direction = ep_dir(ep);
668 bit_pos = (1 << 16) | 1;
669 else if (direction == EP_DIR_OUT)
670 bit_pos = 1 << ep->ep_num;
672 bit_pos = 1 << (16 + ep->ep_num);
674 loops = LOOPS(EPSTATUS_TIMEOUT);
676 unsigned int inter_loops;
679 dev_err(&udc->dev->dev,
680 "TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
681 (unsigned)readl(&udc->op_regs->epstatus),
685 /* Write 1 to the Flush register */
686 writel(bit_pos, &udc->op_regs->epflush);
688 /* Wait until flushing completed */
689 inter_loops = LOOPS(FLUSH_TIMEOUT);
690 while (readl(&udc->op_regs->epflush)) {
692 * ENDPTFLUSH bit should be cleared to indicate this
693 * operation is complete
695 if (inter_loops == 0) {
696 dev_err(&udc->dev->dev,
697 "TIMEOUT for ENDPTFLUSH=0x%x,"
699 (unsigned)readl(&udc->op_regs->epflush),
707 } while (readl(&udc->op_regs->epstatus) & bit_pos);
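/*
 * The flush is retried in the outer loop above because a flush can race
 * with a prime of the same endpoint; ENDPTSTATUS is therefore re-checked
 * after ENDPTFLUSH clears, and the sequence repeats until the status bit
 * stays clear.
 */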
710 /* queues (submits) an I/O request to an endpoint */
712 mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
714 struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
715 struct mv_req *req = container_of(_req, struct mv_req, req);
716 struct mv_udc *udc = ep->udc;
720 /* catch various bogus parameters */
721 if (!_req || !req->req.complete || !req->req.buf
722 || !list_empty(&req->queue)) {
723 dev_err(&udc->dev->dev, "%s, bad params", __func__);
726 if (unlikely(!_ep || !ep->ep.desc)) {
727 dev_err(&udc->dev->dev, "%s, bad ep", __func__);
732 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
737 /* map virtual address to hardware */
738 if (req->req.dma == DMA_ADDR_INVALID) {
739 req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
741 req->req.length, ep_dir(ep)
746 dma_sync_single_for_device(ep->udc->gadget.dev.parent,
747 req->req.dma, req->req.length,
754 req->req.status = -EINPROGRESS;
758 spin_lock_irqsave(&udc->lock, flags);
760 /* build dtds and push them to device queue */
761 if (!req_to_dtd(req)) {
762 retval = queue_dtd(ep, req);
764 spin_unlock_irqrestore(&udc->lock, flags);
765 dev_err(&udc->dev->dev, "Failed to queue dtd\n");
769 spin_unlock_irqrestore(&udc->lock, flags);
770 dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
775 /* Update ep0 state */
777 udc->ep0_state = DATA_STATE_XMIT;
779 /* irq handler advances the queue */
780 list_add_tail(&req->queue, &ep->queue);
781 spin_unlock_irqrestore(&udc->lock, flags);
787 dma_unmap_single(ep->udc->gadget.dev.parent,
788 req->req.dma, req->req.length,
789 ((ep_dir(ep) == EP_DIR_IN) ?
790 DMA_TO_DEVICE : DMA_FROM_DEVICE));
791 req->req.dma = DMA_ADDR_INVALID;
794 dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
795 req->req.dma, req->req.length,
796 ((ep_dir(ep) == EP_DIR_IN) ?
797 DMA_TO_DEVICE : DMA_FROM_DEVICE));
802 static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
804 struct mv_dqh *dqh = ep->dqh;
807 /* Write dQH next pointer and terminate bit to 0 */
808 dqh->next_dtd_ptr = req->head->td_dma
809 & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
811 /* clear active and halt bit, in case set from a previous error */
812 dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
814 /* Ensure that updates to the QH will occur before priming. */
817 bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
819 /* Prime the Endpoint */
820 writel(bit_pos, &ep->udc->op_regs->epprime);
823 /* dequeues (cancels, unlinks) an I/O request from an endpoint */
824 static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
826 struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
828 struct mv_udc *udc = ep->udc;
830 int stopped, ret = 0;
836 spin_lock_irqsave(&ep->udc->lock, flags);
837 stopped = ep->stopped;
839 /* Stop the ep before we deal with the queue */
841 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
842 if (ep_dir(ep) == EP_DIR_IN)
843 epctrlx &= ~EPCTRL_TX_ENABLE;
845 epctrlx &= ~EPCTRL_RX_ENABLE;
846 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
848 /* make sure it's actually queued on this endpoint */
849 list_for_each_entry(req, &ep->queue, queue) {
850 if (&req->req == _req)
853 if (&req->req != _req) {
858 /* The request is in progress, or completed but not dequeued */
859 if (ep->queue.next == &req->queue) {
860 _req->status = -ECONNRESET;
861 mv_ep_fifo_flush(_ep); /* flush current transfer */
863 /* The request isn't the last request in this ep queue */
864 if (req->queue.next != &ep->queue) {
865 struct mv_req *next_req;
867 next_req = list_entry(req->queue.next,
868 struct mv_req, queue);
870 /* Point the QH to the first TD of next request */
871 mv_prime_ep(ep, next_req);
876 qh->next_dtd_ptr = 1;
877 qh->size_ioc_int_sts = 0;
880 /* The request hasn't been processed, patch up the TD chain */
882 struct mv_req *prev_req;
884 prev_req = list_entry(req->queue.prev, struct mv_req, queue);
885 writel(readl(&req->tail->dtd_next),
886 &prev_req->tail->dtd_next);
890 done(ep, req, -ECONNRESET);
894 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
895 if (ep_dir(ep) == EP_DIR_IN)
896 epctrlx |= EPCTRL_TX_ENABLE;
898 epctrlx |= EPCTRL_RX_ENABLE;
899 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
900 ep->stopped = stopped;
902 spin_unlock_irqrestore(&ep->udc->lock, flags);
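/*
 * ep_set_stall() sets or clears the stall bit for one direction of an
 * endpoint; when clearing a stall it also resets the data toggle, as
 * required after a ClearFeature(ENDPOINT_HALT) request.
 */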
906 static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
910 epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
913 if (direction == EP_DIR_IN)
914 epctrlx |= EPCTRL_TX_EP_STALL;
916 epctrlx |= EPCTRL_RX_EP_STALL;
918 if (direction == EP_DIR_IN) {
919 epctrlx &= ~EPCTRL_TX_EP_STALL;
920 epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
922 epctrlx &= ~EPCTRL_RX_EP_STALL;
923 epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
926 writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
929 static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
933 epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
935 if (direction == EP_DIR_OUT)
936 return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
938 return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
941 static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
944 unsigned long flags = 0;
948 ep = container_of(_ep, struct mv_ep, ep);
950 if (!_ep || !ep->ep.desc) {
955 if ((ep->ep.desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_ISOC) {
956 status = -EOPNOTSUPP;
961 * An attempt to halt an IN ep will fail if any transfer requests
964 if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
969 spin_lock_irqsave(&ep->udc->lock, flags);
970 ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
975 spin_unlock_irqrestore(&ep->udc->lock, flags);
977 if (ep->ep_num == 0) {
978 udc->ep0_state = WAIT_FOR_SETUP;
979 udc->ep0_dir = EP_DIR_OUT;
985 static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
987 return mv_ep_set_halt_wedge(_ep, halt, 0);
990 static int mv_ep_set_wedge(struct usb_ep *_ep)
992 return mv_ep_set_halt_wedge(_ep, 1, 1);
995 static struct usb_ep_ops mv_ep_ops = {
996 .enable = mv_ep_enable,
997 .disable = mv_ep_disable,
999 .alloc_request = mv_alloc_request,
1000 .free_request = mv_free_request,
1002 .queue = mv_ep_queue,
1003 .dequeue = mv_ep_dequeue,
1005 .set_wedge = mv_ep_set_wedge,
1006 .set_halt = mv_ep_set_halt,
1007 .fifo_flush = mv_ep_fifo_flush, /* flush fifo */
1010 static void udc_clock_enable(struct mv_udc *udc)
1014 for (i = 0; i < udc->clknum; i++)
1015 clk_enable(udc->clk[i]);
1018 static void udc_clock_disable(struct mv_udc *udc)
1022 for (i = 0; i < udc->clknum; i++)
1023 clk_disable(udc->clk[i]);
1026 static void udc_stop(struct mv_udc *udc)
1030 /* Disable interrupts */
1031 tmp = readl(&udc->op_regs->usbintr);
1032 tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
1033 USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
1034 writel(tmp, &udc->op_regs->usbintr);
1038 /* Clear the Run/Stop bit in the command register to stop the controller */
1039 tmp = readl(&udc->op_regs->usbcmd);
1040 tmp &= ~USBCMD_RUN_STOP;
1041 writel(tmp, &udc->op_regs->usbcmd);
1044 static void udc_start(struct mv_udc *udc)
1048 usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
1049 | USBINTR_PORT_CHANGE_DETECT_EN
1050 | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
1051 /* Enable interrupts */
1052 writel(usbintr, &udc->op_regs->usbintr);
1056 /* Set the Run bit in the command register */
1057 writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
1060 static int udc_reset(struct mv_udc *udc)
1065 /* Stop the controller */
1066 tmp = readl(&udc->op_regs->usbcmd);
1067 tmp &= ~USBCMD_RUN_STOP;
1068 writel(tmp, &udc->op_regs->usbcmd);
1070 /* Reset the controller to get default values */
1071 writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
1073 /* wait for reset to complete */
1074 loops = LOOPS(RESET_TIMEOUT);
1075 while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
1077 dev_err(&udc->dev->dev,
1078 "Wait for RESET completed TIMEOUT\n");
1085 /* set controller to device mode */
1086 tmp = readl(&udc->op_regs->usbmode);
1087 tmp |= USBMODE_CTRL_MODE_DEVICE;
1089 /* turn setup lockout off; the setup tripwire in usbcmd is used instead */
1090 tmp |= USBMODE_SETUP_LOCK_OFF;
1092 writel(tmp, &udc->op_regs->usbmode);
1094 writel(0x0, &udc->op_regs->epsetupstat);
1096 /* Configure the Endpoint List Address */
1097 writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
1098 &udc->op_regs->eplistaddr);
1100 portsc = readl(&udc->op_regs->portsc[0]);
1101 if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
1102 portsc &= ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER);
1105 portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
1107 portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);
1109 writel(portsc, &udc->op_regs->portsc[0]);
1111 tmp = readl(&udc->op_regs->epctrlx[0]);
1112 tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
1113 writel(tmp, &udc->op_regs->epctrlx[0]);
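/*
 * At this point the controller is stopped and back in device mode with
 * setup lockout disabled, the dQH list address programmed and the ep0
 * stall bits cleared; udc_start() sets the Run/Stop bit later.
 */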
1118 static int mv_udc_enable_internal(struct mv_udc *udc)
1125 dev_dbg(&udc->dev->dev, "enable udc\n");
1126 udc_clock_enable(udc);
1127 if (udc->pdata->phy_init) {
1128 retval = udc->pdata->phy_init(udc->phy_regs);
1130 dev_err(&udc->dev->dev,
1131 "init phy error %d\n", retval);
1132 udc_clock_disable(udc);
1141 static int mv_udc_enable(struct mv_udc *udc)
1143 if (udc->clock_gating)
1144 return mv_udc_enable_internal(udc);
1149 static void mv_udc_disable_internal(struct mv_udc *udc)
1152 dev_dbg(&udc->dev->dev, "disable udc\n");
1153 if (udc->pdata->phy_deinit)
1154 udc->pdata->phy_deinit(udc->phy_regs);
1155 udc_clock_disable(udc);
1160 static void mv_udc_disable(struct mv_udc *udc)
1162 if (udc->clock_gating)
1163 mv_udc_disable_internal(udc);
1166 static int mv_udc_get_frame(struct usb_gadget *gadget)
1174 udc = container_of(gadget, struct mv_udc, gadget);
1176 retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
1181 /* Tries to wake up the host connected to this gadget */
1182 static int mv_udc_wakeup(struct usb_gadget *gadget)
1184 struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
1187 /* Remote wakeup feature not enabled by host */
1188 if (!udc->remote_wakeup)
1191 portsc = readl(&udc->op_regs->portsc);
1192 /* not suspended? */
1193 if (!(portsc & PORTSCX_PORT_SUSPEND))
1195 /* trigger force resume */
1196 portsc |= PORTSCX_PORT_FORCE_RESUME;
1197 writel(portsc, &udc->op_regs->portsc[0]);
1201 static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
1204 unsigned long flags;
1207 udc = container_of(gadget, struct mv_udc, gadget);
1208 spin_lock_irqsave(&udc->lock, flags);
1210 udc->vbus_active = (is_active != 0);
1212 dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
1213 __func__, udc->softconnect, udc->vbus_active);
1215 if (udc->driver && udc->softconnect && udc->vbus_active) {
1216 retval = mv_udc_enable(udc);
1218 /* Clocks are disabled, so registers need to be re-initialized */
1223 } else if (udc->driver && udc->softconnect) {
1227 /* stop all the transfers in the queue */
1228 stop_activity(udc, udc->driver);
1230 mv_udc_disable(udc);
1234 spin_unlock_irqrestore(&udc->lock, flags);
1238 static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
1241 unsigned long flags;
1244 udc = container_of(gadget, struct mv_udc, gadget);
1245 spin_lock_irqsave(&udc->lock, flags);
1247 udc->softconnect = (is_on != 0);
1249 dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
1250 __func__, udc->softconnect, udc->vbus_active);
1252 if (udc->driver && udc->softconnect && udc->vbus_active) {
1253 retval = mv_udc_enable(udc);
1255 /* Clocks are disabled, so registers need to be re-initialized */
1260 } else if (udc->driver && udc->vbus_active) {
1261 /* stop all the transfers in the queue */
1262 stop_activity(udc, udc->driver);
1264 mv_udc_disable(udc);
1267 spin_unlock_irqrestore(&udc->lock, flags);
1271 static int mv_udc_start(struct usb_gadget_driver *driver,
1272 int (*bind)(struct usb_gadget *, struct usb_gadget_driver *));
1273 static int mv_udc_stop(struct usb_gadget_driver *driver);
1274 /* device controller usb_gadget_ops structure */
1275 static const struct usb_gadget_ops mv_ops = {
1277 /* returns the current frame number */
1278 .get_frame = mv_udc_get_frame,
1280 /* tries to wake up the host connected to this gadget */
1281 .wakeup = mv_udc_wakeup,
1283 /* notify controller that VBUS is powered or not */
1284 .vbus_session = mv_udc_vbus_session,
1286 /* D+ pullup, software-controlled connect/disconnect to USB host */
1287 .pullup = mv_udc_pullup,
1288 .start = mv_udc_start,
1289 .stop = mv_udc_stop,
1292 static int eps_init(struct mv_udc *udc)
1298 /* initialize ep0 */
1301 strncpy(ep->name, "ep0", sizeof(ep->name));
1302 ep->ep.name = ep->name;
1303 ep->ep.ops = &mv_ep_ops;
1306 ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
1308 ep->ep.desc = &mv_ep0_desc;
1309 INIT_LIST_HEAD(&ep->queue);
1311 ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1313 /* initialize other endpoints */
1314 for (i = 2; i < udc->max_eps * 2; i++) {
1317 snprintf(name, sizeof(name), "ep%din", i / 2);
1318 ep->direction = EP_DIR_IN;
1320 snprintf(name, sizeof(name), "ep%dout", i / 2);
1321 ep->direction = EP_DIR_OUT;
1324 strncpy(ep->name, name, sizeof(ep->name));
1325 ep->ep.name = ep->name;
1327 ep->ep.ops = &mv_ep_ops;
1329 ep->ep.maxpacket = (unsigned short) ~0;
1332 INIT_LIST_HEAD(&ep->queue);
1333 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1335 ep->dqh = &udc->ep_dqh[i];
1341 /* delete all endpoint requests, called with spinlock held */
1342 static void nuke(struct mv_ep *ep, int status)
1344 /* called with spinlock held */
1347 /* endpoint fifo flush */
1348 mv_ep_fifo_flush(&ep->ep);
1350 while (!list_empty(&ep->queue)) {
1351 struct mv_req *req = NULL;
1352 req = list_entry(ep->queue.next, struct mv_req, queue);
1353 done(ep, req, status);
1357 /* stop all USB activities */
1358 static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
1362 nuke(&udc->eps[0], -ESHUTDOWN);
1364 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1365 nuke(ep, -ESHUTDOWN);
1368 /* report disconnect; the driver is already quiesced */
1370 spin_unlock(&udc->lock);
1371 driver->disconnect(&udc->gadget);
1372 spin_lock(&udc->lock);
1376 static int mv_udc_start(struct usb_gadget_driver *driver,
1377 int (*bind)(struct usb_gadget *, struct usb_gadget_driver *))
1379 struct mv_udc *udc = the_controller;
1381 unsigned long flags;
1389 spin_lock_irqsave(&udc->lock, flags);
1391 /* hook up the driver ... */
1392 driver->driver.bus = NULL;
1393 udc->driver = driver;
1394 udc->gadget.dev.driver = &driver->driver;
1396 udc->usb_state = USB_STATE_ATTACHED;
1397 udc->ep0_state = WAIT_FOR_SETUP;
1398 udc->ep0_dir = EP_DIR_OUT;
1400 spin_unlock_irqrestore(&udc->lock, flags);
1402 retval = bind(&udc->gadget, driver);
1404 dev_err(&udc->dev->dev, "bind to driver %s --> %d\n",
1405 driver->driver.name, retval);
1407 udc->gadget.dev.driver = NULL;
1411 if (!IS_ERR_OR_NULL(udc->transceiver)) {
1412 retval = otg_set_peripheral(udc->transceiver->otg,
1415 dev_err(&udc->dev->dev,
1416 "unable to register peripheral to otg\n");
1417 if (driver->unbind) {
1418 driver->unbind(&udc->gadget);
1419 udc->gadget.dev.driver = NULL;
1426 /* pullup is always on */
1427 mv_udc_pullup(&udc->gadget, 1);
1429 /* When booting with the cable attached, no vbus irq will occur */
1431 queue_work(udc->qwork, &udc->vbus_work);
1436 static int mv_udc_stop(struct usb_gadget_driver *driver)
1438 struct mv_udc *udc = the_controller;
1439 unsigned long flags;
1444 spin_lock_irqsave(&udc->lock, flags);
1449 /* stop all usb activities */
1450 udc->gadget.speed = USB_SPEED_UNKNOWN;
1451 stop_activity(udc, driver);
1452 mv_udc_disable(udc);
1454 spin_unlock_irqrestore(&udc->lock, flags);
1456 /* unbind gadget driver */
1457 driver->unbind(&udc->gadget);
1458 udc->gadget.dev.driver = NULL;
1464 static void mv_set_ptc(struct mv_udc *udc, u32 mode)
1468 portsc = readl(&udc->op_regs->portsc[0]);
1469 portsc |= mode << 16;
1470 writel(portsc, &udc->op_regs->portsc[0]);
1473 static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
1475 struct mv_udc *udc = the_controller;
1476 struct mv_req *req = container_of(_req, struct mv_req, req);
1477 unsigned long flags;
1479 dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
1481 spin_lock_irqsave(&udc->lock, flags);
1482 if (req->test_mode) {
1483 mv_set_ptc(udc, req->test_mode);
1486 spin_unlock_irqrestore(&udc->lock, flags);
1490 udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
1497 udc->ep0_dir = direction;
1498 udc->ep0_state = WAIT_FOR_OUT_STATUS;
1500 req = udc->status_req;
1502 /* fill in the request structure */
1503 if (empty == false) {
1504 *((u16 *) req->req.buf) = cpu_to_le16(status);
1505 req->req.length = 2;
1507 req->req.length = 0;
1510 req->req.status = -EINPROGRESS;
1511 req->req.actual = 0;
1512 if (udc->test_mode) {
1513 req->req.complete = prime_status_complete;
1514 req->test_mode = udc->test_mode;
1517 req->req.complete = NULL;
1520 if (req->req.dma == DMA_ADDR_INVALID) {
1521 req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
1522 req->req.buf, req->req.length,
1523 ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1527 /* prime the data phase */
1528 if (!req_to_dtd(req)) {
1529 retval = queue_dtd(ep, req);
1531 dev_err(&udc->dev->dev,
1532 "Failed to queue dtd when prime status\n");
1535 } else { /* no mem */
1537 dev_err(&udc->dev->dev,
1538 "Failed to dma_pool_alloc when prime status\n");
1542 list_add_tail(&req->queue, &ep->queue);
1547 dma_unmap_single(ep->udc->gadget.dev.parent,
1548 req->req.dma, req->req.length,
1549 ((ep_dir(ep) == EP_DIR_IN) ?
1550 DMA_TO_DEVICE : DMA_FROM_DEVICE));
1551 req->req.dma = DMA_ADDR_INVALID;
1558 static void mv_udc_testmode(struct mv_udc *udc, u16 index)
1560 if (index <= TEST_FORCE_EN) {
1561 udc->test_mode = index;
1562 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1565 dev_err(&udc->dev->dev,
1566 "This test mode(%d) is not supported\n", index);
1569 static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1571 udc->dev_addr = (u8)setup->wValue;
1573 /* update usb state */
1574 udc->usb_state = USB_STATE_ADDRESS;
1576 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1580 static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
1581 struct usb_ctrlrequest *setup)
1586 if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
1587 != (USB_DIR_IN | USB_TYPE_STANDARD))
1590 if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1591 status = 1 << USB_DEVICE_SELF_POWERED;
1592 status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
1593 } else if ((setup->bRequestType & USB_RECIP_MASK)
1594 == USB_RECIP_INTERFACE) {
1595 /* get interface status */
1597 } else if ((setup->bRequestType & USB_RECIP_MASK)
1598 == USB_RECIP_ENDPOINT) {
1599 u8 ep_num, direction;
1601 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1602 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1603 ? EP_DIR_IN : EP_DIR_OUT;
1604 status = ep_is_stall(udc, ep_num, direction)
1605 << USB_ENDPOINT_HALT;
1608 retval = udc_prime_status(udc, EP_DIR_IN, status, false);
1612 udc->ep0_state = DATA_STATE_XMIT;
1615 static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1621 if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1622 == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1623 switch (setup->wValue) {
1624 case USB_DEVICE_REMOTE_WAKEUP:
1625 udc->remote_wakeup = 0;
1630 } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1631 == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1632 switch (setup->wValue) {
1633 case USB_ENDPOINT_HALT:
1634 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1635 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1636 ? EP_DIR_IN : EP_DIR_OUT;
1637 if (setup->wValue != 0 || setup->wLength != 0
1638 || ep_num > udc->max_eps)
1640 ep = &udc->eps[ep_num * 2 + direction];
1643 spin_unlock(&udc->lock);
1644 ep_set_stall(udc, ep_num, direction, 0);
1645 spin_lock(&udc->lock);
1653 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1659 static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1664 if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1665 == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1666 switch (setup->wValue) {
1667 case USB_DEVICE_REMOTE_WAKEUP:
1668 udc->remote_wakeup = 1;
1670 case USB_DEVICE_TEST_MODE:
1671 if (setup->wIndex & 0xFF
1672 || udc->gadget.speed != USB_SPEED_HIGH)
1675 if (udc->usb_state != USB_STATE_CONFIGURED
1676 && udc->usb_state != USB_STATE_ADDRESS
1677 && udc->usb_state != USB_STATE_DEFAULT)
1680 mv_udc_testmode(udc, (setup->wIndex >> 8));
1685 } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1686 == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1687 switch (setup->wValue) {
1688 case USB_ENDPOINT_HALT:
1689 ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1690 direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1691 ? EP_DIR_IN : EP_DIR_OUT;
1692 if (setup->wValue != 0 || setup->wLength != 0
1693 || ep_num > udc->max_eps)
1695 spin_unlock(&udc->lock);
1696 ep_set_stall(udc, ep_num, direction, 1);
1697 spin_lock(&udc->lock);
1705 if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1711 static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
1712 struct usb_ctrlrequest *setup)
1714 bool delegate = false;
1716 nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);
1718 dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1719 setup->bRequestType, setup->bRequest,
1720 setup->wValue, setup->wIndex, setup->wLength);
1721 /* We process some standard setup requests here */
1722 if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1723 switch (setup->bRequest) {
1724 case USB_REQ_GET_STATUS:
1725 ch9getstatus(udc, ep_num, setup);
1728 case USB_REQ_SET_ADDRESS:
1729 ch9setaddress(udc, setup);
1732 case USB_REQ_CLEAR_FEATURE:
1733 ch9clearfeature(udc, setup);
1736 case USB_REQ_SET_FEATURE:
1737 ch9setfeature(udc, setup);
1746 /* delegate USB standard requests to the gadget driver */
1747 if (delegate == true) {
1748 /* USB requests handled by gadget */
1749 if (setup->wLength) {
1750 /* DATA phase from gadget, STATUS phase from udc */
1751 udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
1752 ? EP_DIR_IN : EP_DIR_OUT;
1753 spin_unlock(&udc->lock);
1754 if (udc->driver->setup(&udc->gadget,
1755 &udc->local_setup_buff) < 0)
1757 spin_lock(&udc->lock);
1758 udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
1759 ? DATA_STATE_XMIT : DATA_STATE_RECV;
1761 /* no DATA phase, IN STATUS phase from gadget */
1762 udc->ep0_dir = EP_DIR_IN;
1763 spin_unlock(&udc->lock);
1764 if (udc->driver->setup(&udc->gadget,
1765 &udc->local_setup_buff) < 0)
1767 spin_lock(&udc->lock);
1768 udc->ep0_state = WAIT_FOR_OUT_STATUS;
1773 /* complete the DATA or STATUS phase of ep0; prime the status phase if needed */
1774 static void ep0_req_complete(struct mv_udc *udc,
1775 struct mv_ep *ep0, struct mv_req *req)
1779 if (udc->usb_state == USB_STATE_ADDRESS) {
1780 /* set the new address */
1781 new_addr = (u32)udc->dev_addr;
1782 writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
1783 &udc->op_regs->deviceaddr);
1788 switch (udc->ep0_state) {
1789 case DATA_STATE_XMIT:
1790 /* receive status phase */
1791 if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
1794 case DATA_STATE_RECV:
1795 /* send status phase */
1796 if (udc_prime_status(udc, EP_DIR_IN, 0 , true))
1799 case WAIT_FOR_OUT_STATUS:
1800 udc->ep0_state = WAIT_FOR_SETUP;
1802 case WAIT_FOR_SETUP:
1803 dev_err(&udc->dev->dev, "unexpected ep0 packet\n");
1811 static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
1816 dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];
1818 /* Clear bit in ENDPTSETUPSTAT */
1819 writel((1 << ep_num), &udc->op_regs->epsetupstat);
1821 /* repeat while a hazard exists, i.e. a new setup packet arrives during the copy */
1823 /* Set Setup Tripwire */
1824 temp = readl(&udc->op_regs->usbcmd);
1825 writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
1827 /* Copy the setup packet to local buffer */
1828 memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
1829 } while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));
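/*
 * The hardware clears the setup tripwire if a new setup packet arrives
 * while the 8 bytes are being copied, so looping until the bit is still
 * set after the copy guarantees a consistent snapshot of the setup data.
 */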
1831 /* Clear Setup Tripwire */
1832 temp = readl(&udc->op_regs->usbcmd);
1833 writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
1836 static void irq_process_tr_complete(struct mv_udc *udc)
1839 int i, ep_num = 0, direction = 0;
1840 struct mv_ep *curr_ep;
1841 struct mv_req *curr_req, *temp_req;
1845 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
1846 * because the setup packets are to be read ASAP
1849 /* Process all Setup packet received interrupts */
1850 tmp = readl(&udc->op_regs->epsetupstat);
1853 for (i = 0; i < udc->max_eps; i++) {
1854 if (tmp & (1 << i)) {
1855 get_setup_data(udc, i,
1856 (u8 *)(&udc->local_setup_buff));
1857 handle_setup_packet(udc, i,
1858 &udc->local_setup_buff);
1863 /* Don't clear the endpoint setup status register here.
1864 * It is cleared as a setup packet is read out of the buffer
1867 /* Process non-setup transaction complete interrupts */
1868 tmp = readl(&udc->op_regs->epcomplete);
1873 writel(tmp, &udc->op_regs->epcomplete);
1875 for (i = 0; i < udc->max_eps * 2; i++) {
1879 bit_pos = 1 << (ep_num + 16 * direction);
1881 if (!(bit_pos & tmp))
1885 curr_ep = &udc->eps[0];
1887 curr_ep = &udc->eps[i];
1888 /* process the request queue until we reach an incomplete request */
1889 list_for_each_entry_safe(curr_req, temp_req,
1890 &curr_ep->queue, queue) {
1891 status = process_ep_req(udc, i, curr_req);
1895 /* write back status to req */
1896 curr_req->req.status = status;
1898 /* ep0 request completion */
1900 ep0_req_complete(udc, curr_ep, curr_req);
1903 done(curr_ep, curr_req, status);
1909 void irq_process_reset(struct mv_udc *udc)
1914 udc->ep0_dir = EP_DIR_OUT;
1915 udc->ep0_state = WAIT_FOR_SETUP;
1916 udc->remote_wakeup = 0; /* default to 0 on reset */
1918 /* The device address occupies bits 25-31; clear it */
1919 tmp = readl(&udc->op_regs->deviceaddr);
1920 tmp &= ~(USB_DEVICE_ADDRESS_MASK);
1921 writel(tmp, &udc->op_regs->deviceaddr);
1923 /* Clear all the setup token semaphores */
1924 tmp = readl(&udc->op_regs->epsetupstat);
1925 writel(tmp, &udc->op_regs->epsetupstat);
1927 /* Clear all the endpoint complete status bits */
1928 tmp = readl(&udc->op_regs->epcomplete);
1929 writel(tmp, &udc->op_regs->epcomplete);
1931 /* wait until all endptprime bits cleared */
1932 loops = LOOPS(PRIME_TIMEOUT);
1933 while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
1935 dev_err(&udc->dev->dev,
1936 "Timeout for ENDPTPRIME = 0x%x\n",
1937 readl(&udc->op_regs->epprime));
1944 /* Write 1s to the Flush register */
1945 writel((u32)~0, &udc->op_regs->epflush);
1947 if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
1948 dev_info(&udc->dev->dev, "usb bus reset\n");
1949 udc->usb_state = USB_STATE_DEFAULT;
1950 /* reset all the queues, stop all USB activities */
1951 stop_activity(udc, udc->driver);
1953 dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
1954 readl(&udc->op_regs->portsc));
1962 /* reset all the queues, stop all USB activities */
1963 stop_activity(udc, udc->driver);
1965 /* reset ep0 dQH and endptctrl */
1968 /* enable interrupt and set controller to run state */
1971 udc->usb_state = USB_STATE_ATTACHED;
1975 static void handle_bus_resume(struct mv_udc *udc)
1977 udc->usb_state = udc->resume_state;
1978 udc->resume_state = 0;
1980 /* report resume to the driver */
1982 if (udc->driver->resume) {
1983 spin_unlock(&udc->lock);
1984 udc->driver->resume(&udc->gadget);
1985 spin_lock(&udc->lock);
1990 static void irq_process_suspend(struct mv_udc *udc)
1992 udc->resume_state = udc->usb_state;
1993 udc->usb_state = USB_STATE_SUSPENDED;
1995 if (udc->driver->suspend) {
1996 spin_unlock(&udc->lock);
1997 udc->driver->suspend(&udc->gadget);
1998 spin_lock(&udc->lock);
2002 static void irq_process_port_change(struct mv_udc *udc)
2006 portsc = readl(&udc->op_regs->portsc[0]);
2007 if (!(portsc & PORTSCX_PORT_RESET)) {
2009 u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
2011 case PORTSCX_PORT_SPEED_HIGH:
2012 udc->gadget.speed = USB_SPEED_HIGH;
2014 case PORTSCX_PORT_SPEED_FULL:
2015 udc->gadget.speed = USB_SPEED_FULL;
2017 case PORTSCX_PORT_SPEED_LOW:
2018 udc->gadget.speed = USB_SPEED_LOW;
2021 udc->gadget.speed = USB_SPEED_UNKNOWN;
2026 if (portsc & PORTSCX_PORT_SUSPEND) {
2027 udc->resume_state = udc->usb_state;
2028 udc->usb_state = USB_STATE_SUSPENDED;
2029 if (udc->driver->suspend) {
2030 spin_unlock(&udc->lock);
2031 udc->driver->suspend(&udc->gadget);
2032 spin_lock(&udc->lock);
2036 if (!(portsc & PORTSCX_PORT_SUSPEND)
2037 && udc->usb_state == USB_STATE_SUSPENDED) {
2038 handle_bus_resume(udc);
2041 if (!udc->resume_state)
2042 udc->usb_state = USB_STATE_DEFAULT;
2045 static void irq_process_error(struct mv_udc *udc)
2047 /* Increment the error count */
2051 static irqreturn_t mv_udc_irq(int irq, void *dev)
2053 struct mv_udc *udc = (struct mv_udc *)dev;
2056 /* Disable ISR when stopped bit is set */
2060 spin_lock(&udc->lock);
2062 status = readl(&udc->op_regs->usbsts);
2063 intr = readl(&udc->op_regs->usbintr);
2067 spin_unlock(&udc->lock);
2071 /* Clear all the interrupts that occurred */
2072 writel(status, &udc->op_regs->usbsts);
2074 if (status & USBSTS_ERR)
2075 irq_process_error(udc);
2077 if (status & USBSTS_RESET)
2078 irq_process_reset(udc);
2080 if (status & USBSTS_PORT_CHANGE)
2081 irq_process_port_change(udc);
2083 if (status & USBSTS_INT)
2084 irq_process_tr_complete(udc);
2086 if (status & USBSTS_SUSPEND)
2087 irq_process_suspend(udc);
2089 spin_unlock(&udc->lock);
2094 static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
2096 struct mv_udc *udc = (struct mv_udc *)dev;
2098 /* polling VBUS and initializing the PHY may take too much time */
2100 queue_work(udc->qwork, &udc->vbus_work);
2105 static void mv_udc_vbus_work(struct work_struct *work)
2110 udc = container_of(work, struct mv_udc, vbus_work);
2111 if (!udc->pdata->vbus)
2114 vbus = udc->pdata->vbus->poll();
2115 dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
2117 if (vbus == VBUS_HIGH)
2118 mv_udc_vbus_session(&udc->gadget, 1);
2119 else if (vbus == VBUS_LOW)
2120 mv_udc_vbus_session(&udc->gadget, 0);
2123 /* release device structure */
2124 static void gadget_release(struct device *_dev)
2126 struct mv_udc *udc = the_controller;
2128 complete(udc->done);
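/*
 * gadget_release() signals release_done; mv_udc_remove() waits on this
 * completion so the udc data is not torn down while the gadget device is
 * still referenced.
 */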
2131 static int mv_udc_remove(struct platform_device *dev)
2133 struct mv_udc *udc = the_controller;
2136 usb_del_gadget_udc(&udc->gadget);
2139 flush_workqueue(udc->qwork);
2140 destroy_workqueue(udc->qwork);
2144 * If the transceiver is initialized,
2145 * the vbus irq will not be requested by the udc driver.
2147 if (udc->pdata && udc->pdata->vbus
2148 && udc->clock_gating && IS_ERR_OR_NULL(udc->transceiver))
2149 free_irq(udc->pdata->vbus->irq, udc);
2151 /* free memory allocated in probe */
2153 dma_pool_destroy(udc->dtd_pool);
2156 dma_free_coherent(&dev->dev, udc->ep_dqh_size,
2157 udc->ep_dqh, udc->ep_dqh_dma);
2162 free_irq(udc->irq, udc);
2164 mv_udc_disable(udc);
2167 iounmap(udc->cap_regs);
2170 iounmap(udc->phy_regs);
2172 if (udc->status_req) {
2173 kfree(udc->status_req->req.buf);
2174 kfree(udc->status_req);
2177 for (clk_i = 0; clk_i < udc->clknum; clk_i++)
2178 clk_put(udc->clk[clk_i]);
2180 device_unregister(&udc->gadget.dev);
2182 /* free dev, wait for the release() finished */
2183 wait_for_completion(udc->done);
2186 the_controller = NULL;
2191 static int mv_udc_probe(struct platform_device *dev)
2193 struct mv_usb_platform_data *pdata = dev->dev.platform_data;
2200 if (pdata == NULL) {
2201 dev_err(&dev->dev, "missing platform_data\n");
2205 size = sizeof(*udc) + sizeof(struct clk *) * pdata->clknum;
2206 udc = kzalloc(size, GFP_KERNEL);
2208 dev_err(&dev->dev, "failed to allocate memory for udc\n");
2212 the_controller = udc;
2213 udc->done = &release_done;
2214 udc->pdata = dev->dev.platform_data;
2215 spin_lock_init(&udc->lock);
2219 #ifdef CONFIG_USB_OTG_UTILS
2220 if (pdata->mode == MV_USB_MODE_OTG)
2221 udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
2224 udc->clknum = pdata->clknum;
2225 for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
2226 udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
2227 if (IS_ERR(udc->clk[clk_i])) {
2228 retval = PTR_ERR(udc->clk[clk_i]);
2233 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
2235 dev_err(&dev->dev, "no I/O memory resource defined\n");
2240 udc->cap_regs = (struct mv_cap_regs __iomem *)
2241 ioremap(r->start, resource_size(r));
2242 if (udc->cap_regs == NULL) {
2243 dev_err(&dev->dev, "failed to map I/O memory\n");
2248 r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
2250 dev_err(&dev->dev, "no phy I/O memory resource defined\n");
2252 goto err_iounmap_capreg;
2255 udc->phy_regs = ioremap(r->start, resource_size(r));
2256 if (udc->phy_regs == NULL) {
2257 dev_err(&dev->dev, "failed to map phy I/O memory\n");
2259 goto err_iounmap_capreg;
2262 /* we will access controller registers, so enable the clock */
2263 retval = mv_udc_enable_internal(udc);
2265 goto err_iounmap_phyreg;
2268 (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
2269 + (readl(&udc->cap_regs->caplength_hciversion)
2271 udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
2274 * Some platforms use USB to download the image and may not disconnect
2275 * the USB gadget before loading the kernel, so stop the udc here first.
2278 writel(0xFFFFFFFF, &udc->op_regs->usbsts);
2280 size = udc->max_eps * sizeof(struct mv_dqh) * 2;
2281 size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
2282 udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
2283 &udc->ep_dqh_dma, GFP_KERNEL);
2285 if (udc->ep_dqh == NULL) {
2286 dev_err(&dev->dev, "allocate dQH memory failed\n");
2288 goto err_disable_clock;
2290 udc->ep_dqh_size = size;
2292 /* create dTD dma_pool resource */
2293 udc->dtd_pool = dma_pool_create("mv_dtd",
2295 sizeof(struct mv_dtd),
2299 if (!udc->dtd_pool) {
2304 size = udc->max_eps * sizeof(struct mv_ep) * 2;
2305 udc->eps = kzalloc(size, GFP_KERNEL);
2306 if (udc->eps == NULL) {
2307 dev_err(&dev->dev, "allocate ep memory failed\n");
2309 goto err_destroy_dma;
2312 /* initialize ep0 status request structure */
2313 udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL);
2314 if (!udc->status_req) {
2315 dev_err(&dev->dev, "allocate status_req memory failed\n");
2319 INIT_LIST_HEAD(&udc->status_req->queue);
2321 /* allocate a small amount of memory to get a valid address */
2322 udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
2323 udc->status_req->req.dma = DMA_ADDR_INVALID;
2325 udc->resume_state = USB_STATE_NOTATTACHED;
2326 udc->usb_state = USB_STATE_POWERED;
2327 udc->ep0_dir = EP_DIR_OUT;
2328 udc->remote_wakeup = 0;
2330 r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
2332 dev_err(&dev->dev, "no IRQ resource defined\n");
2334 goto err_free_status_req;
2336 udc->irq = r->start;
2337 if (request_irq(udc->irq, mv_udc_irq,
2338 IRQF_SHARED, driver_name, udc)) {
2339 dev_err(&dev->dev, "Request irq %d for UDC failed\n",
2342 goto err_free_status_req;
2345 /* initialize gadget structure */
2346 udc->gadget.ops = &mv_ops; /* usb_gadget_ops */
2347 udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */
2348 INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */
2349 udc->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
2350 udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
2352 /* the "gadget" abstracts/virtualizes the controller */
2353 dev_set_name(&udc->gadget.dev, "gadget");
2354 udc->gadget.dev.parent = &dev->dev;
2355 udc->gadget.dev.dma_mask = dev->dev.dma_mask;
2356 udc->gadget.dev.release = gadget_release;
2357 udc->gadget.name = driver_name; /* gadget name */
2359 retval = device_register(&udc->gadget.dev);
2365 /* VBUS detect: we can disable/enable clock on demand. */
2366 if (!IS_ERR_OR_NULL(udc->transceiver))
2367 udc->clock_gating = 1;
2368 else if (pdata->vbus) {
2369 udc->clock_gating = 1;
2370 retval = request_threaded_irq(pdata->vbus->irq, NULL,
2371 mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
2374 "Can not request irq for VBUS, "
2375 "disable clock gating\n");
2376 udc->clock_gating = 0;
2379 udc->qwork = create_singlethread_workqueue("mv_udc_queue");
2381 dev_err(&dev->dev, "cannot create workqueue\n");
2383 goto err_unregister;
2386 INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
2390 * When clock gating is supported, we can disable the clk and phy.
2391 * If not, it means that VBUS detection is not supported, and we
2392 * have to keep vbus active all the time to let the controller work.
2394 if (udc->clock_gating)
2395 mv_udc_disable_internal(udc);
2397 udc->vbus_active = 1;
2399 retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
2401 goto err_unregister;
2403 dev_info(&dev->dev, "successfully probed UDC device, %s clock gating\n",
2404 udc->clock_gating ? "with" : "without");
2409 if (udc->pdata && udc->pdata->vbus
2410 && udc->clock_gating && IS_ERR_OR_NULL(udc->transceiver))
2411 free_irq(pdata->vbus->irq, udc);
2412 device_unregister(&udc->gadget.dev);
2414 free_irq(udc->irq, udc);
2415 err_free_status_req:
2416 kfree(udc->status_req->req.buf);
2417 kfree(udc->status_req);
2421 dma_pool_destroy(udc->dtd_pool);
2423 dma_free_coherent(&dev->dev, udc->ep_dqh_size,
2424 udc->ep_dqh, udc->ep_dqh_dma);
2426 mv_udc_disable_internal(udc);
2428 iounmap(udc->phy_regs);
2430 iounmap(udc->cap_regs);
2432 for (clk_i--; clk_i >= 0; clk_i--)
2433 clk_put(udc->clk[clk_i]);
2434 the_controller = NULL;
2440 static int mv_udc_suspend(struct device *_dev)
2442 struct mv_udc *udc = the_controller;
2444 /* if OTG is enabled, the following will be done in OTG driver*/
2445 if (!IS_ERR_OR_NULL(udc->transceiver))
2448 if (udc->pdata->vbus && udc->pdata->vbus->poll)
2449 if (udc->pdata->vbus->poll() == VBUS_HIGH) {
2450 dev_info(&udc->dev->dev, "USB cable is connected!\n");
2455 * the udc can only suspend once the cable is unplugged,
2456 * so we do not need to handle the clock_gating == 1 case here.
2458 if (!udc->clock_gating) {
2461 spin_lock_irq(&udc->lock);
2462 /* stop all usb activities */
2463 stop_activity(udc, udc->driver);
2464 spin_unlock_irq(&udc->lock);
2466 mv_udc_disable_internal(udc);
2472 static int mv_udc_resume(struct device *_dev)
2474 struct mv_udc *udc = the_controller;
2477 /* if OTG is enabled, the following will be done in OTG driver*/
2478 if (!IS_ERR_OR_NULL(udc->transceiver))
2481 if (!udc->clock_gating) {
2482 retval = mv_udc_enable_internal(udc);
2486 if (udc->driver && udc->softconnect) {
2496 static const struct dev_pm_ops mv_udc_pm_ops = {
2497 .suspend = mv_udc_suspend,
2498 .resume = mv_udc_resume,
2502 static void mv_udc_shutdown(struct platform_device *dev)
2504 struct mv_udc *udc = the_controller;
2507 /* reset controller mode to IDLE */
2509 mode = readl(&udc->op_regs->usbmode);
2511 writel(mode, &udc->op_regs->usbmode);
2512 mv_udc_disable(udc);
2515 static struct platform_driver udc_driver = {
2516 .probe = mv_udc_probe,
2517 .remove = __exit_p(mv_udc_remove),
2518 .shutdown = mv_udc_shutdown,
2520 .owner = THIS_MODULE,
2523 .pm = &mv_udc_pm_ops,
2528 module_platform_driver(udc_driver);
2529 MODULE_ALIAS("platform:mv-udc");
2530 MODULE_DESCRIPTION(DRIVER_DESC);
2531 MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
2532 MODULE_VERSION(DRIVER_VERSION);
2533 MODULE_LICENSE("GPL");