1 /*
2  * BRIEF MODULE DESCRIPTION
3  *      Au1000 USB Device-Side (device layer)
4  *
5  * Copyright 2001-2002 MontaVista Software Inc.
6  * Author: MontaVista Software, Inc.
7  *              stevel@mvista.com or source@mvista.com
8  *
9  *  This program is free software; you can redistribute  it and/or modify it
10  *  under  the terms of  the GNU General  Public License as published by the
11  *  Free Software Foundation;  either version 2 of the  License, or (at your
12  *  option) any later version.
13  *
14  *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
15  *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
16  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
17  *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
18  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19  *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
20  *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21  *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
22  *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23  *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  *
25  *  You should have received a copy of the  GNU General Public License along
26  *  with this program; if not, write  to the Free Software Foundation, Inc.,
27  *  675 Mass Ave, Cambridge, MA 02139, USA.
28  */
29 #include <linux/kernel.h>
30 #include <linux/ioport.h>
31 #include <linux/sched.h>
32 #include <linux/signal.h>
33 #include <linux/errno.h>
34 #include <linux/poll.h>
35 #include <linux/init.h>
36 #include <linux/slab.h>
37 #include <linux/fcntl.h>
38 #include <linux/module.h>
39 #include <linux/spinlock.h>
40 #include <linux/list.h>
41 #include <linux/smp_lock.h>
42 #define DEBUG
43 #include <linux/usb.h>
44
45 #include <asm/io.h>
46 #include <asm/uaccess.h>
47 #include <asm/irq.h>
48 #include <asm/mipsregs.h>
49 #include <asm/au1000.h>
50 #include <asm/au1000_dma.h>
51 #include <asm/au1000_usbdev.h>
52
53 #ifdef DEBUG
54 #undef VDEBUG
55 #ifdef VDEBUG
56 #define vdbg(fmt, arg...) printk(KERN_DEBUG __FILE__ ": " fmt "\n" , ## arg)
57 #else
58 #define vdbg(fmt, arg...) do {} while (0)
59 #endif
60 #else
61 #define vdbg(fmt, arg...) do {} while (0)
62 #endif
63
64 #define ALLOC_FLAGS (in_interrupt () ? GFP_ATOMIC : GFP_KERNEL)
65
66 #define EP_FIFO_DEPTH 8
67
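/*
 * Descriptive note (added): control transfers on the default endpoint
 * progress through these stages: SETUP, an optional DATA stage, then
 * STATUS. process_ep0_receive() below advances this state machine as
 * packets arrive on EP0.
 */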
68 typedef enum {
69         SETUP_STAGE = 0,
70         DATA_STAGE,
71         STATUS_STAGE
72 } ep0_stage_t;
73
74 typedef struct {
75         int read_fifo;
76         int write_fifo;
77         int ctrl_stat;
78         int read_fifo_status;
79         int write_fifo_status;
80 } endpoint_reg_t;
81
82 typedef struct {
83         usbdev_pkt_t *head;
84         usbdev_pkt_t *tail;
85         int count;
86 } pkt_list_t;
87
88 typedef struct {
89         int active;
90         struct usb_endpoint_descriptor *desc;
91         endpoint_reg_t *reg;
92         /* Only one of these is used, unless this is the control ep */
93         pkt_list_t inlist;
94         pkt_list_t outlist;
95         unsigned int indma, outdma; /* DMA channel numbers for IN, OUT */
96         /* following are extracted from endpoint descriptor for easy access */
97         int max_pkt_size;
98         int type;
99         int direction;
100         /* WE assign endpoint addresses! */
101         int address;
102         spinlock_t lock;
103 } endpoint_t;
104
105
106 static struct usb_dev {
107         endpoint_t ep[6];
108         ep0_stage_t ep0_stage;
109
110         struct usb_device_descriptor *   dev_desc;
111         struct usb_interface_descriptor* if_desc;
112         struct usb_config_descriptor *   conf_desc;
113         u8 *                             full_conf_desc;
114         struct usb_string_descriptor *   str_desc[6];
115
116         /* callback to function layer */
117         void (*func_cb)(usbdev_cb_type_t type, unsigned long arg,
118                         void *cb_data);
119         void* cb_data;
120
121         usbdev_state_t state;   // device state
122         int suspended;          // suspended flag
123         int address;            // device address
124         int interface;
125         int num_ep;
126         u8 alternate_setting;
127         u8 configuration;       // configuration value
128         int remote_wakeup_en;
129 } usbdev;
130
131
132 static endpoint_reg_t ep_reg[] = {
133         // FIFOs 0 and 1 are EP0 default control
134         {USBD_EP0RD, USBD_EP0WR, USBD_EP0CS, USBD_EP0RDSTAT, USBD_EP0WRSTAT },
135         {0},
136         // FIFO 2 is EP2, IN
137         { -1, USBD_EP2WR, USBD_EP2CS, -1, USBD_EP2WRSTAT },
138         // FIFO 3 is EP3, IN
139         {    -1,     USBD_EP3WR, USBD_EP3CS,     -1,         USBD_EP3WRSTAT },
140         // FIFO 4 is EP4, OUT
141         {USBD_EP4RD,     -1,     USBD_EP4CS, USBD_EP4RDSTAT,     -1         },
142         // FIFO 5 is EP5, OUT
143         {USBD_EP5RD,     -1,     USBD_EP5CS, USBD_EP5RDSTAT,     -1         }
144 };
145
146 static struct {
147         unsigned int id;
148         const char *str;
149 } ep_dma_id[] = {
150         { DMA_ID_USBDEV_EP0_TX, "USBDev EP0 IN" },
151         { DMA_ID_USBDEV_EP0_RX, "USBDev EP0 OUT" },
152         { DMA_ID_USBDEV_EP2_TX, "USBDev EP2 IN" },
153         { DMA_ID_USBDEV_EP3_TX, "USBDev EP3 IN" },
154         { DMA_ID_USBDEV_EP4_RX, "USBDev EP4 OUT" },
155         { DMA_ID_USBDEV_EP5_RX, "USBDev EP5 OUT" }
156 };
157
158 #define DIR_OUT 0
159 #define DIR_IN  (1<<3)
160
161 #define CONTROL_EP USB_ENDPOINT_XFER_CONTROL
162 #define BULK_EP    USB_ENDPOINT_XFER_BULK
163
164 static inline endpoint_t *
165 epaddr_to_ep(struct usb_dev* dev, int ep_addr)
166 {
167         if (ep_addr >= 0 && ep_addr < 2)
168                 return &dev->ep[0];
169         if (ep_addr < 6)
170                 return &dev->ep[ep_addr];
171         return NULL;
172 }
173
174 static const char* std_req_name[] = {
175         "GET_STATUS",
176         "CLEAR_FEATURE",
177         "RESERVED",
178         "SET_FEATURE",
179         "RESERVED",
180         "SET_ADDRESS",
181         "GET_DESCRIPTOR",
182         "SET_DESCRIPTOR",
183         "GET_CONFIGURATION",
184         "SET_CONFIGURATION",
185         "GET_INTERFACE",
186         "SET_INTERFACE",
187         "SYNCH_FRAME"
188 };
189
190 static inline const char*
191 get_std_req_name(int req)
192 {
193         return (req >= 0 && req <= 12) ? std_req_name[req] : "UNKNOWN";
194 }
195
196 #if 0
197 static void
198 dump_setup(struct usb_ctrlrequest* s)
199 {
200         dbg("%s: requesttype=%d", __FUNCTION__, s->bRequestType);
201         dbg("%s: request=%d %s", __FUNCTION__, s->bRequest,
202             get_std_req_name(s->bRequest));
203         dbg("%s: value=0x%04x", __FUNCTION__, le16_to_cpu(s->wValue));
204         dbg("%s: index=%d", __FUNCTION__, le16_to_cpu(s->wIndex));
205         dbg("%s: length=%d", __FUNCTION__, le16_to_cpu(s->wLength));
206 }
207 #endif
208
209 static inline usbdev_pkt_t *
210 alloc_packet(endpoint_t * ep, int data_size, void* data)
211 {
212         usbdev_pkt_t* pkt = kmalloc(sizeof(usbdev_pkt_t) + data_size,
213                                     ALLOC_FLAGS);
214         if (!pkt)
215                 return NULL;
216         pkt->ep_addr = ep->address;
217         pkt->size = data_size;
218         pkt->status = 0;
219         pkt->next = NULL;
220         if (data)
221                 memcpy(pkt->payload, data, data_size);
222
223         return pkt;
224 }
225
226
227 /*
228  * Link a packet to the tail of the endpoint's packet list.
229  * EP spinlock must be held when calling.
230  */
231 static void
232 link_tail(endpoint_t * ep, pkt_list_t * list, usbdev_pkt_t * pkt)
233 {
234         if (!list->tail) {
235                 list->head = list->tail = pkt;
236                 list->count = 1;
237         } else {
238                 list->tail->next = pkt;
239                 list->tail = pkt;
240                 list->count++;
241         }
242 }
243
244 /*
245  * Unlink and return a packet from the head of the given packet
246  * list. It is the responsibility of the caller to free the packet.
247  * EP spinlock must be held when calling.
248  */
249 static usbdev_pkt_t *
250 unlink_head(pkt_list_t * list)
251 {
252         usbdev_pkt_t *pkt;
253
254         pkt = list->head;
255         if (!pkt || !list->count) {
256                 return NULL;
257         }
258
259         list->head = pkt->next;
260         if (!list->head) {
261                 list->head = list->tail = NULL;
262                 list->count = 0;
263         } else
264                 list->count--;
265
266         return pkt;
267 }
268
269 /*
270  * Create and attach a new packet to the tail of the endpoint's
271  * packet list. EP spinlock must be held when calling.
272  */
273 static usbdev_pkt_t *
274 add_packet(endpoint_t * ep, pkt_list_t * list, int size)
275 {
276         usbdev_pkt_t *pkt = alloc_packet(ep, size, NULL);
277         if (!pkt)
278                 return NULL;
279
280         link_tail(ep, list, pkt);
281         return pkt;
282 }
283
284
285 /*
286  * Unlink and free a packet from the head of the endpoint's
287  * packet list. EP spinlock must be held when calling.
288  */
289 static inline void
290 free_packet(pkt_list_t * list)
291 {
292         kfree(unlink_head(list));
293 }
294
295 /* EP spinlock must be held when calling. */
296 static inline void
297 flush_pkt_list(pkt_list_t * list)
298 {
299         while (list->count)
300                 free_packet(list);
301 }
302
303 /* EP spinlock must be held when calling */
304 static inline void
305 flush_write_fifo(endpoint_t * ep)
306 {
307         if (ep->reg->write_fifo_status >= 0) {
308                 au_writel(USBDEV_FSTAT_FLUSH | USBDEV_FSTAT_UF |
309                           USBDEV_FSTAT_OF,
310                           ep->reg->write_fifo_status);
311                 //udelay(100);
312                 //au_writel(USBDEV_FSTAT_UF | USBDEV_FSTAT_OF,
313                 //        ep->reg->write_fifo_status);
314         }
315 }
316
317 /* EP spinlock must be held when calling */
318 static inline void
319 flush_read_fifo(endpoint_t * ep)
320 {
321         if (ep->reg->read_fifo_status >= 0) {
322                 au_writel(USBDEV_FSTAT_FLUSH | USBDEV_FSTAT_UF |
323                           USBDEV_FSTAT_OF,
324                           ep->reg->read_fifo_status);
325                 //udelay(100);
326                 //au_writel(USBDEV_FSTAT_UF | USBDEV_FSTAT_OF,
327                 //        ep->reg->read_fifo_status);
328         }
329 }
330
331
332 /* EP spinlock must be held when calling. */
333 static void
334 endpoint_flush(endpoint_t * ep)
335 {
336         // First, flush all packets
337         flush_pkt_list(&ep->inlist);
338         flush_pkt_list(&ep->outlist);
339
340         // Now flush the endpoint's h/w FIFO(s)
341         flush_write_fifo(ep);
342         flush_read_fifo(ep);
343 }
344
345 /* EP spinlock must be held when calling. */
346 static void
347 endpoint_stall(endpoint_t * ep)
348 {
349         u32 cs;
350
351         warn("%s", __FUNCTION__);
352
353         cs = au_readl(ep->reg->ctrl_stat) | USBDEV_CS_STALL;
354         au_writel(cs, ep->reg->ctrl_stat);
355 }
356
357 /* EP spinlock must be held when calling. */
358 static void
359 endpoint_unstall(endpoint_t * ep)
360 {
361         u32 cs;
362
363         warn("%s", __FUNCTION__);
364
365         cs = au_readl(ep->reg->ctrl_stat) & ~USBDEV_CS_STALL;
366         au_writel(cs, ep->reg->ctrl_stat);
367 }
368
369 static void
370 endpoint_reset_datatoggle(endpoint_t * ep)
371 {
372         // FIXME: is this possible?
373 }
374
375
376 /* EP spinlock must be held when calling. */
377 static int
378 endpoint_fifo_read(endpoint_t * ep)
379 {
380         int read_count = 0;
381         u8 *bufptr;
382         usbdev_pkt_t *pkt = ep->outlist.tail;
383
384         if (!pkt)
385                 return -EINVAL;
386
387         bufptr = &pkt->payload[pkt->size];
388         while (au_readl(ep->reg->read_fifo_status) & USBDEV_FSTAT_FCNT_MASK) {
389                 *bufptr++ = au_readl(ep->reg->read_fifo) & 0xff;
390                 read_count++;
391                 pkt->size++;
392         }
393
394         return read_count;
395 }
396
397 #if 0
398 /* EP spinlock must be held when calling. */
399 static int
400 endpoint_fifo_write(endpoint_t * ep, int index)
401 {
402         int write_count = 0;
403         u8 *bufptr;
404         usbdev_pkt_t *pkt = ep->inlist.head;
405
406         if (!pkt)
407                 return -EINVAL;
408
409         bufptr = &pkt->payload[index];
410         while ((au_readl(ep->reg->write_fifo_status) &
411                 USBDEV_FSTAT_FCNT_MASK) < EP_FIFO_DEPTH) {
412                 if (bufptr < pkt->payload + pkt->size) {
413                         au_writel(*bufptr++, ep->reg->write_fifo);
414                         write_count++;
415                 } else {
416                         break;
417                 }
418         }
419
420         return write_count;
421 }
422 #endif
423
424 /*
425  * This routine is called to restart transmission of a packet.
426  * The endpoint's TSIZE must be set to the new packet's size,
427  * and DMA to the write FIFO needs to be restarted.
428  * EP spinlock must be held when calling.
429  */
430 static void
431 kickstart_send_packet(endpoint_t * ep)
432 {
433         u32 cs;
434         usbdev_pkt_t *pkt = ep->inlist.head;
435
436         vdbg("%s: ep%d, pkt=%p", __FUNCTION__, ep->address, pkt);
437
438         if (!pkt) {
439                 err("%s: head=NULL! list->count=%d", __FUNCTION__,
440                     ep->inlist.count);
441                 return;
442         }
443
444         dma_cache_wback_inv((unsigned long)pkt->payload, pkt->size);
445
446         /*
447          * make sure FIFO is empty
448          */
449         flush_write_fifo(ep);
450
451         cs = au_readl(ep->reg->ctrl_stat) & USBDEV_CS_STALL;
452         cs |= (pkt->size << USBDEV_CS_TSIZE_BIT);
453         au_writel(cs, ep->reg->ctrl_stat);
454
455         if (get_dma_active_buffer(ep->indma) == 1) {
456                 set_dma_count1(ep->indma, pkt->size);
457                 set_dma_addr1(ep->indma, virt_to_phys(pkt->payload));
458                 enable_dma_buffer1(ep->indma);  // reenable
459         } else {
460                 set_dma_count0(ep->indma, pkt->size);
461                 set_dma_addr0(ep->indma, virt_to_phys(pkt->payload));
462                 enable_dma_buffer0(ep->indma);  // reenable
463         }
464         if (dma_halted(ep->indma))
465                 start_dma(ep->indma);
466 }
467
468
469 /*
470  * This routine is called when a packet in the inlist has been
471  * completed. Frees the completed packet and starts sending the
472  * next. EP spinlock must be held when calling.
473  */
474 static usbdev_pkt_t *
475 send_packet_complete(endpoint_t * ep)
476 {
477         usbdev_pkt_t *pkt = unlink_head(&ep->inlist);
478
479         if (pkt) {
480                 pkt->status =
481                         (au_readl(ep->reg->ctrl_stat) & USBDEV_CS_NAK) ?
482                         PKT_STATUS_NAK : PKT_STATUS_ACK;
483
484                 vdbg("%s: ep%d, %s pkt=%p, list count=%d", __FUNCTION__,
485                      ep->address, (pkt->status & PKT_STATUS_NAK) ?
486                      "NAK" : "ACK", pkt, ep->inlist.count);
487         }
488
489         /*
490          * The write fifo should already be drained if things are
491          * working right, but flush it anyway just in case.
492          */
493         flush_write_fifo(ep);
494
495         // begin transmitting next packet in the inlist
496         if (ep->inlist.count) {
497                 kickstart_send_packet(ep);
498         }
499
500         return pkt;
501 }
502
503 /*
504  * Add a new packet to the tail of the given ep's packet
505  * inlist. The transmit complete interrupt frees packets from
506  * the head of this list. EP spinlock must be held when calling.
507  */
508 static int
509 send_packet(struct usb_dev* dev, usbdev_pkt_t *pkt, int async)
510 {
511         pkt_list_t *list;
512         endpoint_t* ep;
513
514         if (!pkt || !(ep = epaddr_to_ep(dev, pkt->ep_addr)))
515                 return -EINVAL;
516
517         if (!pkt->size)
518                 return 0;
519
520         list = &ep->inlist;
521
522         if (!async && list->count) {
523                 halt_dma(ep->indma);
524                 flush_pkt_list(list);
525         }
526
527         link_tail(ep, list, pkt);
528
529         vdbg("%s: ep%d, pkt=%p, size=%d, list count=%d", __FUNCTION__,
530              ep->address, pkt, pkt->size, list->count);
531
532         if (list->count == 1) {
533                 /*
534                  * if the packet count is one, it means the list was empty,
535                  * and no more data will go out this ep until we kick-start
536                  * it again.
537                  */
538                 kickstart_send_packet(ep);
539         }
540
541         return pkt->size;
542 }
543
544 /*
545  * This routine is called to restart reception of a packet.
546  * EP spinlock must be held when calling.
547  */
548 static void
549 kickstart_receive_packet(endpoint_t * ep)
550 {
551         usbdev_pkt_t *pkt;
552
553         // get and link a new packet for next reception
554         if (!(pkt = add_packet(ep, &ep->outlist, ep->max_pkt_size))) {
555                 err("%s: could not alloc new packet", __FUNCTION__);
556                 return;
557         }
558
559         if (get_dma_active_buffer(ep->outdma) == 1) {
560                 clear_dma_done1(ep->outdma);
561                 set_dma_count1(ep->outdma, ep->max_pkt_size);
562                 set_dma_count0(ep->outdma, 0);
563                 set_dma_addr1(ep->outdma, virt_to_phys(pkt->payload));
564                 enable_dma_buffer1(ep->outdma); // reenable
565         } else {
566                 clear_dma_done0(ep->outdma);
567                 set_dma_count0(ep->outdma, ep->max_pkt_size);
568                 set_dma_count1(ep->outdma, 0);
569                 set_dma_addr0(ep->outdma, virt_to_phys(pkt->payload));
570                 enable_dma_buffer0(ep->outdma); // reenable
571         }
572         if (dma_halted(ep->outdma))
573                 start_dma(ep->outdma);
574 }
575
576
577 /*
578  * This routine is called when a packet in the outlist has been
579  * completed (received) and we need to prepare for a new packet
580  * to be received. Halts DMA and computes the packet size from the
581  * remaining DMA counter. Then prepares a new packet for reception
582  * and restarts DMA. FIXME: what if another packet comes in
583  * on top of the completed packet? Counter would be wrong.
584  * EP spinlock must be held when calling.
585  */
586 static usbdev_pkt_t *
587 receive_packet_complete(endpoint_t * ep)
588 {
589         usbdev_pkt_t *pkt = ep->outlist.tail;
590         u32 cs;
591
592         halt_dma(ep->outdma);
593
594         cs = au_readl(ep->reg->ctrl_stat);
595
596         if (!pkt)
597                 return NULL;
598
599         pkt->size = ep->max_pkt_size - get_dma_residue(ep->outdma);
600         if (pkt->size)
601                 dma_cache_inv((unsigned long)pkt->payload, pkt->size);
602         /*
603          * need to pull out any remaining bytes in the FIFO.
604          */
605         endpoint_fifo_read(ep);
606         /*
607          * should be drained now, but flush anyway just in case.
608          */
609         flush_read_fifo(ep);
610
611         pkt->status = (cs & USBDEV_CS_NAK) ? PKT_STATUS_NAK : PKT_STATUS_ACK;
612         if (ep->address == 0 && (cs & USBDEV_CS_SU))
613                 pkt->status |= PKT_STATUS_SU;
614
615         vdbg("%s: ep%d, %s pkt=%p, size=%d", __FUNCTION__,
616              ep->address, (pkt->status & PKT_STATUS_NAK) ?
617              "NAK" : "ACK", pkt, pkt->size);
618
619         kickstart_receive_packet(ep);
620
621         return pkt;
622 }
623
624
625 /*
626  ****************************************************************************
627  * Here starts the standard device request handlers. They are
628  * all called by do_setup() via a table of function pointers.
629  ****************************************************************************
630  */
631
632 static ep0_stage_t
633 do_get_status(struct usb_dev* dev, struct usb_ctrlrequest* setup)
634 {
635         switch (setup->bRequestType) {
636         case 0x80:      // Device
637                 // FIXME: send device status
638                 break;
639         case 0x81:      // Interface
640                 // FIXME: send interface status
641                 break;
642         case 0x82:      // End Point
643                 // FIXME: send endpoint status
644                 break;
645         default:
646                 // Invalid Command
647                 endpoint_stall(&dev->ep[0]); // Stall End Point 0
648                 break;
649         }
650
651         return STATUS_STAGE;
652 }
653
654 static ep0_stage_t
655 do_clear_feature(struct usb_dev* dev, struct usb_ctrlrequest* setup)
656 {
657         switch (setup->bRequestType) {
658         case 0x00:      // Device
659                 if ((le16_to_cpu(setup->wValue) & 0xff) == 1)
660                         dev->remote_wakeup_en = 0;
661                 else
662                         endpoint_stall(&dev->ep[0]);
663                 break;
664         case 0x02:      // End Point
665                 if ((le16_to_cpu(setup->wValue) & 0xff) == 0) {
666                         endpoint_t *ep =
667                                 epaddr_to_ep(dev,
668                                              le16_to_cpu(setup->wIndex) & 0xff);
669
670                         endpoint_unstall(ep);
671                         endpoint_reset_datatoggle(ep);
672                 } else
673                         endpoint_stall(&dev->ep[0]);
674                 break;
675         }
676
677         return SETUP_STAGE;
678 }
679
680 static ep0_stage_t
681 do_reserved(struct usb_dev* dev, struct usb_ctrlrequest* setup)
682 {
683         // Invalid request, stall End Point 0
684         endpoint_stall(&dev->ep[0]);
685         return SETUP_STAGE;
686 }
687
688 static ep0_stage_t
689 do_set_feature(struct usb_dev* dev, struct usb_ctrlrequest* setup)
690 {
691         switch (setup->bRequestType) {
692         case 0x00:      // Device
693                 if ((le16_to_cpu(setup->wValue) & 0xff) == 1)
694                         dev->remote_wakeup_en = 1;
695                 else
696                         endpoint_stall(&dev->ep[0]);
697                 break;
698         case 0x02:      // End Point
699                 if ((le16_to_cpu(setup->wValue) & 0xff) == 0) {
700                         endpoint_t *ep =
701                                 epaddr_to_ep(dev,
702                                              le16_to_cpu(setup->wIndex) & 0xff);
703
704                         endpoint_stall(ep);
705                 } else
706                         endpoint_stall(&dev->ep[0]);
707                 break;
708         }
709
710         return SETUP_STAGE;
711 }
712
713 static ep0_stage_t
714 do_set_address(struct usb_dev* dev, struct usb_ctrlrequest* setup)
715 {
716         int new_state = dev->state;
717         int new_addr = le16_to_cpu(setup->wValue);
718
719         dbg("%s: our address=%d", __FUNCTION__, new_addr);
720
721         if (new_addr > 127) {
722                 // usb spec doesn't tell us what to do, so just go to
723                 // default state
724                 new_state = DEFAULT;
725                 dev->address = 0;
726         } else if (dev->address != new_addr) {
727                 dev->address = new_addr;
728                 new_state = ADDRESS;
729         }
730
731         if (dev->state != new_state) {
732                 dev->state = new_state;
733                 /* inform function layer of usbdev state change */
734                 dev->func_cb(CB_NEW_STATE, dev->state, dev->cb_data);
735         }
736
737         return SETUP_STAGE;
738 }
739
740 static ep0_stage_t
741 do_get_descriptor(struct usb_dev* dev, struct usb_ctrlrequest* setup)
742 {
743         int strnum, desc_len = le16_to_cpu(setup->wLength);
744
745         switch (le16_to_cpu(setup->wValue) >> 8) {
746         case USB_DT_DEVICE:
747                 // send device descriptor!
748                 desc_len = desc_len > dev->dev_desc->bLength ?
749                         dev->dev_desc->bLength : desc_len;
750                 dbg("sending device desc, size=%d", desc_len);
751                 send_packet(dev, alloc_packet(&dev->ep[0], desc_len,
752                                               dev->dev_desc), 0);
753                 break;
754         case USB_DT_CONFIG:
755                 // If the config descr index in low-byte of
756                 // setup->wValue is valid, send config descr,
757                 // otherwise stall ep0.
758                 if ((le16_to_cpu(setup->wValue) & 0xff) == 0) {
759                         // send config descriptor!
760                         if (desc_len <= USB_DT_CONFIG_SIZE) {
761                                 dbg("sending partial config desc, size=%d",
762                                     desc_len);
763                                 send_packet(dev,
764                                             alloc_packet(&dev->ep[0],
765                                                          desc_len,
766                                                          dev->conf_desc),
767                                             0);
768                         } else {
769                                 int len = le16_to_cpu(dev->conf_desc->wTotalLength);
770                                 dbg("sending whole config desc,"
771                                     " size=%d, our size=%d", desc_len, len);
772                                 desc_len = desc_len > len ? len : desc_len;
773                                 send_packet(dev,
774                                             alloc_packet(&dev->ep[0],
775                                                          desc_len,
776                                                          dev->full_conf_desc),
777                                             0);
778                         }
779                 } else
780                         endpoint_stall(&dev->ep[0]);
781                 break;
782         case USB_DT_STRING:
783                 // If the string descr index in low-byte of setup->wValue
784                 // is valid, send string descr, otherwise stall ep0.
785                 strnum = le16_to_cpu(setup->wValue) & 0xff;
786                 if (strnum >= 0 && strnum < 6) {
787                         struct usb_string_descriptor *desc =
788                                 dev->str_desc[strnum];
789                         desc_len = desc_len > desc->bLength ?
790                                 desc->bLength : desc_len;
791                         dbg("sending string desc %d", strnum);
792                         send_packet(dev,
793                                     alloc_packet(&dev->ep[0], desc_len,
794                                                  desc), 0);
795                 } else
796                         endpoint_stall(&dev->ep[0]);
797                 break;
798         default:
799                 // Invalid request
800                 err("invalid get desc=%d, stalled",
801                     le16_to_cpu(setup->wValue) >> 8);
802                 endpoint_stall(&dev->ep[0]);    // Stall endpoint 0
803                 break;
804         }
805
806         return STATUS_STAGE;
807 }
808
809 static ep0_stage_t
810 do_set_descriptor(struct usb_dev* dev, struct usb_ctrlrequest* setup)
811 {
812         // TODO: implement
813         // there will be an OUT data stage (the descriptor to set)
814         return DATA_STAGE;
815 }
816
817 static ep0_stage_t
818 do_get_configuration(struct usb_dev* dev, struct usb_ctrlrequest* setup)
819 {
820         // send dev->configuration
821         dbg("sending config");
822         send_packet(dev, alloc_packet(&dev->ep[0], 1, &dev->configuration),
823                     0);
824         return STATUS_STAGE;
825 }
826
827 static ep0_stage_t
828 do_set_configuration(struct usb_dev* dev, struct usb_ctrlrequest* setup)
829 {
830         // set active config to low-byte of setup->wValue
831         dev->configuration = le16_to_cpu(setup->wValue) & 0xff;
832         dbg("set config, config=%d", dev->configuration);
833         if (!dev->configuration && dev->state > DEFAULT) {
834                 dev->state = ADDRESS;
835                 /* inform function layer of usbdev state change */
836                 dev->func_cb(CB_NEW_STATE, dev->state, dev->cb_data);
837         } else if (dev->configuration == 1) {
838                 dev->state = CONFIGURED;
839                 /* inform function layer of usbdev state change */
840                 dev->func_cb(CB_NEW_STATE, dev->state, dev->cb_data);
841         } else {
842                 // FIXME: "respond with request error" - how?
843         }
844
845         return SETUP_STAGE;
846 }
847
848 static ep0_stage_t
849 do_get_interface(struct usb_dev* dev, struct usb_ctrlrequest* setup)
850 {
851         // interface must be zero.
852         if ((le16_to_cpu(setup->wIndex) & 0xff) || dev->state == ADDRESS) {
853                 // FIXME: respond with "request error". how?
854         } else if (dev->state == CONFIGURED) {
855                 // send dev->alternate_setting
856                 dbg("sending alt setting");
857                 send_packet(dev, alloc_packet(&dev->ep[0], 1,
858                                               &dev->alternate_setting), 0);
859         }
860
861         return STATUS_STAGE;
862
863 }
864
865 static ep0_stage_t
866 do_set_interface(struct usb_dev* dev, struct usb_ctrlrequest* setup)
867 {
868         if (dev->state == ADDRESS) {
869                 // FIXME: respond with "request error". how?
870         } else if (dev->state == CONFIGURED) {
871                 dev->interface = le16_to_cpu(setup->wIndex) & 0xff;
872                 dev->alternate_setting =
873                         le16_to_cpu(setup->wValue) & 0xff;
874                 // interface and alternate_setting must be zero
875                 if (dev->interface || dev->alternate_setting) {
876                         // FIXME: respond with "request error". how?
877                 }
878         }
879
880         return SETUP_STAGE;
881 }
882
883 static ep0_stage_t
884 do_synch_frame(struct usb_dev* dev, struct usb_ctrlrequest* setup)
885 {
886         // TODO
887         return SETUP_STAGE;
888 }
889
890 typedef ep0_stage_t (*req_method_t)(struct usb_dev* dev,
891                                     struct usb_ctrlrequest* setup);
892
893
894 /* Table of the standard device request handlers */
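/* (indexed by the standard bRequest codes 0..12, in the same order as
 * std_req_name[] above) */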
895 static const req_method_t req_method[] = {
896         do_get_status,
897         do_clear_feature,
898         do_reserved,
899         do_set_feature,
900         do_reserved,
901         do_set_address,
902         do_get_descriptor,
903         do_set_descriptor,
904         do_get_configuration,
905         do_set_configuration,
906         do_get_interface,
907         do_set_interface,
908         do_synch_frame
909 };
910
911
912 // SETUP packet request dispatcher
913 static void
914 do_setup (struct usb_dev* dev, struct usb_ctrlrequest* setup)
915 {
916         req_method_t m;
917
918         dbg("%s: req %d %s", __FUNCTION__, setup->bRequest,
919             get_std_req_name(setup->bRequest));
920
921         if ((setup->bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD ||
922             (setup->bRequestType & USB_RECIP_MASK) != USB_RECIP_DEVICE) {
923                 err("%s: invalid requesttype 0x%02x", __FUNCTION__,
924                     setup->bRequestType);
925                 return;
926         }
927
928         if ((setup->bRequestType & 0x80) == USB_DIR_OUT && setup->wLength)
929                 dbg("%s: OUT phase! length=%d", __FUNCTION__, setup->wLength);
930
931         if (setup->bRequest < sizeof(req_method)/sizeof(req_method_t))
932                 m = req_method[setup->bRequest];
933         else
934                 m = do_reserved;
935
936         dev->ep0_stage = (*m)(dev, setup);
937 }
938
939 /*
940  * A SETUP, DATA0, or DATA1 packet has been received
941  * on the default control endpoint's fifo.
942  */
943 static void
944 process_ep0_receive (struct usb_dev* dev)
945 {
946         endpoint_t *ep0 = &dev->ep[0];
947         usbdev_pkt_t *pkt;
948
949         spin_lock(&ep0->lock);
950
951         // complete packet and prepare a new packet
952         pkt = receive_packet_complete(ep0);
953         if (!pkt) {
954                 // FIXME: should put a warn/err here.
955                 spin_unlock(&ep0->lock);
956                 return;
957         }
958
959         // unlink immediately from endpoint.
960         unlink_head(&ep0->outlist);
961
962         // override current stage if h/w says it's a setup packet
963         if (pkt->status & PKT_STATUS_SU)
964                 dev->ep0_stage = SETUP_STAGE;
965
966         switch (dev->ep0_stage) {
967         case SETUP_STAGE:
968                 vdbg("SU bit is %s in setup stage",
969                      (pkt->status & PKT_STATUS_SU) ? "set" : "not set");
970
971                 if (pkt->size == sizeof(struct usb_ctrlrequest)) {
972 #ifdef VDEBUG
973                         if (pkt->status & PKT_STATUS_ACK)
974                                 vdbg("received SETUP");
975                         else
976                                 vdbg("received NAK SETUP");
977 #endif
978                         do_setup(dev, (struct usb_ctrlrequest*)pkt->payload);
979                 } else
980                         err("%s: wrong size SETUP received", __FUNCTION__);
981                 break;
982         case DATA_STAGE:
983                 /*
984                  * this setup has an OUT data stage. Of the standard
985                  * device requests, only set_descriptor has this stage,
986                  * so this packet is that descriptor. TODO: drop it for
987                  * now, set_descriptor not implemented.
988                  *
989                  * Need to place a byte in the write FIFO here, to prepare
990                  * to send a zero-length DATA ack packet to the host in the
991                  * STATUS stage.
992                  */
993                 au_writel(0, ep0->reg->write_fifo);
994                 dbg("received OUT stage DATAx on EP0, size=%d", pkt->size);
995                 dev->ep0_stage = SETUP_STAGE;
996                 break;
997         case STATUS_STAGE:
998                 // this setup had an IN data stage, and host is ACK'ing
999                 // the packet we sent during that stage.
1000                 if (pkt->size != 0)
1001                         warn("received non-zero ACK on EP0??");
1002 #ifdef VDEBUG
1003                 else
1004                         vdbg("received ACK on EP0");
1005 #endif
1006                 dev->ep0_stage = SETUP_STAGE;
1007                 break;
1008         }
1009
1010         spin_unlock(&ep0->lock);
1011         // we're done processing the packet, free it
1012         kfree(pkt);
1013 }
1014
1015
1016 /*
1017  * A DATA0/1 packet has been received on one of the OUT endpoints (4 or 5)
1018  */
1019 static void
1020 process_ep_receive (struct usb_dev* dev, endpoint_t *ep)
1021 {
1022         usbdev_pkt_t *pkt;
1023
1024         spin_lock(&ep->lock);
1025         pkt = receive_packet_complete(ep);
1026         spin_unlock(&ep->lock);
1027
1028         dev->func_cb(CB_PKT_COMPLETE, (unsigned long)pkt, dev->cb_data);
1029 }
1030
1031
1032
1033 /* This ISR handles the receive complete and suspend events */
1034 static void
1035 req_sus_intr (int irq, void *dev_id, struct pt_regs *regs)
1036 {
1037         struct usb_dev *dev = (struct usb_dev *) dev_id;
1038         u32 status;
1039
1040         status = au_readl(USBD_INTSTAT);
1041         au_writel(status, USBD_INTSTAT);        // ack'em
1042
1043         if (status & (1<<0))
1044                 process_ep0_receive(dev);
1045         if (status & (1<<4))
1046                 process_ep_receive(dev, &dev->ep[4]);
1047         if (status & (1<<5))
1048                 process_ep_receive(dev, &dev->ep[5]);
1049 }
1050
1051
1052 /* This ISR handles the DMA done events on EP0 */
1053 static void
1054 dma_done_ep0_intr(int irq, void *dev_id, struct pt_regs *regs)
1055 {
1056         struct usb_dev *dev = (struct usb_dev *) dev_id;
1057         usbdev_pkt_t* pkt;
1058         endpoint_t *ep0 = &dev->ep[0];
1059         u32 cs0, buff_done;
1060
1061         spin_lock(&ep0->lock);
1062         cs0 = au_readl(ep0->reg->ctrl_stat);
1063
1064         // first check packet transmit done
1065         if ((buff_done = get_dma_buffer_done(ep0->indma)) != 0) {
1066                 // transmitted a DATAx packet during DATA stage
1067                 // on control endpoint 0
1068                 // clear DMA done bit
1069                 if (buff_done & DMA_D0)
1070                         clear_dma_done0(ep0->indma);
1071                 if (buff_done & DMA_D1)
1072                         clear_dma_done1(ep0->indma);
1073
1074                 pkt = send_packet_complete(ep0);
1075                 kfree(pkt);
1076         }
1077
1078         /*
1079          * Now check packet receive done. Shouldn't get these,
1080          * the receive packet complete intr should happen
1081          * before the DMA done intr occurs.
1082          */
1083         if ((buff_done = get_dma_buffer_done(ep0->outdma)) != 0) {
1084                 // clear DMA done bit
1085                 if (buff_done & DMA_D0)
1086                         clear_dma_done0(ep0->outdma);
1087                 if (buff_done & DMA_D1)
1088                         clear_dma_done1(ep0->outdma);
1089
1090                 //process_ep0_receive(dev);
1091         }
1092
1093         spin_unlock(&ep0->lock);
1094 }
1095
1096 /* This ISR handles the DMA done events on endpoints 2,3,4,5 */
1097 static void
1098 dma_done_ep_intr(int irq, void *dev_id, struct pt_regs *regs)
1099 {
1100         struct usb_dev *dev = (struct usb_dev *) dev_id;
1101         int i;
1102
1103         for (i = 2; i < 6; i++) {
1104                 u32 buff_done;
1105                 usbdev_pkt_t* pkt;
1106                 endpoint_t *ep = &dev->ep[i];
1107
1108                 if (!ep->active) continue;
1109
1110                 spin_lock(&ep->lock);
1111
1112                 if (ep->direction == USB_DIR_IN) {
1113                         buff_done = get_dma_buffer_done(ep->indma);
1114                         if (buff_done != 0) {
1115                                 // transmitted a DATAx pkt on the IN ep
1116                                 // clear DMA done bit
1117                                 if (buff_done & DMA_D0)
1118                                         clear_dma_done0(ep->indma);
1119                                 if (buff_done & DMA_D1)
1120                                         clear_dma_done1(ep->indma);
1121
1122                                 pkt = send_packet_complete(ep);
1123
1124                                 spin_unlock(&ep->lock);
1125                                 dev->func_cb(CB_PKT_COMPLETE,
1126                                              (unsigned long)pkt,
1127                                              dev->cb_data);
1128                                 spin_lock(&ep->lock);
1129                         }
1130                 } else {
1131                         /*
1132                          * Check packet receive done (OUT ep). Shouldn't get
1133                          * these, the rx packet complete intr should happen
1134                          * before the DMA done intr occurs.
1135                          */
1136                         buff_done = get_dma_buffer_done(ep->outdma);
1137                         if (buff_done != 0) {
1138                                 // received a DATAx pkt on the OUT ep
1139                                 // clear DMA done bit
1140                                 if (buff_done & DMA_D0)
1141                                         clear_dma_done0(ep->outdma);
1142                                 if (buff_done & DMA_D1)
1143                                         clear_dma_done1(ep->outdma);
1144
1145                                 //process_ep_receive(dev, ep);
1146                         }
1147                 }
1148
1149                 spin_unlock(&ep->lock);
1150         }
1151 }
1152
1153
1154 /***************************************************************************
1155  * Here begins the external interface functions
1156  ***************************************************************************
1157  */
1158
1159 /*
1160  * allocate a new packet
1161  */
1162 int
1163 usbdev_alloc_packet(int ep_addr, int data_size, usbdev_pkt_t** pkt)
1164 {
1165         endpoint_t * ep = epaddr_to_ep(&usbdev, ep_addr);
1166         usbdev_pkt_t* lpkt = NULL;
1167
1168         if (!ep || !ep->active || ep->address < 2)
1169                 return -ENODEV;
1170         if (data_size > ep->max_pkt_size)
1171                 return -EINVAL;
1172
1173         lpkt = *pkt = alloc_packet(ep, data_size, NULL);
1174         if (!lpkt)
1175                 return -ENOMEM;
1176         return 0;
1177 }
1178
1179
1180 /*
1181  * packet send
1182  */
1183 int
1184 usbdev_send_packet(int ep_addr, usbdev_pkt_t * pkt)
1185 {
1186         unsigned long flags;
1187         int count;
1188         endpoint_t * ep;
1189
1190         if (!pkt || !(ep = epaddr_to_ep(&usbdev, pkt->ep_addr)) ||
1191             !ep->active || ep->address < 2)
1192                 return -ENODEV;
1193         if (ep->direction != USB_DIR_IN)
1194                 return -EINVAL;
1195
1196         spin_lock_irqsave(&ep->lock, flags);
1197         count = send_packet(&usbdev, pkt, 1);
1198         spin_unlock_irqrestore(&ep->lock, flags);
1199
1200         return count;
1201 }
1202
1203 /*
1204  * packet receive
1205  */
1206 int
1207 usbdev_receive_packet(int ep_addr, usbdev_pkt_t** pkt)
1208 {
1209         unsigned long flags;
1210         usbdev_pkt_t* lpkt = NULL;
1211         endpoint_t *ep = epaddr_to_ep(&usbdev, ep_addr);
1212
1213         if (!ep || !ep->active || ep->address < 2)
1214                 return -ENODEV;
1215         if (ep->direction != USB_DIR_OUT)
1216                 return -EINVAL;
1217
1218         spin_lock_irqsave(&ep->lock, flags);
1219         if (ep->outlist.count > 1)
1220                 lpkt = unlink_head(&ep->outlist);
1221         spin_unlock_irqrestore(&ep->lock, flags);
1222
1223         if (!lpkt) {
1224                 /* no packet available */
1225                 *pkt = NULL;
1226                 return -ENODATA;
1227         }
1228
1229         *pkt = lpkt;
1230
1231         return lpkt->size;
1232 }
1233
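/*
 * Illustrative sketch only (not part of the driver): one way a
 * function layer might use the packet calls above. The endpoint
 * addresses (2 for IN, 4 for OUT) and the 8-byte payload are
 * assumptions for the example; real addresses come from the endpoint
 * descriptors handed to usbdev_init().
 */
#if 0
static void example_packet_io(void)
{
        usbdev_pkt_t *pkt;
        int ret;

        /* allocate a packet bound to IN endpoint 2, fill it, queue it;
           completion is reported via the CB_PKT_COMPLETE callback */
        ret = usbdev_alloc_packet(2, 8, &pkt);
        if (ret == 0) {
                memset(pkt->payload, 0, 8);     /* real data goes here */
                usbdev_send_packet(2, pkt);
        }

        /* poll OUT endpoint 4 for a completed packet; the caller owns
           (and eventually kfree's) any packet returned here */
        ret = usbdev_receive_packet(4, &pkt);
        if (ret >= 0) {
                /* consume pkt->payload[0 .. pkt->size - 1] */
                kfree(pkt);
        }
}
#endif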
1234
1235 /*
1236  * return total queued byte count on the endpoint.
1237  */
1238 int
1239 usbdev_get_byte_count(int ep_addr)
1240 {
1241         unsigned long flags;
1242         pkt_list_t *list;
1243         usbdev_pkt_t *scan;
1244         int count = 0;
1245         endpoint_t * ep = epaddr_to_ep(&usbdev, ep_addr);
1246
1247         if (!ep || !ep->active || ep->address < 2)
1248                 return -ENODEV;
1249
1250         if (ep->direction == USB_DIR_IN) {
1251                 list = &ep->inlist;
1252
1253                 spin_lock_irqsave(&ep->lock, flags);
1254                 for (scan = list->head; scan; scan = scan->next)
1255                         count += scan->size;
1256                 spin_unlock_irqrestore(&ep->lock, flags);
1257         } else {
1258                 list = &ep->outlist;
1259
1260                 spin_lock_irqsave(&ep->lock, flags);
1261                 if (list->count > 1) {
1262                         for (scan = list->head; scan != list->tail;
1263                              scan = scan->next)
1264                                 count += scan->size;
1265                 }
1266                 spin_unlock_irqrestore(&ep->lock, flags);
1267         }
1268
1269         return count;
1270 }
1271
1272
1273 void
1274 usbdev_exit(void)
1275 {
1276         endpoint_t *ep;
1277         int i;
1278
1279         au_writel(0, USBD_INTEN);       // disable usb dev ints
1280         au_writel(0, USBD_ENABLE);      // disable usb dev
1281
1282         free_irq(AU1000_USB_DEV_REQ_INT, &usbdev);
1283         free_irq(AU1000_USB_DEV_SUS_INT, &usbdev);
1284
1285         // free all control endpoint resources
1286         ep = &usbdev.ep[0];
1287         free_au1000_dma(ep->indma);
1288         free_au1000_dma(ep->outdma);
1289         endpoint_flush(ep);
1290
1291         // free ep resources
1292         for (i = 2; i < 6; i++) {
1293                 ep = &usbdev.ep[i];
1294                 if (!ep->active) continue;
1295
1296                 if (ep->direction == USB_DIR_IN) {
1297                         free_au1000_dma(ep->indma);
1298                 } else {
1299                         free_au1000_dma(ep->outdma);
1300                 }
1301                 endpoint_flush(ep);
1302         }
1303
1304         kfree(usbdev.full_conf_desc);
1305 }
1306
1307 int
1308 usbdev_init(struct usb_device_descriptor* dev_desc,
1309             struct usb_config_descriptor* config_desc,
1310             struct usb_interface_descriptor* if_desc,
1311             struct usb_endpoint_descriptor* ep_desc,
1312             struct usb_string_descriptor* str_desc[],
1313             void (*cb)(usbdev_cb_type_t, unsigned long, void *),
1314             void* cb_data)
1315 {
1316         endpoint_t *ep0;
1317         int i, ret=0;
1318         u8* fcd;
1319
1320         if (dev_desc->bNumConfigurations > 1 ||
1321             config_desc->bNumInterfaces > 1 ||
1322             if_desc->bNumEndpoints > 4) {
1323                 err("Only one config, one i/f, and no more "
1324                     "than 4 ep's allowed");
1325                 ret = -EINVAL;
1326                 goto out;
1327         }
1328
1329         if (!cb) {
1330                 err("Function-layer callback required");
1331                 ret = -EINVAL;
1332                 goto out;
1333         }
1334
1335         if (dev_desc->bMaxPacketSize0 != USBDEV_EP0_MAX_PACKET_SIZE) {
1336                 warn("EP0 Max Packet size must be %d",
1337                      USBDEV_EP0_MAX_PACKET_SIZE);
1338                 dev_desc->bMaxPacketSize0 = USBDEV_EP0_MAX_PACKET_SIZE;
1339         }
1340
1341         memset(&usbdev, 0, sizeof(struct usb_dev));
1342
1343         usbdev.state = DEFAULT;
1344         usbdev.dev_desc = dev_desc;
1345         usbdev.if_desc = if_desc;
1346         usbdev.conf_desc = config_desc;
1347         for (i=0; i<6; i++)
1348                 usbdev.str_desc[i] = str_desc[i];
1349         usbdev.func_cb = cb;
1350         usbdev.cb_data = cb_data;
1351
1352         /* Initialize default control endpoint */
1353         ep0 = &usbdev.ep[0];
1354         ep0->active = 1;
1355         ep0->type = CONTROL_EP;
1356         ep0->max_pkt_size = USBDEV_EP0_MAX_PACKET_SIZE;
1357         spin_lock_init(&ep0->lock);
1358         ep0->desc = NULL;       // ep0 has no descriptor
1359         ep0->address = 0;
1360         ep0->direction = 0;
1361         ep0->reg = &ep_reg[0];
1362
1363         /* Initialize the other requested endpoints */
1364         for (i = 0; i < if_desc->bNumEndpoints; i++) {
1365                 struct usb_endpoint_descriptor* epd = &ep_desc[i];
1366                 endpoint_t *ep;
1367
1368                 if ((epd->bEndpointAddress & 0x80) == USB_DIR_IN) {
1369                         ep = &usbdev.ep[2];
1370                         ep->address = 2;
1371                         if (ep->active) {
1372                                 ep = &usbdev.ep[3];
1373                                 ep->address = 3;
1374                                 if (ep->active) {
1375                                         err("too many IN ep's requested");
1376                                         ret = -ENODEV;
1377                                         goto out;
1378                                 }
1379                         }
1380                 } else {
1381                         ep = &usbdev.ep[4];
1382                         ep->address = 4;
1383                         if (ep->active) {
1384                                 ep = &usbdev.ep[5];
1385                                 ep->address = 5;
1386                                 if (ep->active) {
1387                                         err("too many OUT ep's requested");
1388                                         ret = -ENODEV;
1389                                         goto out;
1390                                 }
1391                         }
1392                 }
1393
1394                 ep->active = 1;
1395                 epd->bEndpointAddress &= ~0x0f;
1396                 epd->bEndpointAddress |= (u8)ep->address;
1397                 ep->direction = epd->bEndpointAddress & 0x80;
1398                 ep->type = epd->bmAttributes & 0x03;
1399                 ep->max_pkt_size = le16_to_cpu(epd->wMaxPacketSize);
1400                 spin_lock_init(&ep->lock);
1401                 ep->desc = epd;
1402                 ep->reg = &ep_reg[ep->address];
1403         }
1404
1405         /*
1406          * initialize the full config descriptor
1407          */
1408         usbdev.full_conf_desc = fcd = kmalloc(le16_to_cpu(config_desc->wTotalLength),
1409                                               ALLOC_FLAGS);
1410         if (!fcd) {
1411                 err("failed to alloc full config descriptor");
1412                 ret = -ENOMEM;
1413                 goto out;
1414         }
1415
1416         memcpy(fcd, config_desc, USB_DT_CONFIG_SIZE);
1417         fcd += USB_DT_CONFIG_SIZE;
1418         memcpy(fcd, if_desc, USB_DT_INTERFACE_SIZE);
1419         fcd += USB_DT_INTERFACE_SIZE;
1420         for (i = 0; i < if_desc->bNumEndpoints; i++) {
1421                 memcpy(fcd, &ep_desc[i], USB_DT_ENDPOINT_SIZE);
1422                 fcd += USB_DT_ENDPOINT_SIZE;
1423         }
1424
1425         /* Now we're ready to enable the controller */
1426         au_writel(0x0002, USBD_ENABLE);
1427         udelay(100);
1428         au_writel(0x0003, USBD_ENABLE);
1429         udelay(100);
1430
1431         /* build and send config table based on ep descriptors */
1432         for (i = 0; i < 6; i++) {
1433                 endpoint_t *ep;
1434                 if (i == 1)
1435                         continue; // skip dummy ep
1436                 ep = &usbdev.ep[i];
1437                 if (ep->active) {
1438                         au_writel((ep->address << 4) | 0x04, USBD_CONFIG);
1439                         au_writel(((ep->max_pkt_size & 0x380) >> 7) |
1440                                   (ep->direction >> 4) | (ep->type << 4),
1441                                   USBD_CONFIG);
1442                         au_writel((ep->max_pkt_size & 0x7f) << 1, USBD_CONFIG);
1443                         au_writel(0x00, USBD_CONFIG);
1444                         au_writel(ep->address, USBD_CONFIG);
1445                 } else {
1446                         u8 dir = (i==2 || i==3) ? DIR_IN : DIR_OUT;
1447                         au_writel((i << 4) | 0x04, USBD_CONFIG);
1448                         au_writel(((16 & 0x380) >> 7) | dir |
1449                                   (BULK_EP << 4), USBD_CONFIG);
1450                         au_writel((16 & 0x7f) << 1, USBD_CONFIG);
1451                         au_writel(0x00, USBD_CONFIG);
1452                         au_writel(i, USBD_CONFIG);
1453                 }
1454         }
1455
1456         /*
1457          * Enable Receive FIFO Complete interrupts only. Transmit
1458          * complete is being handled by the DMA done interrupts.
1459          */
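        // 0x31 sets bits 0, 4 and 5, matching the EP0/EP4/EP5 receive
        // status bits checked in req_sus_intr() above.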
1460         au_writel(0x31, USBD_INTEN);
1461
1462         /*
1463          * Controller is now enabled, request DMA and IRQ
1464          * resources.
1465          */
1466
1467         /* request the USB device transfer complete interrupt */
1468         if (request_irq(AU1000_USB_DEV_REQ_INT, req_sus_intr, IRQF_DISABLED,
1469                         "USBdev req", &usbdev)) {
1470                 err("Can't get device request intr");
1471                 ret = -ENXIO;
1472                 goto out;
1473         }
1474         /* request the USB device suspend interrupt */
1475         if (request_irq(AU1000_USB_DEV_SUS_INT, req_sus_intr, IRQF_DISABLED,
1476                         "USBdev sus", &usbdev)) {
1477                 err("Can't get device suspend intr");
1478                 ret = -ENXIO;
1479                 goto out;
1480         }
1481
1482         /* Request EP0 DMA and IRQ */
1483         if ((ep0->indma = request_au1000_dma(ep_dma_id[0].id,
1484                                              ep_dma_id[0].str,
1485                                              dma_done_ep0_intr,
1486                                              IRQF_DISABLED,
1487                                              &usbdev)) < 0) {
1488                 err("Can't get %s DMA", ep_dma_id[0].str);
1489                 ret = -ENXIO;
1490                 goto out;
1491         }
1492         if ((ep0->outdma = request_au1000_dma(ep_dma_id[1].id,
1493                                               ep_dma_id[1].str,
1494                                               NULL, 0, NULL)) < 0) {
1495                 err("Can't get %s DMA", ep_dma_id[1].str);
1496                 ret = -ENXIO;
1497                 goto out;
1498         }
1499
1500         // Flush the ep0 buffers and FIFOs
1501         endpoint_flush(ep0);
1502         // start packet reception on ep0
1503         kickstart_receive_packet(ep0);
1504
1505         /* Request DMA and IRQ for the other endpoints */
1506         for (i = 2; i < 6; i++) {
1507                 endpoint_t *ep = &usbdev.ep[i];
1508                 if (!ep->active)
1509                         continue;
1510
1511                 // Flush the endpoint buffers and FIFOs
1512                 endpoint_flush(ep);
1513
1514                 if (ep->direction == USB_DIR_IN) {
1515                         ep->indma =
1516                                 request_au1000_dma(ep_dma_id[ep->address].id,
1517                                                    ep_dma_id[ep->address].str,
1518                                                    dma_done_ep_intr,
1519                                                    IRQF_DISABLED,
1520                                                    &usbdev);
1521                         if (ep->indma < 0) {
1522                                 err("Can't get %s DMA",
1523                                     ep_dma_id[ep->address].str);
1524                                 ret = -ENXIO;
1525                                 goto out;
1526                         }
1527                 } else {
1528                         ep->outdma =
1529                                 request_au1000_dma(ep_dma_id[ep->address].id,
1530                                                    ep_dma_id[ep->address].str,
1531                                                    NULL, 0, NULL);
1532                         if (ep->outdma < 0) {
1533                                 err("Can't get %s DMA",
1534                                     ep_dma_id[ep->address].str);
1535                                 ret = -ENXIO;
1536                                 goto out;
1537                         }
1538
1539                         // start packet reception on OUT endpoint
1540                         kickstart_receive_packet(ep);
1541                 }
1542         }
1543
1544  out:
1545         if (ret)
1546                 usbdev_exit();
1547         return ret;
1548 }
1549
1550 EXPORT_SYMBOL(usbdev_init);
1551 EXPORT_SYMBOL(usbdev_exit);
1552 EXPORT_SYMBOL(usbdev_alloc_packet);
1553 EXPORT_SYMBOL(usbdev_receive_packet);
1554 EXPORT_SYMBOL(usbdev_send_packet);
1555 EXPORT_SYMBOL(usbdev_get_byte_count);
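
/*
 * Illustrative sketch only (not part of the driver): the shape of a
 * minimal function-layer registration against the interface exported
 * above. The descriptor variables and the callback body are
 * placeholders; a real function layer supplies complete chapter-9
 * descriptors and acts on CB_NEW_STATE / CB_PKT_COMPLETE events.
 */
#if 0
static void example_func_cb(usbdev_cb_type_t type, unsigned long arg,
                            void *cb_data)
{
        switch (type) {
        case CB_NEW_STATE:
                /* arg is the new usbdev_state_t (e.g. ADDRESS, CONFIGURED) */
                break;
        case CB_PKT_COMPLETE:
                /* arg is a usbdev_pkt_t* for a completed packet */
                break;
        default:
                break;
        }
}

static int example_register(void)
{
        /* placeholder descriptors, assumed to be defined elsewhere */
        extern struct usb_device_descriptor    example_dev_desc;
        extern struct usb_config_descriptor    example_conf_desc;
        extern struct usb_interface_descriptor example_if_desc;
        extern struct usb_endpoint_descriptor  example_ep_desc[2];
        extern struct usb_string_descriptor   *example_str_desc[6];

        return usbdev_init(&example_dev_desc, &example_conf_desc,
                           &example_if_desc, example_ep_desc,
                           example_str_desc, example_func_cb, NULL);
}
#endif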