Merge branch 'hwmon-for-linus' of git://jdelvare.pck.nerim.net/jdelvare-2.6
[pandora-kernel.git] / drivers / usb / gadget / goku_udc.c
1 /*
2  * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
3  *
4  * Copyright (C) 2000-2002 Lineo
5  *      by Stuart Lynne, Tom Rushworth, and Bruce Balden
6  * Copyright (C) 2002 Toshiba Corporation
7  * Copyright (C) 2003 MontaVista Software (source@mvista.com)
8  *
9  * This file is licensed under the terms of the GNU General Public
10  * License version 2.  This program is licensed "as is" without any
11  * warranty of any kind, whether express or implied.
12  */
13
14 /*
15  * This device has ep0 and three semi-configurable bulk/interrupt endpoints.
16  *
17  *  - Endpoint numbering is fixed: ep{1,2,3}-bulk
18  *  - Gadget drivers can choose ep maxpacket (8/16/32/64)
19  *  - Gadget drivers can choose direction (IN, OUT)
20  *  - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
21  */
22
23 #undef DEBUG
24 // #define      VERBOSE         /* extra debug messages (success too) */
25 // #define      USB_TRACE       /* packet-level success messages */
26
27 #include <linux/kernel.h>
28 #include <linux/module.h>
29 #include <linux/pci.h>
30 #include <linux/delay.h>
31 #include <linux/ioport.h>
32 #include <linux/slab.h>
33 #include <linux/smp_lock.h>
34 #include <linux/errno.h>
35 #include <linux/init.h>
36 #include <linux/timer.h>
37 #include <linux/list.h>
38 #include <linux/interrupt.h>
39 #include <linux/proc_fs.h>
40 #include <linux/device.h>
41 #include <linux/usb/ch9.h>
42 #include <linux/usb_gadget.h>
43
44 #include <asm/byteorder.h>
45 #include <asm/io.h>
46 #include <asm/irq.h>
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
49
50
51 #include "goku_udc.h"
52
#define DRIVER_DESC             "TC86C001 USB Device Controller"
#define DRIVER_VERSION          "30-Oct 2003"

/* sentinel value meaning "no dma mapping set up" for usb_request.dma */
#define DMA_ADDR_INVALID        (~(dma_addr_t)0)

static const char driver_name [] = "goku_udc";
static const char driver_desc [] = DRIVER_DESC;

MODULE_AUTHOR("source@mvista.com");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");


/*
 * IN dma behaves ok under testing, though the IN-dma abort paths don't
 * seem to behave quite as expected.  Used by default.
 *
 * OUT dma documents design problems handling the common "short packet"
 * transfer termination policy; it couldn't be enabled by default, even
 * if the OUT-dma abort problems had a resolution.
 */
static unsigned use_dma = 1;

#if 0
//#include <linux/moduleparam.h>
/* "modprobe goku_udc use_dma=1" etc
 *      0 to disable dma
 *      1 to use IN dma only (normal operation)
 *      2 to use IN and OUT dma
 */
module_param(use_dma, uint, S_IRUGO);
#endif

/*-------------------------------------------------------------------------*/

/* forward decl: dequeues and completes ALL requests on an endpoint */
static void nuke(struct goku_ep *, int status);
89
90 static inline void
91 command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum)
92 {
93         writel(COMMAND_EP(epnum) | command, &regs->Command);
94         udelay(300);
95 }
96
/*
 * Enable one of the ep1..ep3 endpoints per @desc: validate the
 * descriptor against this endpoint's fixed number and the chip's
 * bulk/interrupt-only capabilities, then program maxpacket, direction,
 * buffering mode, and (where allowed) dma.
 * Returns 0 on success or a negative errno.
 */
static int
goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
        struct goku_udc *dev;
        struct goku_ep  *ep;
        u32             mode;
        u16             max;
        unsigned long   flags;

        ep = container_of(_ep, struct goku_ep, ep);
        if (!_ep || !desc || ep->desc
                        || desc->bDescriptorType != USB_DT_ENDPOINT)
                return -EINVAL;
        dev = ep->dev;
        /* ep0 is never enabled through this gadget-facing hook */
        if (ep == &dev->ep[0])
                return -EINVAL;
        if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;
        /* endpoint numbering is fixed on this chip: ep{1,2,3} only */
        if (ep->num != (desc->bEndpointAddress & 0x0f))
                return -EINVAL;

        switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
        case USB_ENDPOINT_XFER_BULK:
        case USB_ENDPOINT_XFER_INT:
                break;
        default:
                return -EINVAL;
        }

        /* the hardware must report this ep as currently unconfigured */
        if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
                        != EPxSTATUS_EP_INVALID)
                return -EBUSY;

        /* enabling the no-toggle interrupt mode would need an api hook */
        mode = 0;
        max = le16_to_cpu(get_unaligned(&desc->wMaxPacketSize));
        /* encode maxpacket 8/16/32/64 as 0..3; deliberate fallthrough */
        switch (max) {
        case 64:        mode++;
        case 32:        mode++;
        case 16:        mode++;
        case 8:         mode <<= 3;
                        break;
        default:
                return -EINVAL;
        }
        mode |= 2 << 1;         /* bulk, or intr-with-toggle */

        /* ep1/ep2 dma direction is chosen early; it works in the other
         * direction, with pio.  be cautious with out-dma.
         */
        ep->is_in = (USB_DIR_IN & desc->bEndpointAddress) != 0;
        if (ep->is_in) {
                mode |= 1;
                /* IN dma only on the master-read endpoint (use_dma >= 1) */
                ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
        } else {
                /* OUT dma only when explicitly requested (use_dma == 2) */
                ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
                if (ep->dma)
                        DBG(dev, "%s out-dma hides short packets\n",
                                ep->ep.name);
        }

        spin_lock_irqsave(&ep->dev->lock, flags);

        /* ep1 and ep2 can do double buffering and/or dma */
        if (ep->num < 3) {
                struct goku_udc_regs __iomem    *regs = ep->dev->regs;
                u32                             tmp;

                /* double buffer except (for now) with pio in */
                tmp = ((ep->dma || !ep->is_in)
                                ? 0x10  /* double buffered */
                                : 0x11  /* single buffer */
                        ) << ep->num;
                tmp |= readl(&regs->EPxSingle);
                writel(tmp, &regs->EPxSingle);

                tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
                tmp |= readl(&regs->EPxBCS);
                writel(tmp, &regs->EPxBCS);
        }
        writel(mode, ep->reg_mode);
        command(ep->dev->regs, COMMAND_RESET, ep->num);
        ep->ep.maxpacket = max;
        ep->stopped = 0;
        ep->desc = desc;
        spin_unlock_irqrestore(&ep->dev->lock, flags);

        DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
                ep->is_in ? "IN" : "OUT",
                ep->dma ? "dma" : "pio",
                max);

        return 0;
}
191
/*
 * Return an endpoint to its unconfigured state: invalidate it in the
 * hardware (when @regs is non-NULL), mask its interrupts, clear its
 * buffering/dma register bits, reset any in-flight dma, and reset the
 * software-side bookkeeping.  Caller holds the device lock.
 */
static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
{
        struct goku_udc         *dev = ep->dev;

        if (regs) {
                command(regs, COMMAND_INVALID, ep->num);
                if (ep->num) {
                        /* mask this ep's dma-completion irqs, if any */
                        if (ep->num == UDC_MSTWR_ENDPOINT)
                                dev->int_enable &= ~(INT_MSTWREND
                                                        |INT_MSTWRTMOUT);
                        else if (ep->num == UDC_MSTRD_ENDPOINT)
                                dev->int_enable &= ~INT_MSTRDEND;
                        dev->int_enable &= ~INT_EPxDATASET (ep->num);
                } else
                        dev->int_enable &= ~INT_EP0;
                writel(dev->int_enable, &regs->int_enable);
                /* read back to flush the posted pci write */
                readl(&regs->int_enable);
                if (ep->num < 3) {
                        struct goku_udc_regs __iomem    *r = ep->dev->regs;
                        u32                             tmp;

                        /* clear this ep's buffering-mode bits */
                        tmp = readl(&r->EPxSingle);
                        tmp &= ~(0x11 << ep->num);
                        writel(tmp, &r->EPxSingle);

                        tmp = readl(&r->EPxBCS);
                        tmp &= ~(0x11 << ep->num);
                        writel(tmp, &r->EPxBCS);
                }
                /* reset dma in case we're still using it */
                if (ep->dma) {
                        u32     master;

                        master = readl(&regs->dma_master) & MST_RW_BITS;
                        if (ep->num == UDC_MSTWR_ENDPOINT) {
                                master &= ~MST_W_BITS;
                                master |= MST_WR_RESET;
                        } else {
                                master &= ~MST_R_BITS;
                                master |= MST_RD_RESET;
                        }
                        writel(master, &regs->dma_master);
                }
        }

        /* software state: back to the fifo-sized, stopped default */
        ep->ep.maxpacket = MAX_FIFO_SIZE;
        ep->desc = NULL;
        ep->stopped = 1;
        ep->irqs = 0;
        ep->dma = 0;
}
243
244 static int goku_ep_disable(struct usb_ep *_ep)
245 {
246         struct goku_ep  *ep;
247         struct goku_udc *dev;
248         unsigned long   flags;
249
250         ep = container_of(_ep, struct goku_ep, ep);
251         if (!_ep || !ep->desc)
252                 return -ENODEV;
253         dev = ep->dev;
254         if (dev->ep0state == EP0_SUSPEND)
255                 return -EBUSY;
256
257         VDBG(dev, "disable %s\n", _ep->name);
258
259         spin_lock_irqsave(&dev->lock, flags);
260         nuke(ep, -ESHUTDOWN);
261         ep_reset(dev->regs, ep);
262         spin_unlock_irqrestore(&dev->lock, flags);
263
264         return 0;
265 }
266
267 /*-------------------------------------------------------------------------*/
268
269 static struct usb_request *
270 goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
271 {
272         struct goku_request     *req;
273
274         if (!_ep)
275                 return NULL;
276         req = kzalloc(sizeof *req, gfp_flags);
277         if (!req)
278                 return NULL;
279
280         req->req.dma = DMA_ADDR_INVALID;
281         INIT_LIST_HEAD(&req->queue);
282         return &req->req;
283 }
284
285 static void
286 goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
287 {
288         struct goku_request     *req;
289
290         if (!_ep || !_req)
291                 return;
292
293         req = container_of(_req, struct goku_request, req);
294         WARN_ON(!list_empty(&req->queue));
295         kfree(req);
296 }
297
298 /*-------------------------------------------------------------------------*/
299
/* USE_KMALLOC selects plain kmalloc() for i/o buffers on platforms
 * whose caches are dma-coherent; elsewhere goku_alloc_buffer() falls
 * back to dma_alloc_coherent() for dma-capable endpoints.
 */
#undef USE_KMALLOC

/* many common platforms have dma-coherent caches, which means that it's
 * safe to use kmalloc() memory for all i/o buffers without using any
 * cache flushing calls.  (unless you're trying to share cache lines
 * between dma and non-dma activities, which is a slow idea in any case.)
 *
 * other platforms need more care, with 2.6 having a moderately general
 * solution except for the common "buffer is smaller than a page" case.
 */
#if     defined(CONFIG_X86)
#define USE_KMALLOC

#elif   defined(CONFIG_MIPS) && !defined(CONFIG_DMA_NONCOHERENT)
#define USE_KMALLOC

#elif   defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
#define USE_KMALLOC

#endif
320
/* allocating buffers this way eliminates dma mapping overhead, which
 * on some platforms will mean eliminating a per-io buffer copy.  with
 * some kinds of system caches, further tweaks may still be needed.
 */
/*
 * Allocate an i/o buffer for this endpoint and report its dma address
 * through @dma (DMA_ADDR_INVALID when no coherent mapping was made).
 * Returns the cpu-view pointer, or NULL on bad args / no memory.
 */
static void *
goku_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
                        dma_addr_t *dma, gfp_t gfp_flags)
{
        void            *retval;
        struct goku_ep  *ep;

        ep = container_of(_ep, struct goku_ep, ep);
        if (!_ep)
                return NULL;
        *dma = DMA_ADDR_INVALID;

#if     defined(USE_KMALLOC)
        /* dma-coherent platform: kmalloc memory is directly usable */
        retval = kmalloc(bytes, gfp_flags);
        if (retval)
                *dma = virt_to_phys(retval);
#else
        if (ep->dma) {
                /* the main problem with this call is that it wastes memory
                 * on typical 1/N page allocations: it allocates 1-N pages.
                 */
#warning Using dma_alloc_coherent even with buffers smaller than a page.
                retval = dma_alloc_coherent(&ep->dev->pdev->dev,
                                bytes, dma, gfp_flags);
        } else
                retval = kmalloc(bytes, gfp_flags);
#endif
        return retval;
}
354
/*
 * Free a buffer from goku_alloc_buffer(), routing it back to whichever
 * allocator produced it: dma_free_coherent() when a coherent mapping
 * exists (non-USE_KMALLOC builds), kfree() otherwise.
 */
static void
goku_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma, unsigned bytes)
{
        /* free memory into the right allocator */
#ifndef USE_KMALLOC
        if (dma != DMA_ADDR_INVALID) {
                struct goku_ep  *ep;

                ep = container_of(_ep, struct goku_ep, ep);
                if (!_ep)
                        return;
                dma_free_coherent(&ep->dev->pdev->dev, bytes, buf, dma);
        } else
#endif
                kfree (buf);
}
371
372 /*-------------------------------------------------------------------------*/
373
/*
 * Retire one request: unlink it, finalize its status, tear down any
 * dma mapping this driver created, and invoke the gadget's completion
 * callback.  Called with the device lock held; the lock is dropped
 * around the callback so the gadget may requeue from within it.
 */
static void
done(struct goku_ep *ep, struct goku_request *req, int status)
{
        struct goku_udc         *dev;
        unsigned                stopped = ep->stopped;

        list_del_init(&req->queue);

        /* preserve any status the fifo/dma path already recorded */
        if (likely(req->req.status == -EINPROGRESS))
                req->req.status = status;
        else
                status = req->req.status;

        dev = ep->dev;
        /* only unmap buffers this driver mapped in goku_queue() */
        if (req->mapped) {
                pci_unmap_single(dev->pdev, req->req.dma, req->req.length,
                        ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
                req->req.dma = DMA_ADDR_INVALID;
                req->mapped = 0;
        }

#ifndef USB_TRACE
        if (status && status != -ESHUTDOWN)
#endif
                VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
                        ep->ep.name, &req->req, status,
                        req->req.actual, req->req.length);

        /* don't modify queue heads during completion callback */
        ep->stopped = 1;
        spin_unlock(&dev->lock);
        req->req.complete(&ep->ep, &req->req);
        spin_lock(&dev->lock);
        ep->stopped = stopped;
}
409
410 /*-------------------------------------------------------------------------*/
411
412 static inline int
413 write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
414 {
415         unsigned        length, count;
416
417         length = min(req->req.length - req->req.actual, max);
418         req->req.actual += length;
419
420         count = length;
421         while (likely(count--))
422                 writel(*buf++, fifo);
423         return length;
424 }
425
426 // return:  0 = still running, 1 = completed, negative = errno
427 static int write_fifo(struct goku_ep *ep, struct goku_request *req)
428 {
429         struct goku_udc *dev = ep->dev;
430         u32             tmp;
431         u8              *buf;
432         unsigned        count;
433         int             is_last;
434
435         tmp = readl(&dev->regs->DataSet);
436         buf = req->req.buf + req->req.actual;
437         prefetch(buf);
438
439         dev = ep->dev;
440         if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
441                 return -EL2HLT;
442
443         /* NOTE:  just single-buffered PIO-IN for now.  */
444         if (unlikely((tmp & DATASET_A(ep->num)) != 0))
445                 return 0;
446
447         /* clear our "packet available" irq */
448         if (ep->num != 0)
449                 writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);
450
451         count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);
452
453         /* last packet often short (sometimes a zlp, especially on ep0) */
454         if (unlikely(count != ep->ep.maxpacket)) {
455                 writel(~(1<<ep->num), &dev->regs->EOP);
456                 if (ep->num == 0) {
457                         dev->ep[0].stopped = 1;
458                         dev->ep0state = EP0_STATUS;
459                 }
460                 is_last = 1;
461         } else {
462                 if (likely(req->req.length != req->req.actual)
463                                 || req->req.zero)
464                         is_last = 0;
465                 else
466                         is_last = 1;
467         }
468 #if 0           /* printk seemed to trash is_last...*/
469 //#ifdef USB_TRACE
470         VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n",
471                 ep->ep.name, count, is_last ? "/last" : "",
472                 req->req.length - req->req.actual, req);
473 #endif
474
475         /* requests complete when all IN data is in the FIFO,
476          * or sometimes later, if a zlp was needed.
477          */
478         if (is_last) {
479                 done(ep, req, 0);
480                 return 1;
481         }
482
483         return 0;
484 }
485
/*
 * Drain PIO-OUT packets into the head request's buffer.  For ep1/ep2
 * the hardware double-buffers, so the loop (and the goto-top restart
 * with the next queued request) empties both buffers promptly.  Extra
 * data beyond the request buffer is discarded with -EOVERFLOW.  For
 * ep0, handles the no-data case and triggers the status stage.
 * Returns 1 when a request completed, 0 otherwise, negative errno on
 * an illegal ep0 state.
 */
static int read_fifo(struct goku_ep *ep, struct goku_request *req)
{
        struct goku_udc_regs __iomem    *regs;
        u32                             size, set;
        u8                              *buf;
        unsigned                        bufferspace, is_short, dbuff;

        regs = ep->dev->regs;
top:
        buf = req->req.buf + req->req.actual;
        prefetchw(buf);

        /* ep0 fifo reads are only legal during the OUT data stage */
        if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
                return -EL2HLT;

        dbuff = (ep->num == 1 || ep->num == 2);
        do {
                /* ack dataset irq matching the status we'll handle */
                if (ep->num != 0)
                        writel(~INT_EPxDATASET(ep->num), &regs->int_status);

                set = readl(&regs->DataSet) & DATASET_AB(ep->num);
                size = readl(&regs->EPxSizeLA[ep->num]);
                bufferspace = req->req.length - req->req.actual;

                /* usually do nothing without an OUT packet */
                if (likely(ep->num != 0 || bufferspace != 0)) {
                        if (unlikely(set == 0))
                                break;
                        /* use ep1/ep2 double-buffering for OUT */
                        if (!(size & PACKET_ACTIVE))
                                size = readl(&regs->EPxSizeLB[ep->num]);
                        if (!(size & PACKET_ACTIVE))    // "can't happen"
                                break;
                        size &= DATASIZE;       /* EPxSizeH == 0 */

                /* ep0out no-out-data case for set_config, etc */
                } else
                        size = 0;

                /* read all bytes from this packet */
                req->req.actual += size;
                is_short = (size < ep->ep.maxpacket);
#ifdef USB_TRACE
                VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
                        ep->ep.name, size, is_short ? "/S" : "",
                        req, req->req.actual, req->req.length);
#endif
                while (likely(size-- != 0)) {
                        u8      byte = (u8) readl(ep->reg_fifo);

                        if (unlikely(bufferspace == 0)) {
                                /* this happens when the driver's buffer
                                 * is smaller than what the host sent.
                                 * discard the extra data in this packet.
                                 */
                                if (req->req.status != -EOVERFLOW)
                                        DBG(ep->dev, "%s overflow %u\n",
                                                ep->ep.name, size);
                                req->req.status = -EOVERFLOW;
                        } else {
                                *buf++ = byte;
                                bufferspace--;
                        }
                }

                /* completion */
                if (unlikely(is_short || req->req.actual == req->req.length)) {
                        if (unlikely(ep->num == 0)) {
                                /* non-control endpoints now usable? */
                                if (ep->dev->req_config)
                                        writel(ep->dev->configured
                                                        ? USBSTATE_CONFIGURED
                                                        : 0,
                                                &regs->UsbState);
                                /* ep0out status stage */
                                writel(~(1<<0), &regs->EOP);
                                ep->stopped = 1;
                                ep->dev->ep0state = EP0_STATUS;
                        }
                        done(ep, req, 0);

                        /* empty the second buffer asap */
                        if (dbuff && !list_empty(&ep->queue)) {
                                req = list_entry(ep->queue.next,
                                                struct goku_request, queue);
                                goto top;
                        }
                        return 1;
                }
        } while (dbuff);
        return 0;
}
579
580 static inline void
581 pio_irq_enable(struct goku_udc *dev,
582                 struct goku_udc_regs __iomem *regs, int epnum)
583 {
584         dev->int_enable |= INT_EPxDATASET (epnum);
585         writel(dev->int_enable, &regs->int_enable);
586         /* write may still be posted */
587 }
588
589 static inline void
590 pio_irq_disable(struct goku_udc *dev,
591                 struct goku_udc_regs __iomem *regs, int epnum)
592 {
593         dev->int_enable &= ~INT_EPxDATASET (epnum);
594         writel(dev->int_enable, &regs->int_enable);
595         /* write may still be posted */
596 }
597
598 static inline void
599 pio_advance(struct goku_ep *ep)
600 {
601         struct goku_request     *req;
602
603         if (unlikely(list_empty (&ep->queue)))
604                 return;
605         req = list_entry(ep->queue.next, struct goku_request, queue);
606         (ep->is_in ? write_fifo : read_fifo)(ep, req);
607 }
608
609
610 /*-------------------------------------------------------------------------*/
611
// return:  0 = q running, 1 = q stopped, negative = errno
/*
 * Program the master dma engine for one request: set start/end
 * addresses, choose the end-of-packet policy (IN dma must special-case
 * zlps and short final packets), unmask the matching completion irq,
 * and kick the engine.  Caller holds the device lock.
 */
static int start_dma(struct goku_ep *ep, struct goku_request *req)
{
        struct goku_udc_regs __iomem    *regs = ep->dev->regs;
        u32                             master;
        u32                             start = req->req.dma;
        u32                             end = start + req->req.length - 1;

        master = readl(&regs->dma_master) & MST_RW_BITS;

        /* re-init the bits affecting IN dma; careful with zlps */
        if (likely(ep->is_in)) {
                /* engine should be idle; log (but tolerate) if not */
                if (unlikely(master & MST_RD_ENA)) {
                        DBG (ep->dev, "start, IN active dma %03x!!\n",
                                master);
//                      return -EL2HLT;
                }
                writel(end, &regs->in_dma_end);
                writel(start, &regs->in_dma_start);

                master &= ~MST_R_BITS;
                if (unlikely(req->req.length == 0))
                        master = MST_RD_ENA | MST_RD_EOPB;
                else if ((req->req.length % ep->ep.maxpacket) != 0
                                        || req->req.zero)
                        master = MST_RD_ENA | MST_EOPB_ENA;
                else
                        master = MST_RD_ENA | MST_EOPB_DIS;

                ep->dev->int_enable |= INT_MSTRDEND;

        /* Goku DMA-OUT merges short packets, which plays poorly with
         * protocols where short packets mark the transfer boundaries.
         * The chip supports a nonstandard policy with INT_MSTWRTMOUT,
         * ending transfers after 3 SOFs; we don't turn it on.
         */
        } else {
                if (unlikely(master & MST_WR_ENA)) {
                        DBG (ep->dev, "start, OUT active dma %03x!!\n",
                                master);
//                      return -EL2HLT;
                }
                writel(end, &regs->out_dma_end);
                writel(start, &regs->out_dma_start);

                master &= ~MST_W_BITS;
                master |= MST_WR_ENA | MST_TIMEOUT_DIS;

                ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
        }

        writel(master, &regs->dma_master);
        writel(ep->dev->int_enable, &regs->int_enable);
        return 0;
}
667
/*
 * Handle a dma completion irq for this endpoint: record how much the
 * engine transferred, complete the head request, and either start dma
 * for the next queued request or (queue empty) mask this direction's
 * dma irqs.  Returns early if the engine is in fact still running.
 * Caller holds the device lock.
 */
static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
{
        struct goku_request             *req;
        struct goku_udc_regs __iomem    *regs = ep->dev->regs;
        u32                             master;

        master = readl(&regs->dma_master);

        if (unlikely(list_empty(&ep->queue))) {
stop:
                /* nothing to advance; mask this direction's dma irqs */
                if (ep->is_in)
                        dev->int_enable &= ~INT_MSTRDEND;
                else
                        dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
                writel(dev->int_enable, &regs->int_enable);
                return;
        }
        req = list_entry(ep->queue.next, struct goku_request, queue);

        /* normal hw dma completion (not abort) */
        if (likely(ep->is_in)) {
                if (unlikely(master & MST_RD_ENA))
                        return;
                req->req.actual = readl(&regs->in_dma_current);
        } else {
                if (unlikely(master & MST_WR_ENA))
                        return;

                /* hardware merges short packets, and also hides packet
                 * overruns.  a partial packet MAY be in the fifo here.
                 */
                req->req.actual = readl(&regs->out_dma_current);
        }
        /* current register holds the last address touched; convert to
         * a byte count relative to the request's dma start address
         */
        req->req.actual -= req->req.dma;
        req->req.actual++;

#ifdef USB_TRACE
        VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
                ep->ep.name, ep->is_in ? "IN" : "OUT",
                req->req.actual, req->req.length, req);
#endif
        done(ep, req, 0);
        if (list_empty(&ep->queue))
                goto stop;
        req = list_entry(ep->queue.next, struct goku_request, queue);
        (void) start_dma(ep, req);
}
715
/*
 * Abort an in-flight dma transfer on this endpoint's queue head:
 * quiesce the fifo, reset the dma engine for the active direction,
 * and record partial-transfer accounting plus @status on the request.
 * If the engine already finished, just mark the request complete.
 * Caller holds the device lock and guarantees a non-empty queue.
 */
static void abort_dma(struct goku_ep *ep, int status)
{
        struct goku_udc_regs __iomem    *regs = ep->dev->regs;
        struct goku_request             *req;
        u32                             curr, master;

        /* NAK future host requests, hoping the implicit delay lets the
         * dma engine finish reading (or writing) its latest packet and
         * empty the dma buffer (up to 16 bytes).
         *
         * This avoids needing to clean up a partial packet in the fifo;
         * we can't do that for IN without side effects to HALT and TOGGLE.
         */
        command(regs, COMMAND_FIFO_DISABLE, ep->num);
        req = list_entry(ep->queue.next, struct goku_request, queue);
        master = readl(&regs->dma_master) & MST_RW_BITS;

        /* FIXME using these resets isn't usably documented. this may
         * not work unless it's followed by disabling the endpoint.
         *
         * FIXME the OUT reset path doesn't even behave consistently.
         */
        if (ep->is_in) {
                if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
                        goto finished;
                curr = readl(&regs->in_dma_current);

                /* collapse the window to the current address, then reset */
                writel(curr, &regs->in_dma_end);
                writel(curr, &regs->in_dma_start);

                master &= ~MST_R_BITS;
                master |= MST_RD_RESET;
                writel(master, &regs->dma_master);

                if (readl(&regs->dma_master) & MST_RD_ENA)
                        DBG(ep->dev, "IN dma active after reset!\n");

        } else {
                if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
                        goto finished;
                curr = readl(&regs->out_dma_current);

                writel(curr, &regs->out_dma_end);
                writel(curr, &regs->out_dma_start);

                master &= ~MST_W_BITS;
                master |= MST_WR_RESET;
                writel(master, &regs->dma_master);

                if (readl(&regs->dma_master) & MST_WR_ENA)
                        DBG(ep->dev, "OUT dma active after reset!\n");
        }
        /* bytes transferred before the abort took effect */
        req->req.actual = (curr - req->req.dma) + 1;
        req->req.status = status;

        VDBG(ep->dev, "%s %s %s %d/%d\n", __FUNCTION__, ep->ep.name,
                ep->is_in ? "IN" : "OUT",
                req->req.actual, req->req.length);

        command(regs, COMMAND_FIFO_ENABLE, ep->num);

        return;

finished:
        /* dma already completed; no abort needed */
        command(regs, COMMAND_FIFO_ENABLE, ep->num);
        req->req.actual = req->req.length;
        req->req.status = 0;
}
785
786 /*-------------------------------------------------------------------------*/
787
/* Endpoint queue() entry point (struct usb_ep_ops.queue).
 *
 * Validates the request, maps its buffer for DMA when the endpoint uses
 * DMA and the caller hasn't mapped it already, then either starts the
 * i/o immediately (queue was empty and endpoint not stopped) or links it
 * on the endpoint's queue for the irq handlers to advance later.
 *
 * Returns zero on success (the request completes asynchronously through
 * its ->complete callback) or a negative errno.
 */
static int
goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
        struct goku_request     *req;
        struct goku_ep          *ep;
        struct goku_udc         *dev;
        unsigned long           flags;
        int                     status;

        /* always require a cpu-view buffer so pio works */
        req = container_of(_req, struct goku_request, req);
        if (unlikely(!_req || !_req->complete
                        || !_req->buf || !list_empty(&req->queue)))
                return -EINVAL;
        /* non-control endpoints must be enabled (have a descriptor) */
        ep = container_of(_ep, struct goku_ep, ep);
        if (unlikely(!_ep || (!ep->desc && ep->num != 0)))
                return -EINVAL;
        dev = ep->dev;
        if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
                return -ESHUTDOWN;

        /* can't touch registers when suspended */
        if (dev->ep0state == EP0_SUSPEND)
                return -EBUSY;

        /* set up dma mapping in case the caller didn't */
        if (ep->dma && _req->dma == DMA_ADDR_INVALID) {
                _req->dma = pci_map_single(dev->pdev, _req->buf, _req->length,
                        ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
                /* remember we own the mapping, so done() unmaps it */
                req->mapped = 1;
        }

#ifdef USB_TRACE
        VDBG(dev, "%s queue req %p, len %u buf %p\n",
                        _ep->name, _req, _req->length, _req->buf);
#endif

        spin_lock_irqsave(&dev->lock, flags);

        _req->status = -EINPROGRESS;
        _req->actual = 0;

        /* for ep0 IN without premature status, zlp is required and
         * writing EOP starts the status stage (OUT).
         */
        if (unlikely(ep->num == 0 && ep->is_in))
                _req->zero = 1;

        /* kickstart this i/o queue? */
        status = 0;
        if (list_empty(&ep->queue) && likely(!ep->stopped)) {
                /* dma:  done after dma completion IRQ (or error)
                 * pio:  done after last fifo operation
                 */
                if (ep->dma)
                        status = start_dma(ep, req);
                else
                        status = (ep->is_in ? write_fifo : read_fifo)(ep, req);

                /* positive status means the i/o finished synchronously;
                 * either way the request must not be queued again
                 */
                if (unlikely(status != 0)) {
                        if (status > 0)
                                status = 0;
                        req = NULL;
                }

        } /* else pio or dma irq handler advances the queue. */

        if (likely(req != 0))
                list_add_tail(&req->queue, &ep->queue);

        /* pio OUT/IN on ep1..3 needs the per-endpoint data irq enabled */
        if (likely(!list_empty(&ep->queue))
                        && likely(ep->num != 0)
                        && !ep->dma
                        && !(dev->int_enable & INT_EPxDATASET (ep->num)))
                pio_irq_enable(dev, dev->regs, ep->num);

        spin_unlock_irqrestore(&dev->lock, flags);

        /* pci writes may still be posted */
        return status;
}
869
870 /* dequeue ALL requests */
871 static void nuke(struct goku_ep *ep, int status)
872 {
873         struct goku_request     *req;
874
875         ep->stopped = 1;
876         if (list_empty(&ep->queue))
877                 return;
878         if (ep->dma)
879                 abort_dma(ep, status);
880         while (!list_empty(&ep->queue)) {
881                 req = list_entry(ep->queue.next, struct goku_request, queue);
882                 done(ep, req, status);
883         }
884 }
885
886 /* dequeue JUST ONE request */
/* Endpoint dequeue() entry point (struct usb_ep_ops.dequeue): cancel
 * exactly one queued request.  A request at the head of a DMA queue has
 * its transfer aborted before completion, then the next queued request
 * (if any) is started.  Returns zero if the request was retired.
 */
static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
        struct goku_request     *req;
        struct goku_ep          *ep;
        struct goku_udc         *dev;
        unsigned long           flags;

        ep = container_of(_ep, struct goku_ep, ep);
        if (!_ep || !_req || (!ep->desc && ep->num != 0))
                return -EINVAL;
        dev = ep->dev;
        if (!dev->driver)
                return -ESHUTDOWN;

        /* we can't touch (dma) registers when suspended */
        if (dev->ep0state == EP0_SUSPEND)
                return -EBUSY;

        VDBG(dev, "%s %s %s %s %p\n", __FUNCTION__, _ep->name,
                ep->is_in ? "IN" : "OUT",
                ep->dma ? "dma" : "pio",
                _req);

        spin_lock_irqsave(&dev->lock, flags);

        /* make sure it's actually queued on this endpoint */
        list_for_each_entry (req, &ep->queue, queue) {
                if (&req->req == _req)
                        break;
        }
        if (&req->req != _req) {
                spin_unlock_irqrestore (&dev->lock, flags);
                return -EINVAL;
        }

        /* head of an active dma queue: stop the engine first, then
         * retire this request and restart dma on the next one
         */
        if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
                abort_dma(ep, -ECONNRESET);
                done(ep, req, -ECONNRESET);
                dma_advance(dev, ep);
        } else if (!list_empty(&req->queue))
                done(ep, req, -ECONNRESET);
        else
                req = NULL;     /* presumably completed meanwhile */
        spin_unlock_irqrestore(&dev->lock, flags);

        return req ? 0 : -EOPNOTSUPP;
}
934
935 /*-------------------------------------------------------------------------*/
936
/* Clear a halt on a non-control endpoint: restore the DATA0 toggle,
 * clear the hardware stall, and restart any i/o that was queued while
 * the endpoint was stopped (DMA restart, or PIO advance).
 * Caller holds dev->lock.
 */
static void goku_clear_halt(struct goku_ep *ep)
{
        // assert (ep->num !=0)
        VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
        /* reset the toggle before un-stalling, per the command ordering here */
        command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
        command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);
        if (ep->stopped) {
                ep->stopped = 0;
                if (ep->dma) {
                        struct goku_request     *req;

                        /* nothing queued while halted: nothing to restart */
                        if (list_empty(&ep->queue))
                                return;
                        req = list_entry(ep->queue.next, struct goku_request,
                                                queue);
                        (void) start_dma(ep, req);
                } else
                        pio_advance(ep);
        }
}
957
/* Endpoint set_halt() entry point (struct usb_ep_ops.set_halt).
 * value != 0 stalls the endpoint; value == 0 clears the stall.
 * ep0 can only be stalled (protocol stalls clear automatically on the
 * next SETUP).  Returns -EAGAIN when requests are still queued or an
 * IN endpoint still has data buffered, so the halt would lose data.
 */
static int goku_set_halt(struct usb_ep *_ep, int value)
{
        struct goku_ep  *ep;
        unsigned long   flags;
        int             retval = 0;

        if (!_ep)
                return -ENODEV;
        ep = container_of (_ep, struct goku_ep, ep);

        if (ep->num == 0) {
                if (value) {
                        ep->dev->ep0state = EP0_STALL;
                        ep->dev->ep[0].stopped = 1;
                } else
                        return -EINVAL;

        /* don't change EPxSTATUS_EP_INVALID to READY */
        } else if (!ep->desc) {
                DBG(ep->dev, "%s %s inactive?\n", __FUNCTION__, ep->ep.name);
                return -EINVAL;
        }

        spin_lock_irqsave(&ep->dev->lock, flags);
        if (!list_empty(&ep->queue))
                retval = -EAGAIN;
        else if (ep->is_in && value
                        /* data in (either) packet buffer? */
                        && (readl(&ep->dev->regs->DataSet)
                                        & DATASET_AB(ep->num)))
                retval = -EAGAIN;
        else if (!value)
                goku_clear_halt(ep);
        else {
                ep->stopped = 1;
                VDBG(ep->dev, "%s set halt\n", ep->ep.name);
                command(ep->dev->regs, COMMAND_STALL, ep->num);
                /* read back to flush the posted pci write */
                readl(ep->reg_status);
        }
        spin_unlock_irqrestore(&ep->dev->lock, flags);
        return retval;
}
1000
/* Endpoint fifo_status() entry point: report how many bytes are waiting
 * in an OUT endpoint's fifo (sum of both packet buffers, A and B).
 * IN endpoints are not supported here; per the comment below the
 * hardware size registers aren't sane for that direction.
 */
static int goku_fifo_status(struct usb_ep *_ep)
{
        struct goku_ep                  *ep;
        struct goku_udc_regs __iomem    *regs;
        u32                             size;

        if (!_ep)
                return -ENODEV;
        ep = container_of(_ep, struct goku_ep, ep);

        /* size is only reported sanely for OUT */
        if (ep->is_in)
                return -EOPNOTSUPP;

        /* ignores 16-byte dma buffer; SizeH == 0 */
        regs = ep->dev->regs;
        size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
        size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
        VDBG(ep->dev, "%s %s %u\n", __FUNCTION__, ep->ep.name, size);
        return size;
}
1022
/* Endpoint fifo_flush() entry point: discard any data sitting in the
 * endpoint's fifo.  Only issues FIFO_CLEAR when packet buffer A
 * actually holds data.  Note the side effect documented below: the
 * command also clears an endpoint halt.
 */
static void goku_fifo_flush(struct usb_ep *_ep)
{
        struct goku_ep                  *ep;
        struct goku_udc_regs __iomem    *regs;
        u32                             size;

        if (!_ep)
                return;
        ep = container_of(_ep, struct goku_ep, ep);
        VDBG(ep->dev, "%s %s\n", __FUNCTION__, ep->ep.name);

        /* don't change EPxSTATUS_EP_INVALID to READY */
        if (!ep->desc && ep->num != 0) {
                DBG(ep->dev, "%s %s inactive?\n", __FUNCTION__, ep->ep.name);
                return;
        }

        regs = ep->dev->regs;
        size = readl(&regs->EPxSizeLA[ep->num]);
        size &= DATASIZE;

        /* Non-desirable behavior:  FIFO_CLEAR also clears the
         * endpoint halt feature.  For OUT, we _could_ just read
         * the bytes out (PIO, if !ep->dma); for in, no choice.
         */
        if (size)
                command(regs, COMMAND_FIFO_CLEAR, ep->num);
}
1051
/* per-endpoint operations handed to gadget drivers through ep->ops */
static struct usb_ep_ops goku_ep_ops = {
        .enable         = goku_ep_enable,
        .disable        = goku_ep_disable,

        .alloc_request  = goku_alloc_request,
        .free_request   = goku_free_request,

        .alloc_buffer   = goku_alloc_buffer,
        .free_buffer    = goku_free_buffer,

        .queue          = goku_queue,
        .dequeue        = goku_dequeue,

        .set_halt       = goku_set_halt,
        .fifo_status    = goku_fifo_status,
        .fifo_flush     = goku_fifo_flush,
};
1069
1070 /*-------------------------------------------------------------------------*/
1071
/* this driver doesn't report the current USB frame number */
static int goku_get_frame(struct usb_gadget *_gadget)
{
        return -EOPNOTSUPP;
}
1076
/* device-level gadget operations; only get_frame is implemented */
static const struct usb_gadget_ops goku_ops = {
        .get_frame      = goku_get_frame,
        // no remote wakeup
        // not selfpowered
};
1082
1083 /*-------------------------------------------------------------------------*/
1084
1085 static inline char *dmastr(void)
1086 {
1087         if (use_dma == 0)
1088                 return "(dma disabled)";
1089         else if (use_dma == 2)
1090                 return "(dma IN and OUT)";
1091         else
1092                 return "(dma IN)";
1093 }
1094
1095 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
1096
1097 static const char proc_node_name [] = "driver/udc";
1098
1099 #define FOURBITS "%s%s%s%s"
1100 #define EIGHTBITS FOURBITS FOURBITS
1101
/* Append a one-line decode of an interrupt mask/status word to the
 * procfs output buffer, advancing *next and shrinking *size by the
 * number of characters written.
 */
static void
dump_intmask(const char *label, u32 mask, char **next, unsigned *size)
{
        int t;

        /* int_status is the same format ... */
        t = scnprintf(*next, *size,
                "%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
                label, mask,
                (mask & INT_PWRDETECT) ? " power" : "",
                (mask & INT_SYSERROR) ? " sys" : "",
                (mask & INT_MSTRDEND) ? " in-dma" : "",
                (mask & INT_MSTWRTMOUT) ? " wrtmo" : "",

                (mask & INT_MSTWREND) ? " out-dma" : "",
                (mask & INT_MSTWRSET) ? " wrset" : "",
                (mask & INT_ERR) ? " err" : "",
                (mask & INT_SOF) ? " sof" : "",

                (mask & INT_EP3NAK) ? " ep3nak" : "",
                (mask & INT_EP2NAK) ? " ep2nak" : "",
                (mask & INT_EP1NAK) ? " ep1nak" : "",
                (mask & INT_EP3DATASET) ? " ep3" : "",

                (mask & INT_EP2DATASET) ? " ep2" : "",
                (mask & INT_EP1DATASET) ? " ep1" : "",
                (mask & INT_STATUSNAK) ? " ep0snak" : "",
                (mask & INT_STATUS) ? " ep0status" : "",

                (mask & INT_SETUP) ? " setup" : "",
                (mask & INT_ENDPOINT0) ? " ep0" : "",
                (mask & INT_USBRESET) ? " reset" : "",
                (mask & INT_SUSPEND) ? " suspend" : "");
        *size -= t;
        *next += t;
}
1138
1139
/* procfs read handler for /proc/driver/udc: dump device, interrupt,
 * dma and per-endpoint state plus each endpoint's request queue.
 * Single-shot read (off != 0 returns 0, and *eof is set), so the whole
 * report must fit in one "count"-sized buffer.  Runs with local irqs
 * disabled while sampling the registers so the snapshot is coherent.
 */
static int
udc_proc_read(char *buffer, char **start, off_t off, int count,
                int *eof, void *_dev)
{
        char                            *buf = buffer;
        struct goku_udc                 *dev = _dev;
        struct goku_udc_regs __iomem    *regs = dev->regs;
        char                            *next = buf;
        unsigned                        size = count;
        unsigned long                   flags;
        int                             i, t, is_usb_connected;
        u32                             tmp;

        if (off != 0)
                return 0;

        local_irq_save(flags);

        /* basic device status */
        tmp = readl(&regs->power_detect);
        is_usb_connected = tmp & PW_DETECT;
        t = scnprintf(next, size,
                "%s - %s\n"
                "%s version: %s %s\n"
                "Gadget driver: %s\n"
                "Host %s, %s\n"
                "\n",
                pci_name(dev->pdev), driver_desc,
                driver_name, DRIVER_VERSION, dmastr(),
                dev->driver ? dev->driver->driver.name : "(none)",
                is_usb_connected
                        ? ((tmp & PW_PULLUP) ? "full speed" : "powered")
                        : "disconnected",
                ({char *tmp;
                switch(dev->ep0state){
                case EP0_DISCONNECT:    tmp = "ep0_disconnect"; break;
                case EP0_IDLE:          tmp = "ep0_idle"; break;
                case EP0_IN:            tmp = "ep0_in"; break;
                case EP0_OUT:           tmp = "ep0_out"; break;
                case EP0_STATUS:        tmp = "ep0_status"; break;
                case EP0_STALL:         tmp = "ep0_stall"; break;
                case EP0_SUSPEND:       tmp = "ep0_suspend"; break;
                default:                tmp = "ep0_?"; break;
                } tmp; })
                );
        size -= t;
        next += t;

        dump_intmask("int_status", readl(&regs->int_status), &next, &size);
        dump_intmask("int_enable", readl(&regs->int_enable), &next, &size);

        /* nothing more to report unless the link is up and driven */
        if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
                goto done;

        /* registers for (active) device and ep0 */
        t = scnprintf(next, size, "\nirqs %lu\ndataset %02x "
                        "single.bcs %02x.%02x state %x addr %u\n",
                        dev->irqs, readl(&regs->DataSet),
                        readl(&regs->EPxSingle), readl(&regs->EPxBCS),
                        readl(&regs->UsbState),
                        readl(&regs->address));
        size -= t;
        next += t;

        tmp = readl(&regs->dma_master);
        t = scnprintf(next, size,
                "dma %03X =" EIGHTBITS "%s %s\n", tmp,
                (tmp & MST_EOPB_DIS) ? " eopb-" : "",
                (tmp & MST_EOPB_ENA) ? " eopb+" : "",
                (tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
                (tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",

                (tmp & MST_RD_EOPB) ? " eopb" : "",
                (tmp & MST_RD_RESET) ? " in_reset" : "",
                (tmp & MST_WR_RESET) ? " out_reset" : "",
                (tmp & MST_RD_ENA) ? " IN" : "",

                (tmp & MST_WR_ENA) ? " OUT" : "",
                (tmp & MST_CONNECTION)
                        ? "ep1in/ep2out"
                        : "ep1out/ep2in");
        size -= t;
        next += t;

        /* dump endpoint queues */
        for (i = 0; i < 4; i++) {
                struct goku_ep          *ep = &dev->ep [i];
                struct goku_request     *req;
                int                     t;

                /* skip endpoints the gadget driver never enabled */
                if (i && !ep->desc)
                        continue;

                tmp = readl(ep->reg_status);
                t = scnprintf(next, size,
                        "%s %s max %u %s, irqs %lu, "
                        "status %02x (%s) " FOURBITS "\n",
                        ep->ep.name,
                        ep->is_in ? "in" : "out",
                        ep->ep.maxpacket,
                        ep->dma ? "dma" : "pio",
                        ep->irqs,
                        tmp, ({ char *s;
                        switch (tmp & EPxSTATUS_EP_MASK) {
                        case EPxSTATUS_EP_READY:
                                s = "ready"; break;
                        case EPxSTATUS_EP_DATAIN:
                                s = "packet"; break;
                        case EPxSTATUS_EP_FULL:
                                s = "full"; break;
                        case EPxSTATUS_EP_TX_ERR:       // host will retry
                                s = "tx_err"; break;
                        case EPxSTATUS_EP_RX_ERR:
                                s = "rx_err"; break;
                        case EPxSTATUS_EP_BUSY:         /* ep0 only */
                                s = "busy"; break;
                        case EPxSTATUS_EP_STALL:
                                s = "stall"; break;
                        case EPxSTATUS_EP_INVALID:      // these "can't happen"
                                s = "invalid"; break;
                        default:
                                s = "?"; break;
                        }; s; }),
                        (tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
                        (tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
                        (tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
                        (tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : ""
                        );
                if (t <= 0 || t > size)
                        goto done;
                size -= t;
                next += t;

                if (list_empty(&ep->queue)) {
                        t = scnprintf(next, size, "\t(nothing queued)\n");
                        if (t <= 0 || t > size)
                                goto done;
                        size -= t;
                        next += t;
                        continue;
                }
                list_for_each_entry(req, &ep->queue, queue) {
                        /* for the request dma is working on, report the
                         * controller's live progress counter instead of
                         * req.actual
                         */
                        if (ep->dma && req->queue.prev == &ep->queue) {
                                if (i == UDC_MSTRD_ENDPOINT)
                                        tmp = readl(&regs->in_dma_current);
                                else
                                        tmp = readl(&regs->out_dma_current);
                                tmp -= req->req.dma;
                                tmp++;
                        } else
                                tmp = req->req.actual;

                        t = scnprintf(next, size,
                                "\treq %p len %u/%u buf %p\n",
                                &req->req, tmp, req->req.length,
                                req->req.buf);
                        if (t <= 0 || t > size)
                                goto done;
                        size -= t;
                        next += t;
                }
        }

done:
        local_irq_restore(flags);
        *eof = 1;
        return count - size;
}
1308
1309 #endif  /* CONFIG_USB_GADGET_DEBUG_FILES */
1310
1311 /*-------------------------------------------------------------------------*/
1312
/* (Re)initialize the driver's software view of the device: gadget-level
 * state back to disconnected, and all four endpoint structures rebuilt
 * with their fixed names, register pointers, and empty queues.  ep0 is
 * special-cased at the end: no mode register, fixed maxpacket, and it
 * is removed from the gadget's ep_list (it's exposed as gadget.ep0).
 */
static void udc_reinit (struct goku_udc *dev)
{
        static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };

        unsigned i;

        INIT_LIST_HEAD (&dev->gadget.ep_list);
        dev->gadget.ep0 = &dev->ep [0].ep;
        dev->gadget.speed = USB_SPEED_UNKNOWN;
        dev->ep0state = EP0_DISCONNECT;
        dev->irqs = 0;

        for (i = 0; i < 4; i++) {
                struct goku_ep  *ep = &dev->ep[i];

                ep->num = i;
                ep->ep.name = names[i];
                ep->reg_fifo = &dev->regs->ep_fifo [i];
                ep->reg_status = &dev->regs->ep_status [i];
                ep->reg_mode = &dev->regs->ep_mode[i];

                ep->ep.ops = &goku_ep_ops;
                list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
                ep->dev = dev;
                INIT_LIST_HEAD (&ep->queue);

                ep_reset(NULL, ep);
        }

        dev->ep[0].reg_mode = NULL;
        dev->ep[0].ep.maxpacket = MAX_EP0_SIZE;
        list_del_init (&dev->ep[0].ep.ep_list);
}
1346
/* Quiesce the controller: drop the D+ pullup and power-detect logic,
 * mask all interrupts, then (after a settle delay) release chip reset
 * with the pullup still off so the host doesn't start enumerating.
 * The readl()s flush posted pci writes.
 */
static void udc_reset(struct goku_udc *dev)
{
        struct goku_udc_regs __iomem    *regs = dev->regs;

        writel(0, &regs->power_detect);
        writel(0, &regs->int_enable);
        readl(&regs->int_enable);
        dev->int_enable = 0;

        /* deassert reset, leave USB D+ at hi-Z (no pullup)
         * don't let INT_PWRDETECT sequence begin
         */
        udelay(250);
        writel(PW_RESETB, &regs->power_detect);
        readl(&regs->int_enable);
}
1363
/* Bring the controller to the "ready for enumeration" state: full
 * reset/reinit, program which standard requests the hardware handles
 * itself, clear the on-chip descriptor memory, then turn on the D+
 * pullup and the device-wide + ep0 interrupts.
 */
static void ep0_start(struct goku_udc *dev)
{
        struct goku_udc_regs __iomem    *regs = dev->regs;
        unsigned                        i;

        VDBG(dev, "%s\n", __FUNCTION__);

        udc_reset(dev);
        udc_reinit (dev);
        //writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, &regs->dma_master);

        /* hw handles set_address, set_feature, get_status; maybe more */
        writel(   G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
                | G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
                | G_REQMODE_GET_DESC
                | G_REQMODE_CLEAR_FEAT
                , &regs->reqmode);

        for (i = 0; i < 4; i++)
                dev->ep[i].irqs = 0;

        /* can't modify descriptors after writing UsbReady */
        for (i = 0; i < DESC_LEN; i++)
                writel(0, &regs->descriptors[i]);
        writel(0, &regs->UsbReady);

        /* expect ep0 requests when the host drops reset */
        writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
        dev->int_enable = INT_DEVWIDE | INT_EP0;
        writel(dev->int_enable, &dev->regs->int_enable);
        readl(&regs->int_enable);
        dev->gadget.speed = USB_SPEED_FULL;
        dev->ep0state = EP0_IDLE;
}
1398
1399 static void udc_enable(struct goku_udc *dev)
1400 {
1401         /* start enumeration now, or after power detect irq */
1402         if (readl(&dev->regs->power_detect) & PW_DETECT)
1403                 ep0_start(dev);
1404         else {
1405                 DBG(dev, "%s\n", __FUNCTION__);
1406                 dev->int_enable = INT_PWRDETECT;
1407                 writel(dev->int_enable, &dev->regs->int_enable);
1408         }
1409 }
1410
1411 /*-------------------------------------------------------------------------*/
1412
1413 /* keeping it simple:
1414  * - one bus driver, initted first;
1415  * - one function driver, initted second
1416  */
1417
/* state for the single controller instance, set at PCI probe time */
static struct goku_udc  *the_controller;
1419
1420 /* when a driver is successfully registered, it will receive
1421  * control requests including set_configuration(), which enables
1422  * non-control requests.  then usb traffic follows until a
1423  * disconnect is reported.  then a host may connect again, or
1424  * the driver might get unbound.
1425  */
/* Bind a gadget function driver to the controller.  The controller is
 * full-speed only and requires bind/disconnect/setup callbacks.  On
 * successful bind, host detection and ep0 handling are enabled.
 * Returns zero, or a negative errno (including the driver's own bind
 * failure, after unhooking it again).
 */
int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
        struct goku_udc *dev = the_controller;
        int                     retval;

        if (!driver
                        || driver->speed != USB_SPEED_FULL
                        || !driver->bind
                        || !driver->disconnect
                        || !driver->setup)
                return -EINVAL;
        if (!dev)
                return -ENODEV;
        if (dev->driver)
                return -EBUSY;

        /* hook up the driver */
        driver->driver.bus = NULL;
        dev->driver = driver;
        dev->gadget.dev.driver = &driver->driver;
        retval = driver->bind(&dev->gadget);
        if (retval) {
                DBG(dev, "bind to driver %s --> error %d\n",
                                driver->driver.name, retval);
                /* unhook so a different driver may register later */
                dev->driver = NULL;
                dev->gadget.dev.driver = NULL;
                return retval;
        }

        /* then enable host detection and ep0; and we're ready
         * for set_configuration as well as eventual disconnect.
         */
        udc_enable(dev);

        DBG(dev, "registered gadget driver '%s'\n", driver->driver.name);
        return 0;
}
EXPORT_SYMBOL(usb_gadget_register_driver);
1464
/* Stop all USB activity: reset the hardware, fail every queued request
 * with -ESHUTDOWN, and tell the gadget driver about the disconnect
 * (unless the link was already down).  Called with dev->lock held; the
 * lock is dropped around the driver's disconnect() callback.  If a
 * driver is still bound afterwards, re-arm for the next connection.
 */
static void
stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver)
{
        unsigned        i;

        DBG (dev, "%s\n", __FUNCTION__);

        /* no disconnect callback if we never got connected */
        if (dev->gadget.speed == USB_SPEED_UNKNOWN)
                driver = NULL;

        /* disconnect gadget driver after quiescing hw and the driver */
        udc_reset (dev);
        for (i = 0; i < 4; i++)
                nuke(&dev->ep [i], -ESHUTDOWN);
        if (driver) {
                spin_unlock(&dev->lock);
                driver->disconnect(&dev->gadget);
                spin_lock(&dev->lock);
        }

        if (dev->driver)
                udc_enable(dev);
}
1488
/* Unbind the currently-registered gadget driver: quiesce hardware and
 * fail outstanding i/o under the lock, then call the driver's unbind()
 * callback.  Returns zero, or -EINVAL if this driver isn't the one
 * registered (or lacks an unbind callback).
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
        struct goku_udc *dev = the_controller;
        unsigned long   flags;

        if (!dev)
                return -ENODEV;
        if (!driver || driver != dev->driver || !driver->unbind)
                return -EINVAL;

        /* clearing dev->driver first keeps stop_activity() from
         * re-enabling the controller for this driver
         */
        spin_lock_irqsave(&dev->lock, flags);
        dev->driver = NULL;
        stop_activity(dev, driver);
        spin_unlock_irqrestore(&dev->lock, flags);

        driver->unbind(&dev->gadget);

        DBG(dev, "unregistered driver '%s'\n", driver->driver.name);
        return 0;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);
1510
1511
1512 /*-------------------------------------------------------------------------*/
1513
/* Handle a SETUP packet on ep0: reassemble the 8-byte request from the
 * per-field registers, handle CLEAR_FEATURE(ENDPOINT_HALT) and device
 * remote-wakeup clears in software, and delegate everything else to the
 * gadget driver's setup() callback (dropping dev->lock around it).
 * A negative setup() result, or a malformed request, stalls ep0.
 * Called from the irq handler with dev->lock held.
 */
static void ep0_setup(struct goku_udc *dev)
{
        struct goku_udc_regs __iomem    *regs = dev->regs;
        struct usb_ctrlrequest          ctrl;
        int                             tmp;

        /* read SETUP packet and enter DATA stage */
        ctrl.bRequestType = readl(&regs->bRequestType);
        ctrl.bRequest = readl(&regs->bRequest);
        ctrl.wValue  = cpu_to_le16((readl(&regs->wValueH)  << 8)
                                        | readl(&regs->wValueL));
        ctrl.wIndex  = cpu_to_le16((readl(&regs->wIndexH)  << 8)
                                        | readl(&regs->wIndexL));
        ctrl.wLength = cpu_to_le16((readl(&regs->wLengthH) << 8)
                                        | readl(&regs->wLengthL));
        writel(0, &regs->SetupRecv);

        /* a new SETUP cancels whatever ep0 was doing before */
        nuke(&dev->ep[0], 0);
        dev->ep[0].stopped = 0;
        if (likely(ctrl.bRequestType & USB_DIR_IN)) {
                dev->ep[0].is_in = 1;
                dev->ep0state = EP0_IN;
                /* detect early status stages */
                writel(ICONTROL_STATUSNAK, &dev->regs->IntControl);
        } else {
                dev->ep[0].is_in = 0;
                dev->ep0state = EP0_OUT;

                /* NOTE:  CLEAR_FEATURE is done in software so that we can
                 * synchronize transfer restarts after bulk IN stalls.  data
                 * won't even enter the fifo until the halt is cleared.
                 */
                switch (ctrl.bRequest) {
                case USB_REQ_CLEAR_FEATURE:
                        switch (ctrl.bRequestType) {
                        case USB_RECIP_ENDPOINT:
                                tmp = le16_to_cpu(ctrl.wIndex) & 0x0f;
                                /* active endpoint */
                                if (tmp > 3 || (!dev->ep[tmp].desc && tmp != 0))
                                        goto stall;
                                /* direction bit must match the endpoint */
                                if (ctrl.wIndex & __constant_cpu_to_le16(
                                                USB_DIR_IN)) {
                                        if (!dev->ep[tmp].is_in)
                                                goto stall;
                                } else {
                                        if (dev->ep[tmp].is_in)
                                                goto stall;
                                }
                                if (ctrl.wValue != __constant_cpu_to_le16(
                                                USB_ENDPOINT_HALT))
                                        goto stall;
                                if (tmp)
                                        goku_clear_halt(&dev->ep[tmp]);
succeed:
                                /* start ep0out status stage */
                                writel(~(1<<0), &regs->EOP);
                                dev->ep[0].stopped = 1;
                                dev->ep0state = EP0_STATUS;
                                return;
                        case USB_RECIP_DEVICE:
                                /* device remote wakeup: always clear */
                                if (ctrl.wValue != __constant_cpu_to_le16(1))
                                        goto stall;
                                VDBG(dev, "clear dev remote wakeup\n");
                                goto succeed;
                        case USB_RECIP_INTERFACE:
                                goto stall;
                        default:                /* pass to gadget driver */
                                break;
                        }
                        break;
                default:
                        break;
                }
        }

#ifdef USB_TRACE
        VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
                ctrl.bRequestType, ctrl.bRequest,
                le16_to_cpu(ctrl.wValue), le16_to_cpu(ctrl.wIndex),
                le16_to_cpu(ctrl.wLength));
#endif

        /* hw wants to know when we're configured (or not) */
        dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION
                                && ctrl.bRequestType == USB_RECIP_DEVICE);
        if (unlikely(dev->req_config))
                dev->configured = (ctrl.wValue != __constant_cpu_to_le16(0));

        /* delegate everything to the gadget driver.
         * it may respond after this irq handler returns.
         */
        spin_unlock (&dev->lock);
        tmp = dev->driver->setup(&dev->gadget, &ctrl);
        spin_lock (&dev->lock);
        if (unlikely(tmp < 0)) {
stall:
#ifdef USB_TRACE
                VDBG(dev, "req %02x.%02x protocol STALL; err %d\n",
                                ctrl.bRequestType, ctrl.bRequest, tmp);
#endif
                command(regs, COMMAND_STALL, 0);
                dev->ep[0].stopped = 1;
                dev->ep0state = EP0_STALL;
        }

        /* expect at least one data or status stage irq */
}
1622
/* Acknowledge interrupt source(s): clear the bit(s) in the cached status
 * word, write-to-clear them in the hardware int_status register, and note
 * that this IRQ was handled.  Uses 'stat', 'regs', and 'handled' from the
 * caller's scope (goku_irq only; #undef'd below it).
 *
 * do-while(0) makes the macro safe in any statement context, and the
 * parenthesized argument makes compound masks work: with the old
 * expansion, ACK(INT_STATUSNAK|INT_ENDPOINT0) turned '~irqbit' into
 * '~INT_STATUSNAK|INT_ENDPOINT0', applying '~' to only the first term,
 * so INT_ENDPOINT0 was never actually cleared or acked.
 */
#define ACK(irqbit) do { \
		stat &= ~(irqbit); \
		writel(~(irqbit), &regs->int_status); \
		handled = 1; \
	} while (0)
1628
/* Interrupt handler: dispatches device-wide events (system error, VBUS
 * connect/disconnect, suspend/resume, USB reset), ep0 setup/data/status
 * stages, DMA completions, and PIO endpoint data.  Rescans the status
 * register up to 'rescans' times so a burst of events is drained in a
 * single hardware interrupt.
 */
static irqreturn_t goku_irq(int irq, void *_dev)
{
	struct goku_udc			*dev = _dev;
	struct goku_udc_regs __iomem	*regs = dev->regs;
	struct goku_ep			*ep;
	u32				stat, handled = 0;
	unsigned			i, rescans = 5;

	spin_lock(&dev->lock);

rescan:
	/* only consider sources we have enabled */
	stat = readl(&regs->int_status) & dev->int_enable;
	if (!stat)
		goto done;
	dev->irqs++;

	/* device-wide irqs */
	if (unlikely(stat & INT_DEVWIDE)) {
		if (stat & INT_SYSERROR) {
			/* fatal: tear down all activity and detach the
			 * gadget driver so nothing restarts the hardware
			 */
			ERROR(dev, "system error\n");
			stop_activity(dev, dev->driver);
			stat = 0;
			handled = 1;
			// FIXME have a neater way to prevent re-enumeration
			dev->driver = NULL;
			goto done;
		}
		if (stat & INT_PWRDETECT) {
			/* VBUS power level changed: connect or disconnect */
			writel(~stat, &regs->int_status);
			if (readl(&dev->regs->power_detect) & PW_DETECT) {
				VDBG(dev, "connect\n");
				ep0_start(dev);
			} else {
				DBG(dev, "disconnect\n");
				if (dev->gadget.speed == USB_SPEED_FULL)
					stop_activity(dev, dev->driver);
				dev->ep0state = EP0_DISCONNECT;
				/* until the next connect, listen only for
				 * device-wide events
				 */
				dev->int_enable = INT_DEVWIDE;
				writel(dev->int_enable, &dev->regs->int_enable);
			}
			stat = 0;
			handled = 1;
			goto done;
		}
		if (stat & INT_SUSPEND) {
			ACK(INT_SUSPEND);
			/* ep0 status register distinguishes suspend from
			 * resume for this shared irq bit
			 */
			if (readl(&regs->ep_status[0]) & EPxSTATUS_SUSPEND) {
				switch (dev->ep0state) {
				case EP0_DISCONNECT:
				case EP0_SUSPEND:
					goto pm_next;
				default:
					break;
				}
				DBG(dev, "USB suspend\n");
				dev->ep0state = EP0_SUSPEND;
				if (dev->gadget.speed != USB_SPEED_UNKNOWN
						&& dev->driver
						&& dev->driver->suspend) {
					/* don't hold our lock across the
					 * gadget driver's callback
					 */
					spin_unlock(&dev->lock);
					dev->driver->suspend(&dev->gadget);
					spin_lock(&dev->lock);
				}
			} else {
				if (dev->ep0state != EP0_SUSPEND) {
					DBG(dev, "bogus USB resume %d\n",
						dev->ep0state);
					goto pm_next;
				}
				DBG(dev, "USB resume\n");
				dev->ep0state = EP0_IDLE;
				if (dev->gadget.speed != USB_SPEED_UNKNOWN
						&& dev->driver
						&& dev->driver->resume) {
					spin_unlock(&dev->lock);
					dev->driver->resume(&dev->gadget);
					spin_lock(&dev->lock);
				}
			}
		}
pm_next:
		if (stat & INT_USBRESET) {		/* hub reset done */
			ACK(INT_USBRESET);
			INFO(dev, "USB reset done, gadget %s\n",
				dev->driver->driver.name);
		}
		// and INT_ERR on some endpoint's crc/bitstuff/... problem
	}

	/* progress ep0 setup, data, or status stages.
	 * no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs
	 */
	if (stat & INT_SETUP) {
		ACK(INT_SETUP);
		dev->ep[0].irqs++;
		ep0_setup(dev);
	}
	if (stat & INT_STATUSNAK) {
		ACK(INT_STATUSNAK|INT_ENDPOINT0);
		if (dev->ep0state == EP0_IN) {
			/* host wants the status stage: drop any leftover
			 * queued IN data and move ep0 to EP0_STATUS
			 */
			ep = &dev->ep[0];
			ep->irqs++;
			nuke(ep, 0);
			writel(~(1<<0), &regs->EOP);
			dev->ep0state = EP0_STATUS;
		}
	}
	if (stat & INT_ENDPOINT0) {
		ACK(INT_ENDPOINT0);
		ep = &dev->ep[0];
		ep->irqs++;
		pio_advance(ep);
	}

	/* dma completion */
	if (stat & INT_MSTRDEND) {	/* IN */
		ACK(INT_MSTRDEND);
		ep = &dev->ep[UDC_MSTRD_ENDPOINT];
		ep->irqs++;
		dma_advance(dev, ep);
	}
	if (stat & INT_MSTWREND) {	/* OUT */
		ACK(INT_MSTWREND);
		ep = &dev->ep[UDC_MSTWR_ENDPOINT];
		ep->irqs++;
		dma_advance(dev, ep);
	}
	if (stat & INT_MSTWRTMOUT) {	/* OUT */
		ACK(INT_MSTWRTMOUT);
		ep = &dev->ep[UDC_MSTWR_ENDPOINT];
		ep->irqs++;
		ERROR(dev, "%s write timeout ?\n", ep->ep.name);
		// reset dma? then dma_advance()
	}

	/* pio: data-set irqs for ep1..ep3 */
	for (i = 1; i < 4; i++) {
		u32		tmp = INT_EPxDATASET(i);

		if (!(stat & tmp))
			continue;
		ep = &dev->ep[i];
		pio_advance(ep);
		/* nothing left queued: stop taking data irqs for this ep */
		if (list_empty (&ep->queue))
			pio_irq_disable(dev, regs, i);
		stat &= ~tmp;
		handled = 1;
		ep->irqs++;
	}

	if (rescans--)
		goto rescan;

done:
	/* readback presumably flushes posted writes to the device */
	(void)readl(&regs->int_enable);
	spin_unlock(&dev->lock);
	if (stat)
		DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat,
				readl(&regs->int_status), dev->int_enable);
	return IRQ_RETVAL(handled);
}
1790
1791 #undef ACK
1792
1793 /*-------------------------------------------------------------------------*/
1794
/* release callback for the gadget's struct device: the device is
 * embedded in the goku_udc that probe allocated, so free that here.
 */
static void gadget_release(struct device *_dev)
{
	kfree(dev_get_drvdata(_dev));
}
1801
1802 /* tear down the binding between this driver and the pci device */
1803
1804 static void goku_remove(struct pci_dev *pdev)
1805 {
1806         struct goku_udc         *dev = pci_get_drvdata(pdev);
1807
1808         DBG(dev, "%s\n", __FUNCTION__);
1809
1810         BUG_ON(dev->driver);
1811
1812 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
1813         remove_proc_entry(proc_node_name, NULL);
1814 #endif
1815         if (dev->regs)
1816                 udc_reset(dev);
1817         if (dev->got_irq)
1818                 free_irq(pdev->irq, dev);
1819         if (dev->regs)
1820                 iounmap(dev->regs);
1821         if (dev->got_region)
1822                 release_mem_region(pci_resource_start (pdev, 0),
1823                                 pci_resource_len (pdev, 0));
1824         if (dev->enabled)
1825                 pci_disable_device(pdev);
1826         device_unregister(&dev->gadget.dev);
1827
1828         pci_set_drvdata(pdev, NULL);
1829         dev->regs = NULL;
1830         the_controller = NULL;
1831
1832         INFO(dev, "unbind\n");
1833 }
1834
1835 /* wrap this driver around the specified pci device, but
1836  * don't respond over USB until a gadget driver binds to us.
1837  */
1838
1839 static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1840 {
1841         struct goku_udc         *dev = NULL;
1842         unsigned long           resource, len;
1843         void __iomem            *base = NULL;
1844         int                     retval;
1845
1846         /* if you want to support more than one controller in a system,
1847          * usb_gadget_driver_{register,unregister}() must change.
1848          */
1849         if (the_controller) {
1850                 WARN(dev, "ignoring %s\n", pci_name(pdev));
1851                 return -EBUSY;
1852         }
1853         if (!pdev->irq) {
1854                 printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
1855                 retval = -ENODEV;
1856                 goto done;
1857         }
1858
1859         /* alloc, and start init */
1860         dev = kmalloc (sizeof *dev, GFP_KERNEL);
1861         if (dev == NULL){
1862                 pr_debug("enomem %s\n", pci_name(pdev));
1863                 retval = -ENOMEM;
1864                 goto done;
1865         }
1866
1867         memset(dev, 0, sizeof *dev);
1868         spin_lock_init(&dev->lock);
1869         dev->pdev = pdev;
1870         dev->gadget.ops = &goku_ops;
1871
1872         /* the "gadget" abstracts/virtualizes the controller */
1873         strcpy(dev->gadget.dev.bus_id, "gadget");
1874         dev->gadget.dev.parent = &pdev->dev;
1875         dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
1876         dev->gadget.dev.release = gadget_release;
1877         dev->gadget.name = driver_name;
1878
1879         /* now all the pci goodies ... */
1880         retval = pci_enable_device(pdev);
1881         if (retval < 0) {
1882                 DBG(dev, "can't enable, %d\n", retval);
1883                 goto done;
1884         }
1885         dev->enabled = 1;
1886
1887         resource = pci_resource_start(pdev, 0);
1888         len = pci_resource_len(pdev, 0);
1889         if (!request_mem_region(resource, len, driver_name)) {
1890                 DBG(dev, "controller already in use\n");
1891                 retval = -EBUSY;
1892                 goto done;
1893         }
1894         dev->got_region = 1;
1895
1896         base = ioremap_nocache(resource, len);
1897         if (base == NULL) {
1898                 DBG(dev, "can't map memory\n");
1899                 retval = -EFAULT;
1900                 goto done;
1901         }
1902         dev->regs = (struct goku_udc_regs __iomem *) base;
1903
1904         pci_set_drvdata(pdev, dev);
1905         INFO(dev, "%s\n", driver_desc);
1906         INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
1907         INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
1908
1909         /* init to known state, then setup irqs */
1910         udc_reset(dev);
1911         udc_reinit (dev);
1912         if (request_irq(pdev->irq, goku_irq, IRQF_SHARED/*|IRQF_SAMPLE_RANDOM*/,
1913                         driver_name, dev) != 0) {
1914                 DBG(dev, "request interrupt %d failed\n", pdev->irq);
1915                 retval = -EBUSY;
1916                 goto done;
1917         }
1918         dev->got_irq = 1;
1919         if (use_dma)
1920                 pci_set_master(pdev);
1921
1922
1923 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
1924         create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev);
1925 #endif
1926
1927         /* done */
1928         the_controller = dev;
1929         device_register(&dev->gadget.dev);
1930
1931         return 0;
1932
1933 done:
1934         if (dev)
1935                 goku_remove (pdev);
1936         return retval;
1937 }
1938
1939
1940 /*-------------------------------------------------------------------------*/
1941
/* match Toshiba's TC86C001 UDC by vendor/device id, restricted to the
 * USB serial-bus class with programming interface 0xfe (presumably the
 * device-side function of this part — the chip also has host-side
 * functions handled by other drivers)
 */
static struct pci_device_id pci_ids [] = { {
	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask =	~0,
	.vendor =	0x102f,		/* Toshiba */
	.device =	0x0107,		/* this UDC */
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,

}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
1952 MODULE_DEVICE_TABLE (pci, pci_ids);
1953
/* glue binding this driver's probe/remove to the PCI core */
static struct pci_driver goku_pci_driver = {
	.name =		(char *) driver_name,
	.id_table =	pci_ids,

	.probe =	goku_probe,
	.remove =	goku_remove,

	/* FIXME add power management support */
};
1963
1964 static int __init init (void)
1965 {
1966         return pci_register_driver (&goku_pci_driver);
1967 }
1968 module_init (init);
1969
1970 static void __exit cleanup (void)
1971 {
1972         pci_unregister_driver (&goku_pci_driver);
1973 }
1974 module_exit (cleanup);