Merge branch 'rbd-sysfs' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph...
[pandora-kernel.git] / drivers / usb / host / isp1362-hcd.c
1 /*
2  * ISP1362 HCD (Host Controller Driver) for USB.
3  *
4  * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
5  *
6  * Derived from the SL811 HCD, rewritten for ISP116x.
7  * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
8  *
9  * Portions:
10  * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
11  * Copyright (C) 2004 David Brownell
12  */
13
14 /*
15  * The ISP1362 chip requires a large delay (300ns and 462ns) between
16  * accesses to the address and data register.
17  * The following timing options exist:
18  *
19  * 1. Configure your memory controller to add such delays if it can (the best)
20  * 2. Implement platform-specific delay function possibly
21  *    combined with configuring the memory controller; see
22  *    include/linux/usb_isp1362.h for more info.
23  * 3. Use ndelay (easiest, poorest).
24  *
25  * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
26  * platform specific section of isp1362.h to select the appropriate variant.
27  *
28  * Also note that according to the Philips "ISP1362 Errata" document
29  * Rev 1.00 from 27 May data corruption may occur when the #WR signal
30  * is reasserted (even with #CS deasserted) within 132ns after a
31  * write cycle to any controller register. If the hardware doesn't
32  * implement the recommended fix (gating the #WR with #CS) software
33  * must ensure that no further write cycle (not necessarily to the chip!)
34  * is issued by the CPU within this interval.
 *
36  * For PXA25x this can be ensured by using VLIO with the maximum
37  * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
38  */
39
40 #ifdef CONFIG_USB_DEBUG
41 # define ISP1362_DEBUG
42 #else
43 # undef ISP1362_DEBUG
44 #endif
45
46 /*
47  * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
48  * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
49  * requests are carried out in separate frames. This will delay any SETUP
50  * packets until the start of the next frame so that this situation is
51  * unlikely to occur (and makes usbtest happy running with a PXA255 target
52  * device).
53  */
54 #undef BUGGY_PXA2XX_UDC_USBTEST
55
56 #undef PTD_TRACE
57 #undef URB_TRACE
58 #undef VERBOSE
59 #undef REGISTERS
60
61 /* This enables a memory test on the ISP1362 chip memory to make sure the
62  * chip access timing is correct.
63  */
64 #undef CHIP_BUFFER_TEST
65
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/kernel.h>
69 #include <linux/delay.h>
70 #include <linux/ioport.h>
71 #include <linux/sched.h>
72 #include <linux/slab.h>
73 #include <linux/errno.h>
74 #include <linux/init.h>
75 #include <linux/list.h>
76 #include <linux/interrupt.h>
77 #include <linux/usb.h>
78 #include <linux/usb/isp1362.h>
79 #include <linux/usb/hcd.h>
80 #include <linux/platform_device.h>
81 #include <linux/pm.h>
82 #include <linux/io.h>
83 #include <linux/bitmap.h>
84
85 #include <asm/irq.h>
86 #include <asm/system.h>
87 #include <asm/byteorder.h>
88 #include <asm/unaligned.h>
89
90 static int dbg_level;
91 #ifdef ISP1362_DEBUG
92 module_param(dbg_level, int, 0644);
93 #else
94 module_param(dbg_level, int, 0);
95 #define STUB_DEBUG_FILE
96 #endif
97
98 #include "../core/usb.h"
99 #include "isp1362.h"
100
101
102 #define DRIVER_VERSION  "2005-04-04"
103 #define DRIVER_DESC     "ISP1362 USB Host Controller Driver"
104
105 MODULE_DESCRIPTION(DRIVER_DESC);
106 MODULE_LICENSE("GPL");
107
108 static const char hcd_name[] = "isp1362-hcd";
109
110 static void isp1362_hc_stop(struct usb_hcd *hcd);
111 static int isp1362_hc_start(struct usb_hcd *hcd);
112
113 /*-------------------------------------------------------------------------*/
114
115 /*
116  * When called from the interrupthandler only isp1362_hcd->irqenb is modified,
117  * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
118  * completion.
119  * We don't need a 'disable' counterpart, since interrupts will be disabled
120  * only by the interrupt handler.
121  */
122 static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
123 {
124         if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
125                 return;
126         if (mask & ~isp1362_hcd->irqenb)
127                 isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
128         isp1362_hcd->irqenb |= mask;
129         if (isp1362_hcd->irq_active)
130                 return;
131         isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
132 }
133
134 /*-------------------------------------------------------------------------*/
135
136 static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
137                                                      u16 offset)
138 {
139         struct isp1362_ep_queue *epq = NULL;
140
141         if (offset < isp1362_hcd->istl_queue[1].buf_start)
142                 epq = &isp1362_hcd->istl_queue[0];
143         else if (offset < isp1362_hcd->intl_queue.buf_start)
144                 epq = &isp1362_hcd->istl_queue[1];
145         else if (offset < isp1362_hcd->atl_queue.buf_start)
146                 epq = &isp1362_hcd->intl_queue;
147         else if (offset < isp1362_hcd->atl_queue.buf_start +
148                    isp1362_hcd->atl_queue.buf_size)
149                 epq = &isp1362_hcd->atl_queue;
150
151         if (epq)
152                 DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
153         else
154                 pr_warning("%s: invalid PTD $%04x\n", __func__, offset);
155
156         return epq;
157 }
158
159 static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
160 {
161         int offset;
162
163         if (index * epq->blk_size > epq->buf_size) {
164                 pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
165                      epq->buf_size / epq->blk_size);
166                 return -EINVAL;
167         }
168         offset = epq->buf_start + index * epq->blk_size;
169         DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
170
171         return offset;
172 }
173
174 /*-------------------------------------------------------------------------*/
175
/*
 * Compute the largest transfer size that fits in the chip buffer space
 * currently free on @epq, capped at MAX_XFER_SIZE and at @size (the
 * bytes remaining in the URB). When a partial transfer results, it is
 * trimmed down to a multiple of the max packet size @mps so the data
 * toggle sequence stays intact.
 */
static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
				    int mps)
{
	u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);

	/*
	 * Limit to the free blocks minus one PTD header.
	 * NOTE(review): if epq->buf_avail were 0 this subtraction would wrap
	 * (unsigned); callers appear to claim buffers right after this, where
	 * an empty queue fails with -ENOMEM anyway — confirm.
	 */
	xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
	/* partial transfer: round down to a whole number of packets */
	if (xfer_size < size && xfer_size % mps)
		xfer_size -= xfer_size % mps;

	return xfer_size;
}
187
/*
 * Reserve a contiguous run of buffer blocks on @epq large enough for a
 * PTD header plus @len payload bytes for endpoint @ep.
 *
 * Returns the index of the first claimed block, -ENOMEM when the queue
 * has no free blocks at all, or -EOVERFLOW when no sufficiently large
 * contiguous run exists.
 */
static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
			     struct isp1362_ep *ep, u16 len)
{
	int ptd_offset = -EINVAL;
	/* ceil((len + PTD_HEADER_SIZE) / blk_size) blocks are required */
	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
	int found;

	BUG_ON(len > epq->buf_size);

	if (!epq->buf_avail)
		return -ENOMEM;

	/* an endpoint must never hold two claims at once */
	if (ep->num_ptds)
		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
		    epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
	BUG_ON(ep->num_ptds != 0);

	/* search the allocation bitmap for a contiguous free run */
	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
						num_ptds, 0);
	if (found >= epq->buf_count)
		return -EOVERFLOW;

	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
	ptd_offset = get_ptd_offset(epq, found);
	WARN_ON(ptd_offset < 0);
	/* record the claim on the endpoint and update queue accounting */
	ep->ptd_offset = ptd_offset;
	ep->num_ptds += num_ptds;
	epq->buf_avail -= num_ptds;
	BUG_ON(epq->buf_avail > epq->buf_count);
	ep->ptd_index = found;
	bitmap_set(&epq->buf_map, found, num_ptds);
	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);

	return found;
}
226
/*
 * Return the buffer blocks claimed by @ep to @epq. The released blocks
 * are also flagged in skip_map so the chip will not process the stale
 * PTD contents. Resets the endpoint's claim bookkeeping.
 */
static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
	int index = ep->ptd_index;
	int last = ep->ptd_index + ep->num_ptds;

	if (last > epq->buf_count)
		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
		    __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
		    ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
		    epq->buf_map, epq->skip_map);
	BUG_ON(last > epq->buf_count);

	/* free each block and mark it to be skipped by the chip */
	for (; index < last; index++) {
		__clear_bit(index, &epq->buf_map);
		__set_bit(index, &epq->skip_map);
	}
	epq->buf_avail += ep->num_ptds;
	/* one fewer PTD scheduled on this queue */
	epq->ptd_count--;

	BUG_ON(epq->buf_avail > epq->buf_count);
	BUG_ON(epq->ptd_count > epq->buf_count);

	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
	    __func__, epq->name,
	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
	    epq->buf_map, epq->skip_map);

	/* endpoint no longer owns any blocks */
	ep->num_ptds = 0;
	ep->ptd_offset = -EINVAL;
	ep->ptd_index = -EINVAL;
}
259
260 /*-------------------------------------------------------------------------*/
261
/*
 * Fill in ep->ptd (and ep->data/ep->length) for the next transaction of
 * @urb on endpoint @ep, based on the endpoint's PID state machine
 * (ep->nextpid). @fno is the isochronous frame descriptor index; it is
 * only used for ISO pipes.
 */
static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
			u16 fno)
{
	struct ptd *ptd;
	int toggle;
	int dir;
	u16 len;
	/* bytes of the URB still to be transferred */
	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;

	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);

	ptd = &ep->ptd;

	/* default payload pointer: resume where the URB left off */
	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;

	switch (ep->nextpid) {
	case USB_PID_IN:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
		dir = PTD_DIR_IN;
		if (usb_pipecontrol(urb->pipe)) {
			/* control DATA stage: at most one packet per PTD */
			len = min_t(size_t, ep->maxpacket, buf_len);
		} else if (usb_pipeisoc(urb->pipe)) {
			/* ISO IN: length and buffer come from the frame descriptor */
			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
		} else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		DBG(1, "%s: IN    len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_OUT:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
		dir = PTD_DIR_OUT;
		if (usb_pipecontrol(urb->pipe))
			len = min_t(size_t, ep->maxpacket, buf_len);
		else if (usb_pipeisoc(urb->pipe))
			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
		else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		if (len == 0)
			pr_info("%s: Sending ZERO packet: %d\n", __func__,
			     urb->transfer_flags & URB_ZERO_PACKET);
		DBG(1, "%s: OUT   len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_SETUP:
		/* SETUP always uses DATA0 and the 8-byte setup packet */
		toggle = 0;
		dir = PTD_DIR_SETUP;
		len = sizeof(struct usb_ctrlrequest);
		DBG(1, "%s: SETUP len %d\n", __func__, len);
		ep->data = urb->setup_packet;
		break;
	case USB_PID_ACK:
		/* control STATUS stage: zero-length, opposite direction, DATA1 */
		toggle = 1;
		len = 0;
		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
			PTD_DIR_OUT : PTD_DIR_IN;
		DBG(1, "%s: ACK   len %d\n", __func__, len);
		break;
	default:
		toggle = dir = len = 0;
		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
		BUG_ON(1);
	}

	ep->length = len;
	if (!len)
		ep->data = NULL;

	/* assemble the PTD header words for the chip */
	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
		PTD_EP(ep->epnum);
	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));

	if (usb_pipeint(urb->pipe)) {
		/* periodic: encode start frame (branch) and polling rate */
		ptd->faddr |= PTD_SF_INT(ep->branch);
		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
	}
	if (usb_pipeisoc(urb->pipe))
		ptd->faddr |= PTD_SF_ISO(fno);

	DBG(1, "%s: Finished\n", __func__);
}
349
350 static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
351                               struct isp1362_ep_queue *epq)
352 {
353         struct ptd *ptd = &ep->ptd;
354         int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
355
356         _BUG_ON(ep->ptd_offset < 0);
357
358         prefetch(ptd);
359         isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
360         if (len)
361                 isp1362_write_buffer(isp1362_hcd, ep->data,
362                                      ep->ptd_offset + PTD_HEADER_SIZE, len);
363
364         dump_ptd(ptd);
365         dump_ptd_out_data(ptd, ep->data);
366 }
367
/*
 * Read back a completed PTD header from chip memory and, for IN
 * transfers, copy the received payload into ep->data. Also removes
 * @ep from the queue's active list.
 */
static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			     struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int act_len;

	WARN_ON(list_empty(&ep->active));
	BUG_ON(ep->ptd_offset < 0);

	list_del_init(&ep->active);
	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);

	prefetchw(ptd);
	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	dump_ptd(ptd);
	act_len = PTD_GET_COUNT(ptd);
	/* nothing to copy for OUT/SETUP PTDs or zero-length IN transfers */
	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
		return;
	if (act_len > ep->length)
		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
			 ep->ptd_offset, act_len, ep->length);
	BUG_ON(act_len > ep->length);
	/* Only transfer the amount of data that has actually been overwritten
	 * in the chip buffer. We don't want any data that doesn't belong to the
	 * transfer to leak out of the chip to the callers transfer buffer!
	 */
	prefetchw(ep->data);
	isp1362_read_buffer(isp1362_hcd, ep->data,
			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
	dump_ptd_in_data(ptd, ep->data);
}
399
/*
 * INT PTDs will stay in the chip until data is available.
 * This function will remove a PTD from the chip when the URB is dequeued.
 * Must be called with the spinlock held and IRQs disabled.
 * The endpoint is parked on remove_list; the actual buffer release and
 * URB giveback happen later via the SOF interrupt (see finish_unlinks).
 */
static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)

{
	int index;
	struct isp1362_ep_queue *epq;

	DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
	BUG_ON(ep->ptd_offset < 0);

	epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
	BUG_ON(!epq);

	/* put ep in remove_list for cleanup */
	WARN_ON(!list_empty(&ep->remove_list));
	list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
	/* let SOF interrupt handle the cleanup */
	isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);

	index = ep->ptd_index;
	if (index < 0)
		/* ISO queues don't have SKIP registers */
		return;

	DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
	    index, ep->ptd_offset, epq->skip_map, 1 << index);

	/* prevent further processing of PTD (will be effective after next SOF) */
	epq->skip_map |= 1 << index;
	if (epq == &isp1362_hcd->atl_queue) {
		DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
		/* every PTD skipped: the ATL buffer can be deactivated entirely */
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else if (epq == &isp1362_hcd->intl_queue) {
		DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
		/* likewise for the INTL buffer */
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	}
}
447
/*
 * Take done or failed requests out of schedule. Give back
 * processed urbs.
 *
 * Caller holds isp1362_hcd->lock; it is dropped around
 * usb_hcd_giveback_urb() (the completion callback may resubmit) and
 * reacquired afterwards, as annotated below.
 */
static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			   struct urb *urb, int status)
     __releases(isp1362_hcd->lock)
     __acquires(isp1362_hcd->lock)
{
	urb->hcpriv = NULL;
	ep->error_count = 0;

	/* control endpoints restart their state machine at SETUP */
	if (usb_pipecontrol(urb->pipe))
		ep->nextpid = USB_PID_SETUP;

	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
		ep->num_req, usb_pipedevice(urb->pipe),
		usb_pipeendpoint(urb->pipe),
		!usb_pipein(urb->pipe) ? "out" : "in",
		usb_pipecontrol(urb->pipe) ? "ctrl" :
			usb_pipeint(urb->pipe) ? "int" :
			usb_pipebulk(urb->pipe) ? "bulk" :
			"iso",
		urb->actual_length, urb->transfer_buffer_length,
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "", urb->status);


	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
	/* must not hold the HCD lock while the completion handler runs */
	spin_unlock(&isp1362_hcd->lock);
	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
	spin_lock(&isp1362_hcd->lock);

	/* take idle endpoints out of the schedule right away */
	if (!list_empty(&ep->hep->urb_list))
		return;

	/* async deschedule */
	if (!list_empty(&ep->schedule)) {
		list_del_init(&ep->schedule);
		return;
	}


	if (ep->interval) {
		/* periodic deschedule: give back the reserved bandwidth */
		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
		    ep, ep->branch, ep->load,
		    isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] - ep->load);
		isp1362_hcd->load[ep->branch] -= ep->load;
		ep->branch = PERIODIC_SIZE;
	}
}
502
/*
 * Analyze transfer results, handle partial transfers and errors.
 *
 * Inspects the completion code of ep->ptd (read back from the chip),
 * advances the endpoint's PID state machine (ep->nextpid) and, once the
 * URB is complete or has failed, hands it back via finish_request().
 */
static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	struct urb *urb = get_urb(ep);
	struct usb_device *udev;
	struct ptd *ptd;
	int short_ok;
	u16 len;
	/* -EINPROGRESS means "URB not finished yet" throughout this function */
	int urbstat = -EINPROGRESS;
	u8 cc;

	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);

	udev = urb->dev;
	ptd = &ep->ptd;
	cc = PTD_GET_CC(ptd);
	/* the chip never touched this PTD: treat it as "device not responding" */
	if (cc == PTD_NOTACCESSED) {
		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
		    ep->num_req, ptd);
		cc = PTD_DEVNOTRESP;
	}

	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
	len = urb->transfer_buffer_length - urb->actual_length;

	/* Data underrun is special. For allowed underrun
	   we clear the error and continue as normal. For
	   forbidden underrun we finish the DATA stage
	   immediately while for control transfer,
	   we do a STATUS stage.
	*/
	if (cc == PTD_DATAUNDERRUN) {
		if (short_ok) {
			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req, short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			cc = PTD_CC_NOERROR;
			urbstat = 0;
		} else {
			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req,
			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
			    short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			if (usb_pipecontrol(urb->pipe)) {
				ep->nextpid = USB_PID_ACK;
				/* save the data underrun error code for later and
				 * proceed with the status stage
				 */
				urb->actual_length += PTD_GET_COUNT(ptd);
				BUG_ON(urb->actual_length > urb->transfer_buffer_length);

				if (urb->status == -EINPROGRESS)
					urb->status = cc_to_error[PTD_DATAUNDERRUN];
			} else {
				/* non-control: fail the URB, keep the toggle in sync */
				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
					      PTD_GET_TOGGLE(ptd));
				urbstat = cc_to_error[PTD_DATAUNDERRUN];
			}
			goto out;
		}
	}

	if (cc != PTD_CC_NOERROR) {
		/* give up after 3 consecutive errors, or at once on STALL/overrun */
		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
			urbstat = cc_to_error[cc];
			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
			    ep->error_count);
		}
		goto out;
	}

	switch (ep->nextpid) {
	case USB_PID_OUT:
		/* an OUT PTD must have transferred exactly what we queued */
		if (PTD_GET_COUNT(ptd) != ep->length)
			pr_err("%s: count=%d len=%d\n", __func__,
			   PTD_GET_COUNT(ptd), ep->length);
		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
		urb->actual_length += ep->length;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
		if (urb->actual_length == urb->transfer_buffer_length) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				/* control transfer: move on to the STATUS stage */
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				/* done unless a trailing ZLP is still owed */
				if (len % ep->maxpacket ||
				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
					urbstat = 0;
					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
					    urbstat, len, ep->maxpacket, urb->actual_length);
				}
			}
		}
		break;
	case USB_PID_IN:
		len = PTD_GET_COUNT(ptd);
		BUG_ON(len > ep->length);
		urb->actual_length += len;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
		/* if transfer completed or (allowed) data underrun */
		if ((urb->transfer_buffer_length == urb->actual_length) ||
		    len % ep->maxpacket) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				/* control transfer: move on to the STATUS stage */
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				urbstat = 0;
				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
				    urbstat, len, ep->maxpacket, urb->actual_length);
			}
		}
		break;
	case USB_PID_SETUP:
		/* after SETUP: skip straight to STATUS when there is no DATA stage */
		if (urb->transfer_buffer_length == urb->actual_length) {
			ep->nextpid = USB_PID_ACK;
		} else if (usb_pipeout(urb->pipe)) {
			usb_settoggle(udev, 0, 1, 1);
			ep->nextpid = USB_PID_OUT;
		} else {
			usb_settoggle(udev, 0, 0, 1);
			ep->nextpid = USB_PID_IN;
		}
		break;
	case USB_PID_ACK:
		/* STATUS stage done: the control transfer is complete */
		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
		    urbstat);
		WARN_ON(urbstat != -EINPROGRESS);
		urbstat = 0;
		ep->nextpid = 0;
		break;
	default:
		BUG_ON(1);
	}

 out:
	if (urbstat != -EINPROGRESS) {
		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
		    ep, ep->num_req, urb, urbstat);
		finish_request(isp1362_hcd, ep, urb, urbstat);
	}
}
659
/*
 * Clean up every endpoint parked on remove_list by remove_ptd():
 * release its chip buffer blocks, fail any still-queued URB with
 * -ESHUTDOWN and drop the endpoint from the active and remove lists.
 * remove_ptd() arranges for this to run at SOF time.
 */
static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
		struct isp1362_ep_queue *epq =
			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
		int index = ep->ptd_index;

		BUG_ON(epq == NULL);
		/* index < 0 means an ISO endpoint with no claimed blocks */
		if (index >= 0) {
			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);
		}
		if (!list_empty(&ep->hep->urb_list)) {
			struct urb *urb = get_urb(ep);

			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
			    ep->num_req, ep);
			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
		}
		WARN_ON(list_empty(&ep->active));
		if (!list_empty(&ep->active)) {
			list_del_init(&ep->active);
			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
		}
		list_del_init(&ep->remove_list);
		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
	}
	DBG(1, "%s: Done\n", __func__);
}
693
694 static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
695 {
696         if (count > 0) {
697                 if (count < isp1362_hcd->atl_queue.ptd_count)
698                         isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
699                 isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
700                 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
701                 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
702         } else
703                 isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
704 }
705
/*
 * Kick the INTL (interrupt transfer) buffer: unmask the INTL done
 * interrupt, mark the buffer active and load the current skip map.
 */
static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
}
712
713 static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
714 {
715         isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
716         isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
717                            HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
718 }
719
720 static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
721                       struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
722 {
723         int index = epq->free_ptd;
724
725         prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
726         index = claim_ptd_buffers(epq, ep, ep->length);
727         if (index == -ENOMEM) {
728                 DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
729                     ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
730                 return index;
731         } else if (index == -EOVERFLOW) {
732                 DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
733                     __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
734                     epq->buf_map, epq->skip_map);
735                 return index;
736         } else
737                 BUG_ON(index < 0);
738         list_add_tail(&ep->active, &epq->active);
739         DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
740             ep, ep->num_req, ep->length, &epq->active);
741         DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
742             ep->ptd_offset, ep, ep->num_req);
743         isp1362_write_ptd(isp1362_hcd, ep, epq);
744         __clear_bit(ep->ptd_index, &epq->skip_map);
745
746         return 0;
747 }
748
/* Walk the async (control/bulk) schedule and submit a PTD for every
 * endpoint that does not already have one in flight.  Submission stops
 * on -ENOMEM (no PTD slot left at all) and skips the endpoint on
 * -EOVERFLOW (a smaller request may still fit); both cases set @defer
 * so the queue is retried from the SOF interrupt.  Runs with
 * isp1362_hcd->lock held (see comment before isp1362_urb_enqueue()).
 */
static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
	struct isp1362_ep *ep;
	int defer = 0;

	/* finish_transfers() is currently walking this queue's active
	 * list; it will trigger a restart when done */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		/* endpoint already has a PTD on the chip */
		if (!list_empty(&ep->active)) {
			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
		    ep, ep->num_req);

		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM) {
			defer = 1;
			break;
		} else if (ret == -EOVERFLOW) {
			defer = 1;
			continue;
		}
#ifdef BUGGY_PXA2XX_UDC_USBTEST
		/* see the comment block at the top of this file */
		defer = ep->nextpid == USB_PID_SETUP;
#endif
		ptd_count++;
	}

	/* Avoid starving of endpoints */
	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
		/* rotate the list head so a different endpoint is first
		 * on the next pass */
		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
	}
	if (ptd_count || defer)
		/* count 0 (deferred) arms the SOF interrupt instead */
		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds) {
		epq->stat_maxptds = epq->ptd_count;
		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
	}
}
801
/* Walk the periodic (interrupt transfer) schedule and submit a PTD for
 * every endpoint that does not already have one in flight.  Stops on
 * -ENOMEM (no PTD slot left) and skips the endpoint on -EOVERFLOW.
 * Runs with isp1362_hcd->lock held.
 */
static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
	struct isp1362_ep *ep;

	/* finish_transfers() is currently walking this queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
			    epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
		    epq->name, ep, ep->num_req);
		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM)
			break;
		else if (ret == -EOVERFLOW)
			continue;
		ptd_count++;
	}

	if (ptd_count) {
		/* debug aid: only report when the submitted count changes;
		 * the function-local static relies on the caller's locking
		 * for serialization */
		static int last_count;

		if (ptd_count != last_count) {
			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
			last_count = ptd_count;
		}
		enable_intl_transfers(isp1362_hcd);
	}

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;
}
847
848 static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
849 {
850         u16 ptd_offset = ep->ptd_offset;
851         int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
852
853         DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
854             ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
855
856         ptd_offset += num_ptds * epq->blk_size;
857         if (ptd_offset < epq->buf_start + epq->buf_size)
858                 return ptd_offset;
859         else
860                 return -ENOMEM;
861 }
862
/* Fill the current ISO (ISTL) buffer with PTDs for all isochronous
 * endpoints that are due, hand the buffer to the controller and, if
 * the chip has already consumed the other half of the double buffer,
 * loop back (via fill2) to fill that one for the following frame as
 * well.  URBs whose time frame has entirely passed are completed with
 * -EOVERFLOW.  Runs with isp1362_hcd->lock held.
 */
static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	int flip = isp1362_hcd->istl_flip;
	struct isp1362_ep_queue *epq;
	int ptd_offset;
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;
	/* current frame number; only the low 16 bits are compared below */
	u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);

 fill2:
	epq = &isp1362_hcd->istl_queue[flip];
	/* finish_iso_transfers() is currently walking this buffer */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	/* buffer still owned by the controller */
	if (!list_empty(&epq->active))
		return;

	/* ISO PTDs are packed back to back from the start of the buffer */
	ptd_offset = epq->buf_start;
	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
		struct urb *urb = get_urb(ep);
		/* signed distance from the URB's start frame, mod 2^16 */
		s16 diff = fno - (u16)urb->start_frame;

		DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);

		if (diff > urb->number_of_packets) {
			/* time frame for this URB has elapsed */
			finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
			continue;
		} else if (diff < -1) {
			/* URB is not due in this frame or the next one.
			 * Comparing with '-1' instead of '0' accounts for double
			 * buffering in the ISP1362 which enables us to queue the PTD
			 * one frame ahead of time
			 */
		} else if (diff == -1) {
			/* submit PTD's that are due in the next frame */
			prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
			if (ptd_offset + PTD_HEADER_SIZE + ep->length >
			    epq->buf_start + epq->buf_size) {
				pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
				    __func__, ep->length);
				continue;
			}
			ep->ptd_offset = ptd_offset;
			list_add_tail(&ep->active, &epq->active);

			ptd_offset = next_ptd(epq, ep);
			if (ptd_offset < 0) {
				pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
				     ep->num_req, epq->name);
				break;
			}
		}
	}
	/* copy the queued PTDs to the chip; the final PTD in the buffer
	 * must carry the LAST marker */
	list_for_each_entry(ep, &epq->active, active) {
		if (epq->active.next == &ep->active)
			ep->ptd.mps |= PTD_LAST_MSK;
		isp1362_write_ptd(isp1362_hcd, ep, epq);
		ptd_count++;
	}

	if (ptd_count)
		enable_istl_transfers(isp1362_hcd, flip);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;

	/* check, whether the second ISTL buffer may also be filled */
	if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
	      (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
		fno++;
		ptd_count = 0;
		flip = 1 - flip;
		goto fill2;
	}
}
943
/* Complete all PTDs of @epq whose bit is set in @done_map: read the
 * PTD back from chip memory, release its buffer space, drop the
 * endpoint from any pending-removal list and hand the result to
 * postproc_ep().  Warns if bits remain in @done_map after the active
 * list has been fully walked.  Runs with isp1362_hcd->lock held.
 */
static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
			     struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);

	/* keep start_*_transfers() out while we modify the active list */
	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		int index = ep->ptd_index;

		DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
		    index, ep->ptd_offset);

		BUG_ON(index < 0);
		if (__test_and_clear_bit(index, &done_map)) {
			isp1362_read_ptd(isp1362_hcd, ep, epq);
			/* remember this slot for the next allocation */
			epq->free_ptd = index;
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);

			DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
			    ep, ep->num_req);
			if (!list_empty(&ep->remove_list)) {
				list_del_init(&ep->remove_list);
				DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
			}
			DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
			    ep, ep->num_req);
			postproc_ep(isp1362_hcd, ep);
		}
		/* all done bits consumed; no need to scan further */
		if (!done_map)
			break;
	}
	if (done_map)
		pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
		     epq->skip_map);
	atomic_dec(&epq->finishing);
}
989
/* Post-process all PTDs of a completed ISO (ISTL) buffer.  Unlike
 * finish_transfers() there is no done map: every PTD on the buffer's
 * active list is read back and completed.  Runs with
 * isp1362_hcd->lock held.
 */
static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);

	/* keep start_iso_transfers() out while we walk the active list */
	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);

		isp1362_read_ptd(isp1362_hcd, ep, epq);
		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
		postproc_ep(isp1362_hcd, ep);
	}
	/* NOTE(review): presumably the ISTL queues are initialized with
	 * blk_size == 0 (variable-size PTDs) — confirm against the queue
	 * setup code */
	WARN_ON(epq->blk_size != 0);
	atomic_dec(&epq->finishing);
}
1013
/* Top-level interrupt handler.  Services every currently-enabled uPINT
 * source: SOF, the two ISO (ISTL) buffers, INTL, ATL, the OHCI
 * "operational" interrupts (OPR) and SUSP/CLKRDY.  Chip interrupts are
 * masked for the duration and re-enabled from isp1362_hcd->irqenb on
 * exit.
 */
static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
{
	int handled = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u16 irqstat;
	u16 svc_mask;

	spin_lock(&isp1362_hcd->lock);

	/* this handler must never be reentered */
	BUG_ON(isp1362_hcd->irq_active++);

	/* mask all chip interrupts while servicing */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
	DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);

	/* only handle interrupts that are currently enabled */
	irqstat &= isp1362_hcd->irqenb;
	isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
	/* svc_mask tracks sources still unserviced when we are done */
	svc_mask = irqstat;

	if (irqstat & HCuPINT_SOF) {
		/* SOF is one-shot: re-enabled only when a retry is needed */
		isp1362_hcd->irqenb &= ~HCuPINT_SOF;
		isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SOF;
		DBG(3, "%s: SOF\n", __func__);
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		if (!list_empty(&isp1362_hcd->remove_list))
			finish_unlinks(isp1362_hcd);
		if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
			if (list_empty(&isp1362_hcd->atl_queue.active)) {
				start_atl_transfers(isp1362_hcd);
			} else {
				/* PTDs still pending: re-arm ATL processing */
				isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
				isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
						    isp1362_hcd->atl_queue.skip_map);
				isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
			}
		}
	}

	if (irqstat & HCuPINT_ISTL0) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL0;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
		DBG(1, "%s: ISTL0\n", __func__);
		/* ISTL0 completion implies istl_flip == 0 */
		WARN_ON((int)!!isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
	}

	if (irqstat & HCuPINT_ISTL1) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL1;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
		DBG(1, "%s: ISTL1\n", __func__);
		/* ISTL1 completion implies istl_flip == 1 */
		WARN_ON(!(int)isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
	}

	if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
		/* both buffers completing at once must not happen */
		WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
			(HCuPINT_ISTL0 | HCuPINT_ISTL1));
		finish_iso_transfers(isp1362_hcd,
				     &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
		start_iso_transfers(isp1362_hcd);
		/* switch to the other half of the double buffer */
		isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
	}

	if (irqstat & HCuPINT_INTL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;

		DBG(2, "%s: INTL\n", __func__);

		svc_mask &= ~HCuPINT_INTL;

		/* stop the chip from reprocessing the finished PTDs */
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			/* All PTDs are finished, disable INTL processing entirely */
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);

		handled = 1;
		WARN_ON(!done_map);
		if (done_map) {
			DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
			start_intl_transfers(isp1362_hcd);
		}
	}

	if (irqstat & HCuPINT_ATL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;

		DBG(2, "%s: ATL\n", __func__);

		svc_mask &= ~HCuPINT_ATL;

		/* stop the chip from reprocessing the finished PTDs */
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
		if (done_map) {
			DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
			start_atl_transfers(isp1362_hcd);
		}
		handled = 1;
	}

	if (irqstat & HCuPINT_OPR) {
		u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
		isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;

		svc_mask &= ~HCuPINT_OPR;
		DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
		intstat &= isp1362_hcd->intenb;
		if (intstat & OHCI_INTR_UE) {
			pr_err("Unrecoverable error\n");
			/* FIXME: do here reset or cleanup or whatever */
		}
		if (intstat & OHCI_INTR_RHSC) {
			/* cache root hub and port status for the hub code */
			isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
			isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
			isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
		}
		if (intstat & OHCI_INTR_RD) {
			pr_info("%s: RESUME DETECTED\n", __func__);
			isp1362_show_reg(isp1362_hcd, HCCONTROL);
			usb_hcd_resume_root_hub(hcd);
		}
		/* acknowledge the serviced OHCI interrupts */
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
		irqstat &= ~HCuPINT_OPR;
		handled = 1;
	}

	if (irqstat & HCuPINT_SUSP) {
		isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SUSP;

		pr_info("%s: SUSPEND IRQ\n", __func__);
	}

	if (irqstat & HCuPINT_CLKRDY) {
		isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
		handled = 1;
		isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
		svc_mask &= ~HCuPINT_CLKRDY;
		pr_info("%s: CLKRDY IRQ\n", __func__);
	}

	if (svc_mask)
		pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);

	/* restore the (possibly updated) interrupt enable mask */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
	isp1362_hcd->irq_active--;
	spin_unlock(&isp1362_hcd->lock);

	return IRQ_RETVAL(handled);
}
1187
1188 /*-------------------------------------------------------------------------*/
1189
1190 #define MAX_PERIODIC_LOAD       900     /* out of 1000 usec */
1191 static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1192 {
1193         int i, branch = -ENOSPC;
1194
1195         /* search for the least loaded schedule branch of that interval
1196          * which has enough bandwidth left unreserved.
1197          */
1198         for (i = 0; i < interval; i++) {
1199                 if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1200                         int j;
1201
1202                         for (j = i; j < PERIODIC_SIZE; j += interval) {
1203                                 if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1204                                         pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1205                                             load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1206                                         break;
1207                                 }
1208                         }
1209                         if (j < PERIODIC_SIZE)
1210                                 continue;
1211                         branch = i;
1212                 }
1213         }
1214         return branch;
1215 }
1216
1217 /* NB! ALL the code above this point runs with isp1362_hcd->lock
1218    held, irqs off
1219 */
1220
1221 /*-------------------------------------------------------------------------*/
1222
/* usb_hcd hook: queue an URB for transfer.
 * Allocates the per-endpoint state (struct isp1362_ep) on first use,
 * links the URB to its endpoint, places the endpoint on the
 * async/periodic/isoc schedule list as appropriate and kicks off PTD
 * submission for the transfer type.
 * Isochronous URBs are rejected up front with -ENOSPC, so the ISO
 * branches further down are currently unreachable dead code.
 */
static int isp1362_urb_enqueue(struct usb_hcd *hcd,
			       struct urb *urb,
			       gfp_t mem_flags)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_device *udev = urb->dev;
	unsigned int pipe = urb->pipe;
	int is_out = !usb_pipein(pipe);
	int type = usb_pipetype(pipe);
	int epnum = usb_pipeendpoint(pipe);
	struct usb_host_endpoint *hep = urb->ep;
	struct isp1362_ep *ep = NULL;
	unsigned long flags;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	if (type == PIPE_ISOCHRONOUS) {
		pr_err("Isochronous transfers not supported\n");
		return -ENOSPC;
	}

	URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
		usb_pipedevice(pipe), epnum,
		is_out ? "out" : "in",
		usb_pipecontrol(pipe) ? "ctrl" :
			usb_pipeint(pipe) ? "int" :
			usb_pipebulk(pipe) ? "bulk" :
			"iso",
		urb->transfer_buffer_length,
		(urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "");

	/* avoid all allocations within spinlocks: request or endpoint */
	if (!hep->hcpriv) {
		ep = kzalloc(sizeof *ep, mem_flags);
		if (!ep)
			return -ENOMEM;
	}
	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* don't submit to a dead or disabled port */
	if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
	      USB_PORT_STAT_ENABLE) ||
	    !HC_IS_RUNNING(hcd->state)) {
		/* ep is NULL when the endpoint already existed;
		 * kfree(NULL) is a no-op */
		kfree(ep);
		retval = -ENODEV;
		goto fail_not_linked;
	}

	retval = usb_hcd_link_urb_to_ep(hcd, urb);
	if (retval) {
		kfree(ep);
		goto fail_not_linked;
	}

	if (hep->hcpriv) {
		ep = hep->hcpriv;
	} else {
		/* first URB for this endpoint: initialize its state */
		INIT_LIST_HEAD(&ep->schedule);
		INIT_LIST_HEAD(&ep->active);
		INIT_LIST_HEAD(&ep->remove_list);
		ep->udev = usb_get_dev(udev);
		ep->hep = hep;
		ep->epnum = epnum;
		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
		ep->ptd_offset = -EINVAL;
		ep->ptd_index = -EINVAL;
		usb_settoggle(udev, epnum, is_out, 0);

		if (type == PIPE_CONTROL)
			ep->nextpid = USB_PID_SETUP;
		else if (is_out)
			ep->nextpid = USB_PID_OUT;
		else
			ep->nextpid = USB_PID_IN;

		switch (type) {
		case PIPE_ISOCHRONOUS:
		case PIPE_INTERRUPT:
			if (urb->interval > PERIODIC_SIZE)
				urb->interval = PERIODIC_SIZE;
			ep->interval = urb->interval;
			/* branch == PERIODIC_SIZE marks "not yet scheduled" */
			ep->branch = PERIODIC_SIZE;
			ep->load = usb_calc_bus_time(udev->speed, !is_out,
						     (type == PIPE_ISOCHRONOUS),
						     usb_maxpacket(udev, pipe, is_out)) / 1000;
			break;
		}
		hep->hcpriv = ep;
	}
	/* per-request serial number used in debug output */
	ep->num_req = isp1362_hcd->req_serial++;

	/* maybe put endpoint into schedule */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		if (list_empty(&ep->schedule)) {
			DBG(1, "%s: Adding ep %p req %d to async schedule\n",
				__func__, ep, ep->num_req);
			list_add_tail(&ep->schedule, &isp1362_hcd->async);
		}
		break;
	case PIPE_ISOCHRONOUS:
	case PIPE_INTERRUPT:
		urb->interval = ep->interval;

		/* urb submitted for already existing EP */
		if (ep->branch < PERIODIC_SIZE)
			break;

		/* reserve periodic bandwidth: pick the least-loaded branch */
		retval = balance(isp1362_hcd, ep->interval, ep->load);
		if (retval < 0) {
			pr_err("%s: balance returned %d\n", __func__, retval);
			goto fail;
		}
		ep->branch = retval;
		retval = 0;
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
		    __func__, isp1362_hcd->fmindex, ep->branch,
		    ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
		     ~(PERIODIC_SIZE - 1)) + ep->branch,
		    (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);

		if (list_empty(&ep->schedule)) {
			if (type == PIPE_ISOCHRONOUS) {
				/* dead code: ISO was rejected above */
				u16 frame = isp1362_hcd->fmindex;

				frame += max_t(u16, 8, ep->interval);
				frame &= ~(ep->interval - 1);
				frame |= ep->branch;
				if (frame_before(frame, isp1362_hcd->fmindex))
					frame += ep->interval;
				urb->start_frame = frame;

				DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
			} else {
				DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
			}
		} else
			DBG(1, "%s: ep %p already scheduled\n", __func__, ep);

		DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
		    ep->load / ep->interval, isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] + ep->load);
		isp1362_hcd->load[ep->branch] += ep->load;
	}

	urb->hcpriv = hep;
	ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);

	/* kick off PTD submission for this transfer type */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		start_atl_transfers(isp1362_hcd);
		break;
	case PIPE_INTERRUPT:
		start_intl_transfers(isp1362_hcd);
		break;
	case PIPE_ISOCHRONOUS:
		start_iso_transfers(isp1362_hcd);
		break;
	default:
		BUG();
	}
 fail:
	if (retval)
		usb_hcd_unlink_urb_from_ep(hcd, urb);


 fail_not_linked:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (retval)
		DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
	return retval;
}
1403
/* usb_hcd hook: unlink an URB.
 * If the URB is at the head of its endpoint's queue and a PTD for it
 * is active on the chip, PTD processing is disabled and the actual
 * removal is left to the interrupt handler; otherwise the URB is
 * completed immediately with @status.
 */
static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_host_endpoint *hep;
	unsigned long flags;
	struct isp1362_ep *ep;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (retval)
		goto done;

	hep = urb->hcpriv;

	if (!hep) {
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		return -EIDRM;
	}

	ep = hep->hcpriv;
	if (ep) {
		/* In front of queue? */
		if (ep->hep->urb_list.next == &urb->urb_list) {
			if (!list_empty(&ep->active)) {
				DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
				    urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
				/* disable processing and queue PTD for removal */
				remove_ptd(isp1362_hcd, ep);
				/* NULL means: completion deferred to the IRQ path */
				urb = NULL;
			}
		}
		if (urb) {
			DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
			    ep->num_req);
			finish_request(isp1362_hcd, ep, urb, status);
		} else
			DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
	} else {
		pr_warning("%s: No EP in URB %p\n", __func__, urb);
		retval = -EINVAL;
	}
done:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	DBG(3, "%s: exit\n", __func__);

	return retval;
}
1455
/* usb_hcd hook: release the driver state attached to a host endpoint.
 * Any in-flight PTD is queued for removal, then the function sleeps in
 * a loop until the interrupt handler has emptied the endpoint's active
 * list before freeing the endpoint state.
 * NOTE(review): the wait loop has no timeout; if the remove path never
 * completes this will spin forever — confirm removal is guaranteed.
 */
static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	struct isp1362_ep *ep = hep->hcpriv;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(1, "%s: ep %p\n", __func__, ep);
	if (!ep)
		return;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	if (!list_empty(&hep->urb_list)) {
		if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
			DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
			    ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
			remove_ptd(isp1362_hcd, ep);
			pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
		}
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* Wait for interrupt to clear out active list */
	while (!list_empty(&ep->active))
		msleep(1);

	DBG(1, "%s: Freeing EP %p\n", __func__, ep);

	usb_put_dev(ep->udev);
	kfree(ep);
	hep->hcpriv = NULL;
}
1485
1486 static int isp1362_get_frame(struct usb_hcd *hcd)
1487 {
1488         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1489         u32 fmnum;
1490         unsigned long flags;
1491
1492         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1493         fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1494         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1495
1496         return (int)fmnum;
1497 }
1498
1499 /*-------------------------------------------------------------------------*/
1500
1501 /* Adapted from ohci-hub.c */
1502 static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
1503 {
1504         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1505         int ports, i, changed = 0;
1506         unsigned long flags;
1507
1508         if (!HC_IS_RUNNING(hcd->state))
1509                 return -ESHUTDOWN;
1510
1511         /* Report no status change now, if we are scheduled to be
1512            called later */
1513         if (timer_pending(&hcd->rh_timer))
1514                 return 0;
1515
1516         ports = isp1362_hcd->rhdesca & RH_A_NDP;
1517         BUG_ON(ports > 2);
1518
1519         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1520         /* init status */
1521         if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
1522                 buf[0] = changed = 1;
1523         else
1524                 buf[0] = 0;
1525
1526         for (i = 0; i < ports; i++) {
1527                 u32 status = isp1362_hcd->rhport[i];
1528
1529                 if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
1530                               RH_PS_OCIC | RH_PS_PRSC)) {
1531                         changed = 1;
1532                         buf[0] |= 1 << (i + 1);
1533                         continue;
1534                 }
1535
1536                 if (!(status & RH_PS_CCS))
1537                         continue;
1538         }
1539         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1540         return changed;
1541 }
1542
/*
 * Fill in the root hub class descriptor @desc from the cached
 * HCRHDESCA register value.
 *
 * Uses the legacy two-byte bitmap[] layout: bitmap[0] is the
 * DeviceRemovable map, bitmap[1] the PortPwrCtrlMask.
 */
static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
				   struct usb_hub_descriptor *desc)
{
	u32 reg = isp1362_hcd->rhdesca;

	DBG(3, "%s: enter\n", __func__);

	desc->bDescriptorType = 0x29;	/* USB hub descriptor type */
	desc->bDescLength = 9;
	desc->bHubContrCurrent = 0;
	desc->bNbrPorts = reg & 0x3;	/* NDP field; chip has at most 2 ports */
	/* Power switching, device type, overcurrent. */
	desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
	DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f));
	desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;	/* POTPGT field */
	/* two bitmaps:  ports removable, and legacy PortPwrCtrlMask */
	desc->bitmap[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
	desc->bitmap[1] = ~0;

	DBG(3, "%s: exit\n", __func__);
}
1564
1565 /* Adapted from ohci-hub.c */
/*
 * hc_driver::hub_control callback: handle root hub class requests.
 *
 * Port requests use a 1-based wIndex.  After every port register
 * write, the cached isp1362_hcd->rhport[] value is refreshed from the
 * hardware.  Unsupported requests fall to the error label and answer
 * with a "protocol stall" (-EPIPE), as usbcore expects.
 */
static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
			       u16 wIndex, char *buf, u16 wLength)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	int retval = 0;
	unsigned long flags;
	unsigned long t1;
	int ports = isp1362_hcd->rhdesca & RH_A_NDP;
	u32 tmp = 0;

	switch (typeReq) {
	case ClearHubFeature:
		DBG(0, "ClearHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			_DBG(0, "C_HUB_OVER_CURRENT\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			/* clear the overcurrent indicator change bit */
			isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			/* fall through */
		case C_HUB_LOCAL_POWER:
			_DBG(0, "C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case SetHubFeature:
		DBG(0, "SetHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
		case C_HUB_LOCAL_POWER:
			/* accepted but nothing to do in hardware */
			_DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case GetHubDescriptor:
		DBG(0, "GetHubDescriptor\n");
		isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
		break;
	case GetHubStatus:
		DBG(0, "GetHubStatus\n");
		/* hub status: report no local-power or overcurrent change */
		put_unaligned(cpu_to_le32(0), (__le32 *) buf);
		break;
	case GetPortStatus:
#ifndef VERBOSE
		DBG(0, "GetPortStatus\n");
#endif
		if (!wIndex || wIndex > ports)
			goto error;
		/* report the cached port status word */
		tmp = isp1362_hcd->rhport[--wIndex];
		put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
		break;
	case ClearPortFeature:
		DBG(0, "ClearPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;

		/* map the feature onto the HCRHPORTx bit to write */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			_DBG(0, "USB_PORT_FEAT_ENABLE\n");
			tmp = RH_PS_CCS;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			_DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
			tmp = RH_PS_PESC;
			break;
		case USB_PORT_FEAT_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
			tmp = RH_PS_POCI;
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
			tmp = RH_PS_PSSC;
			break;
		case USB_PORT_FEAT_POWER:
			_DBG(0, "USB_PORT_FEAT_POWER\n");
			tmp = RH_PS_LSDA;

			break;
		case USB_PORT_FEAT_C_CONNECTION:
			_DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
			tmp = RH_PS_CSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			_DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
			tmp = RH_PS_OCIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			_DBG(0, "USB_PORT_FEAT_C_RESET\n");
			tmp = RH_PS_PRSC;
			break;
		default:
			goto error;
		}

		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
		/* refresh the cached port status */
		isp1362_hcd->rhport[wIndex] =
			isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		break;
	case SetPortFeature:
		DBG(0, "SetPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case USB_PORT_FEAT_POWER:
			_DBG(0, "USB_PORT_FEAT_POWER\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case USB_PORT_FEAT_RESET:
			_DBG(0, "USB_PORT_FEAT_RESET\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);

			/* retry the reset pulse until the overall window expires */
			t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
			while (time_before(jiffies, t1)) {
				/* spin until any current reset finishes */
				for (;;) {
					tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
					if (!(tmp & RH_PS_PRS))
						break;
					/* NOTE: busy-wait with the lock held and IRQs off */
					udelay(500);
				}
				/* stop if no device is connected any more */
				if (!(tmp & RH_PS_CCS))
					break;
				/* Reset lasts 10ms (claims datasheet) */
				isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));

				/* drop the lock while sleeping for the reset width */
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				msleep(10);
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
			}

			isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
									 HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		default:
			goto error;
		}
		break;

	default:
 error:
		/* "protocol stall" on error */
		_DBG(0, "PROTOCOL STALL\n");
		retval = -EPIPE;
	}

	return retval;
}
1733
1734 #ifdef  CONFIG_PM
/*
 * hc_driver::bus_suspend callback: move the controller to the OHCI
 * USB_SUSPEND state after draining all active transfer queues.
 *
 * Returns 0 on success; -EBUSY if the controller is in (or forced
 * back to) RESET and needs a full reinit, or if it refuses to enter
 * the SUSPEND state.
 */
static int isp1362_bus_suspend(struct usb_hcd *hcd)
{
	int status = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	/* honor the minimum time between root hub state changes */
	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
	case OHCI_USB_RESUME:
		DBG(0, "%s: resume/suspend?\n", __func__);
		/* abort the in-progress resume by forcing RESET */
		isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
		isp1362_hcd->hc_control |= OHCI_USB_RESET;
		isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
		/* FALL THROUGH */
	case OHCI_USB_RESET:
		status = -EBUSY;
		pr_warning("%s: needs reinit!\n", __func__);
		goto done;
	case OHCI_USB_SUSPEND:
		pr_warning("%s: already suspended?\n", __func__);
		goto done;
	}
	DBG(0, "%s: suspend root hub\n", __func__);

	/* First stop any processing */
	hcd->state = HC_STATE_QUIESCING;
	if (!list_empty(&isp1362_hcd->atl_queue.active) ||
	    !list_empty(&isp1362_hcd->intl_queue.active) ||
	    !list_empty(&isp1362_hcd->istl_queue[0] .active) ||
	    !list_empty(&isp1362_hcd->istl_queue[1] .active)) {
		int limit;

		/* skip all PTDs, disable buffers and chip-level interrupts */
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
		isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
		isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);

		DBG(0, "%s: stopping schedules ...\n", __func__);
		/* poll up to 2ms, in 250us steps, for a start-of-frame */
		limit = 2000;
		while (limit > 0) {
			udelay(250);
			limit -= 250;
			if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
				break;
		}
		mdelay(7);
		/* reap any transfers that completed while stopping */
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
	}
	DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	/* ack all pending bits (OHCI interrupt status is write-1-to-clear) */
	isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
			    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));

	/* Suspend hub */
	isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);

#if 1
	/* verify the controller actually entered the SUSPEND state */
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
		pr_err("%s: controller won't suspend %08x\n", __func__,
		    isp1362_hcd->hc_control);
		status = -EBUSY;
	} else
#endif
	{
		/* no resumes until devices finish suspending */
		isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
	}
done:
	if (status == 0) {
		hcd->state = HC_STATE_SUSPENDED;
		DBG(0, "%s: HCD suspended: %08x\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	return status;
}
1832
/*
 * hc_driver::bus_resume callback: bring the controller back to the
 * OHCI operational state and resume any suspended root-hub ports.
 *
 * Returns 0 on success; if the controller was left in RESET (power
 * was lost), the HC is stopped and restarted via isp1362_hc_start().
 */
static int isp1362_bus_resume(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u32 port;
	unsigned long flags;
	int status = -EINPROGRESS;

	/* honor the minimum time between root hub state changes */
	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
	if (hcd->state == HC_STATE_RESUMING) {
		pr_warning("%s: duplicate resume\n", __func__);
		status = 0;
	} else
		switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
		case OHCI_USB_SUSPEND:
			DBG(0, "%s: resume root hub\n", __func__);
			isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
			isp1362_hcd->hc_control |= OHCI_USB_RESUME;
			isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
			break;
		case OHCI_USB_RESUME:
			/* HCFS changes sometime after INTR_RD */
			DBG(0, "%s: remote wakeup\n", __func__);
			break;
		case OHCI_USB_OPER:
			/* already operational: nothing to resume */
			DBG(0, "%s: odd resume\n", __func__);
			status = 0;
			hcd->self.root_hub->dev.power.power_state = PMSG_ON;
			break;
		default:                /* RESET, we lost power */
			DBG(0, "%s: root hub hardware reset\n", __func__);
			status = -EBUSY;
		}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (status == -EBUSY) {
		/* power was lost: full stop/start cycle required */
		DBG(0, "%s: Restarting HC\n", __func__);
		isp1362_hc_stop(hcd);
		return isp1362_hc_start(hcd);
	}
	if (status != -EINPROGRESS)
		return status;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
	while (port--) {
		u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);

		/* force global, not selective, resume */
		if (!(stat & RH_PS_PSS)) {
			DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
			continue;
		}
		DBG(0, "%s: Resuming RH port %d\n", __func__, port);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	/* Some controllers (lucent) need extra-long delays */
	hcd->state = HC_STATE_RESUMING;
	mdelay(20 /* usb 11.5.1.10 */ + 15);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* TRSMRCY */
	msleep(10);

	/* keep it alive for ~5x suspend + resume costs */
	isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);

	hcd->self.root_hub->dev.power.power_state = PMSG_ON;
	hcd->state = HC_STATE_RUNNING;
	return 0;
}
1912 #else
1913 #define isp1362_bus_suspend     NULL
1914 #define isp1362_bus_resume      NULL
1915 #endif
1916
1917 /*-------------------------------------------------------------------------*/
1918
1919 #ifdef STUB_DEBUG_FILE
1920
/* No-op stub: proc debug file support is compiled out (STUB_DEBUG_FILE). */
static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd)
{
}
/* No-op stub: proc debug file support is compiled out (STUB_DEBUG_FILE). */
static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
}
1927
1928 #else
1929
1930 #include <linux/proc_fs.h>
1931 #include <linux/seq_file.h>
1932
1933 static void dump_irq(struct seq_file *s, char *label, u16 mask)
1934 {
1935         seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
1936                    mask & HCuPINT_CLKRDY ? " clkrdy" : "",
1937                    mask & HCuPINT_SUSP ? " susp" : "",
1938                    mask & HCuPINT_OPR ? " opr" : "",
1939                    mask & HCuPINT_EOT ? " eot" : "",
1940                    mask & HCuPINT_ATL ? " atl" : "",
1941                    mask & HCuPINT_SOF ? " sof" : "");
1942 }
1943
1944 static void dump_int(struct seq_file *s, char *label, u32 mask)
1945 {
1946         seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
1947                    mask & OHCI_INTR_MIE ? " MIE" : "",
1948                    mask & OHCI_INTR_RHSC ? " rhsc" : "",
1949                    mask & OHCI_INTR_FNO ? " fno" : "",
1950                    mask & OHCI_INTR_UE ? " ue" : "",
1951                    mask & OHCI_INTR_RD ? " rd" : "",
1952                    mask & OHCI_INTR_SF ? " sof" : "",
1953                    mask & OHCI_INTR_SO ? " so" : "");
1954 }
1955
1956 static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1957 {
1958         seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1959                    mask & OHCI_CTRL_RWC ? " rwc" : "",
1960                    mask & OHCI_CTRL_RWE ? " rwe" : "",
1961                    ({
1962                            char *hcfs;
1963                            switch (mask & OHCI_CTRL_HCFS) {
1964                            case OHCI_USB_OPER:
1965                                    hcfs = " oper";
1966                                    break;
1967                            case OHCI_USB_RESET:
1968                                    hcfs = " reset";
1969                                    break;
1970                            case OHCI_USB_RESUME:
1971                                    hcfs = " resume";
1972                                    break;
1973                            case OHCI_USB_SUSPEND:
1974                                    hcfs = " suspend";
1975                                    break;
1976                            default:
1977                                    hcfs = " ?";
1978                            }
1979                            hcfs;
1980                    }));
1981 }
1982
/* Dump the full ISP1362 register file (OHCI-style and chip-specific) to @s. */
static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
{
	/* 32-bit OHCI-compatible operational registers */
	seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
		   isp1362_read_reg32(isp1362_hcd, HCREVISION));
	seq_printf(s, "HCCONTROL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
		   isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	seq_printf(s, "HCCMDSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
	seq_printf(s, "HCINTSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	seq_printf(s, "HCINTENB   [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
		   isp1362_read_reg32(isp1362_hcd, HCINTENB));
	seq_printf(s, "HCFMINTVL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
		   isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
	seq_printf(s, "HCFMREM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
		   isp1362_read_reg32(isp1362_hcd, HCFMREM));
	seq_printf(s, "HCFMNUM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
		   isp1362_read_reg32(isp1362_hcd, HCFMNUM));
	seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
		   isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
	seq_printf(s, "HCRHDESCA  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
	seq_printf(s, "HCRHDESCB  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
	seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
		   isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
	seq_printf(s, "HCRHPORT1  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
	seq_printf(s, "HCRHPORT2  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
	seq_printf(s, "\n");
	/* 16-bit ISP1362-specific configuration/status registers */
	seq_printf(s, "HCHWCFG    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
		   isp1362_read_reg16(isp1362_hcd, HCHWCFG));
	seq_printf(s, "HCDMACFG   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
		   isp1362_read_reg16(isp1362_hcd, HCDMACFG));
	seq_printf(s, "HCXFERCTR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
		   isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
	seq_printf(s, "HCuPINT    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
		   isp1362_read_reg16(isp1362_hcd, HCuPINT));
	seq_printf(s, "HCuPINTENB [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
		   isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	seq_printf(s, "HCCHIPID   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
		   isp1362_read_reg16(isp1362_hcd, HCCHIPID));
	seq_printf(s, "HCSCRATCH  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
		   isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
	seq_printf(s, "HCBUFSTAT  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
		   isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
	seq_printf(s, "HCDIRADDR  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
		   isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
#if 0
	/* NOTE(review): disabled in the original code — reading HCDIRDATA
	 * may have side effects; confirm before enabling */
	seq_printf(s, "HCDIRDATA  [%02x]     %04x\n", ISP1362_REG_NO(HCDIRDATA),
		   isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
#endif
	/* ISO (ISTL) buffer registers */
	seq_printf(s, "HCISTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
	seq_printf(s, "HCISTLRATE [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
		   isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
	seq_printf(s, "\n");
	/* interrupt (INTL) buffer registers */
	seq_printf(s, "HCINTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
	seq_printf(s, "HCINTLBLKSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
	seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
	seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
	seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
	seq_printf(s, "HCINTLCURR [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
	seq_printf(s, "\n");
	/* async (ATL) buffer registers */
	seq_printf(s, "HCATLBUFSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
	seq_printf(s, "HCATLBLKSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
#if 0
	/* NOTE(review): disabled in the original code — reading HCATLDONE
	 * here could interfere with the done map the IRQ handler consumes;
	 * confirm before enabling */
	seq_printf(s, "HCATLDONE  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCATLDONE));
#endif
	seq_printf(s, "HCATLSKIP  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
	seq_printf(s, "HCATLLAST  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCATLLAST));
	seq_printf(s, "HCATLCURR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCATLCURR));
	seq_printf(s, "\n");
	seq_printf(s, "HCATLDTC   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTC));
	seq_printf(s, "HCATLDTCTO [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
}
2074
2075 static int proc_isp1362_show(struct seq_file *s, void *unused)
2076 {
2077         struct isp1362_hcd *isp1362_hcd = s->private;
2078         struct isp1362_ep *ep;
2079         int i;
2080
2081         seq_printf(s, "%s\n%s version %s\n",
2082                    isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);
2083
2084         /* collect statistics to help estimate potential win for
2085          * DMA engines that care about alignment (PXA)
2086          */
2087         seq_printf(s, "alignment:  16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
2088                    isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
2089                    isp1362_hcd->stat2, isp1362_hcd->stat1);
2090         seq_printf(s, "max # ptds in ATL  fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
2091         seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
2092         seq_printf(s, "max # ptds in ISTL fifo: %d\n",
2093                    max(isp1362_hcd->istl_queue[0] .stat_maxptds,
2094                        isp1362_hcd->istl_queue[1] .stat_maxptds));
2095
2096         /* FIXME: don't show the following in suspended state */
2097         spin_lock_irq(&isp1362_hcd->lock);
2098
2099         dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2100         dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
2101         dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
2102         dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
2103         dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));
2104
2105         for (i = 0; i < NUM_ISP1362_IRQS; i++)
2106                 if (isp1362_hcd->irq_stat[i])
2107                         seq_printf(s, "%-15s: %d\n",
2108                                    ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);
2109
2110         dump_regs(s, isp1362_hcd);
2111         list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
2112                 struct urb *urb;
2113
2114                 seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
2115                            ({
2116                                    char *s;
2117                                    switch (ep->nextpid) {
2118                                    case USB_PID_IN:
2119                                            s = "in";
2120                                            break;
2121                                    case USB_PID_OUT:
2122                                            s = "out";
2123                                            break;
2124                                    case USB_PID_SETUP:
2125                                            s = "setup";
2126                                            break;
2127                                    case USB_PID_ACK:
2128                                            s = "status";
2129                                            break;
2130                                    default:
2131                                            s = "?";
2132                                            break;
2133                                    };
2134                                    s;}), ep->maxpacket) ;
2135                 list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
2136                         seq_printf(s, "  urb%p, %d/%d\n", urb,
2137                                    urb->actual_length,
2138                                    urb->transfer_buffer_length);
2139                 }
2140         }
2141         if (!list_empty(&isp1362_hcd->async))
2142                 seq_printf(s, "\n");
2143         dump_ptd_queue(&isp1362_hcd->atl_queue);
2144
2145         seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
2146
2147         list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
2148                 seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
2149                            isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);
2150
2151                 seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
2152                            ep->interval, ep,
2153                            (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2154                            ep->udev->devnum, ep->epnum,
2155                            (ep->epnum == 0) ? "" :
2156                            ((ep->nextpid == USB_PID_IN) ?
2157                             "in" : "out"), ep->maxpacket);
2158         }
2159         dump_ptd_queue(&isp1362_hcd->intl_queue);
2160
2161         seq_printf(s, "ISO:\n");
2162
2163         list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
2164                 seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
2165                            ep->interval, ep,
2166                            (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2167                            ep->udev->devnum, ep->epnum,
2168                            (ep->epnum == 0) ? "" :
2169                            ((ep->nextpid == USB_PID_IN) ?
2170                             "in" : "out"), ep->maxpacket);
2171         }
2172
2173         spin_unlock_irq(&isp1362_hcd->lock);
2174         seq_printf(s, "\n");
2175
2176         return 0;
2177 }
2178
2179 static int proc_isp1362_open(struct inode *inode, struct file *file)
2180 {
2181         return single_open(file, proc_isp1362_show, PDE(inode)->data);
2182 }
2183
/* file operations for the /proc debug entry; reads are served by
 * proc_isp1362_show() through the standard seq_file helpers */
static const struct file_operations proc_ops = {
	.open = proc_isp1362_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* expect just one isp1362_hcd per system */
static const char proc_filename[] = "driver/isp1362";
2193
2194 static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
2195 {
2196         struct proc_dir_entry *pde;
2197
2198         pde = create_proc_entry(proc_filename, 0, NULL);
2199         if (pde == NULL) {
2200                 pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename);
2201                 return;
2202         }
2203
2204         pde->proc_fops = &proc_ops;
2205         pde->data = isp1362_hcd;
2206         isp1362_hcd->pde = pde;
2207 }
2208
2209 static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
2210 {
2211         if (isp1362_hcd->pde)
2212                 remove_proc_entry(proc_filename, NULL);
2213 }
2214
2215 #endif
2216
2217 /*-------------------------------------------------------------------------*/
2218
2219 static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2220 {
2221         int tmp = 20;
2222
2223         isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
2224         isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
2225         while (--tmp) {
2226                 mdelay(1);
2227                 if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
2228                         break;
2229         }
2230         if (!tmp)
2231                 pr_err("Software reset timeout\n");
2232 }
2233
2234 static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2235 {
2236         unsigned long flags;
2237
2238         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2239         __isp1362_sw_reset(isp1362_hcd);
2240         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2241 }
2242
2243 static int isp1362_mem_config(struct usb_hcd *hcd)
2244 {
2245         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2246         unsigned long flags;
2247         u32 total;
2248         u16 istl_size = ISP1362_ISTL_BUFSIZE;
2249         u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
2250         u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
2251         u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
2252         u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
2253         u16 atl_size;
2254         int i;
2255
2256         WARN_ON(istl_size & 3);
2257         WARN_ON(atl_blksize & 3);
2258         WARN_ON(intl_blksize & 3);
2259         WARN_ON(atl_blksize < PTD_HEADER_SIZE);
2260         WARN_ON(intl_blksize < PTD_HEADER_SIZE);
2261
2262         BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
2263         if (atl_buffers > 32)
2264                 atl_buffers = 32;
2265         atl_size = atl_buffers * atl_blksize;
2266         total = atl_size + intl_size + istl_size;
2267         dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2268         dev_info(hcd->self.controller, "  ISTL:    2 * %4d:     %4d @ $%04x:$%04x\n",
2269                  istl_size / 2, istl_size, 0, istl_size / 2);
2270         dev_info(hcd->self.controller, "  INTL: %4d * (%3zu+8):  %4d @ $%04x\n",
2271                  ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2272                  intl_size, istl_size);
2273         dev_info(hcd->self.controller, "  ATL : %4d * (%3zu+8):  %4d @ $%04x\n",
2274                  atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2275                  atl_size, istl_size + intl_size);
2276         dev_info(hcd->self.controller, "  USED/FREE:   %4d      %4d\n", total,
2277                  ISP1362_BUF_SIZE - total);
2278
2279         if (total > ISP1362_BUF_SIZE) {
2280                 dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
2281                         __func__, total, ISP1362_BUF_SIZE);
2282                 return -ENOMEM;
2283         }
2284
2285         total = istl_size + intl_size + atl_size;
2286         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2287
2288         for (i = 0; i < 2; i++) {
2289                 isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2,
2290                 isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
2291                 isp1362_hcd->istl_queue[i].blk_size = 4;
2292                 INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
2293                 snprintf(isp1362_hcd->istl_queue[i].name,
2294                          sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
2295                 DBG(3, "%s: %5s buf $%04x %d\n", __func__,
2296                      isp1362_hcd->istl_queue[i].name,
2297                      isp1362_hcd->istl_queue[i].buf_start,
2298                      isp1362_hcd->istl_queue[i].buf_size);
2299         }
2300         isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
2301
2302         isp1362_hcd->intl_queue.buf_start = istl_size;
2303         isp1362_hcd->intl_queue.buf_size = intl_size;
2304         isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
2305         isp1362_hcd->intl_queue.blk_size = intl_blksize;
2306         isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
2307         isp1362_hcd->intl_queue.skip_map = ~0;
2308         INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
2309
2310         isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
2311                             isp1362_hcd->intl_queue.buf_size);
2312         isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
2313                             isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
2314         isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
2315         isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
2316                             1 << (ISP1362_INTL_BUFFERS - 1));
2317
2318         isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
2319         isp1362_hcd->atl_queue.buf_size = atl_size;
2320         isp1362_hcd->atl_queue.buf_count = atl_buffers;
2321         isp1362_hcd->atl_queue.blk_size = atl_blksize;
2322         isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
2323         isp1362_hcd->atl_queue.skip_map = ~0;
2324         INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
2325
2326         isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
2327                             isp1362_hcd->atl_queue.buf_size);
2328         isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
2329                             isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
2330         isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
2331         isp1362_write_reg32(isp1362_hcd, HCATLLAST,
2332                             1 << (atl_buffers - 1));
2333
2334         snprintf(isp1362_hcd->atl_queue.name,
2335                  sizeof(isp1362_hcd->atl_queue.name), "ATL");
2336         snprintf(isp1362_hcd->intl_queue.name,
2337                  sizeof(isp1362_hcd->intl_queue.name), "INTL");
2338         DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2339              isp1362_hcd->intl_queue.name,
2340              isp1362_hcd->intl_queue.buf_start,
2341              ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
2342              isp1362_hcd->intl_queue.buf_size);
2343         DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2344              isp1362_hcd->atl_queue.name,
2345              isp1362_hcd->atl_queue.buf_start,
2346              atl_buffers, isp1362_hcd->atl_queue.blk_size,
2347              isp1362_hcd->atl_queue.buf_size);
2348
2349         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2350
2351         return 0;
2352 }
2353
2354 static int isp1362_hc_reset(struct usb_hcd *hcd)
2355 {
2356         int ret = 0;
2357         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2358         unsigned long t;
2359         unsigned long timeout = 100;
2360         unsigned long flags;
2361         int clkrdy = 0;
2362
2363         pr_info("%s:\n", __func__);
2364
2365         if (isp1362_hcd->board && isp1362_hcd->board->reset) {
2366                 isp1362_hcd->board->reset(hcd->self.controller, 1);
2367                 msleep(20);
2368                 if (isp1362_hcd->board->clock)
2369                         isp1362_hcd->board->clock(hcd->self.controller, 1);
2370                 isp1362_hcd->board->reset(hcd->self.controller, 0);
2371         } else
2372                 isp1362_sw_reset(isp1362_hcd);
2373
2374         /* chip has been reset. First we need to see a clock */
2375         t = jiffies + msecs_to_jiffies(timeout);
2376         while (!clkrdy && time_before_eq(jiffies, t)) {
2377                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2378                 clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
2379                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2380                 if (!clkrdy)
2381                         msleep(4);
2382         }
2383
2384         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2385         isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
2386         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2387         if (!clkrdy) {
2388                 pr_err("Clock not ready after %lums\n", timeout);
2389                 ret = -ENODEV;
2390         }
2391         return ret;
2392 }
2393
/*
 * HCD stop callback: mask chip-level interrupts, power down the root
 * hub ports and put the controller back into reset.
 */
static void isp1362_hc_stop(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	u32 tmp;

	pr_info("%s:\n", __func__);

	/* stop root-hub polling before touching the hardware */
	del_timer_sync(&hcd->rh_timer);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* mask all chip-level (HCuPINT) interrupt sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* Switch off power for all ports */
	tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
	tmp &= ~(RH_A_NPS | RH_A_PSM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);

	/* Reset the chip */
	if (isp1362_hcd->board && isp1362_hcd->board->reset)
		isp1362_hcd->board->reset(hcd->self.controller, 1);
	else
		/* lock is already held, so use the unlocked variant */
		__isp1362_sw_reset(isp1362_hcd);

	/* gate the chip clock off if the board provides a clock hook */
	if (isp1362_hcd->board && isp1362_hcd->board->clock)
		isp1362_hcd->board->clock(hcd->self.controller, 0);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
2425
#ifdef CHIP_BUFFER_TEST
/*
 * Optional self test of the chip's internal buffer memory, compiled in
 * only with CHIP_BUFFER_TEST.  Allocates a reference pattern and a
 * readback buffer (two halves of one kmalloc'ed area) and verifies:
 *  1. short transfers of 0..7 bytes at byte offsets 0..3,
 *  2. a full-size write/readback of the whole buffer,
 *  3. PTD-header-sized transfers at 256 different word offsets, with a
 *     retry on mismatch (a single re-read that succeeds is only warned
 *     about, not treated as fatal).
 * Returns 0 on success or -ENODEV on an unrecoverable mismatch.
 */
static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
{
	int ret = 0;
	u16 *ref;
	unsigned long flags;

	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
	if (ref) {
		int offset;
		/* second half of the allocation is the readback buffer */
		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];

		/* fill reference and readback halves with distinct patterns */
		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
			ref[offset] = ~offset;
			tst[offset] = offset;
		}

		/* short transfers (0..7 bytes) at byte offsets 0..3 */
		for (offset = 0; offset < 4; offset++) {
			int j;

			for (j = 0; j < 8; j++) {
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

				if (memcmp(ref, tst, j)) {
					ret = -ENODEV;
					pr_err("%s: memory check with %d byte offset %d failed\n",
					    __func__, j, offset);
					dump_data((u8 *)ref + offset, j);
					dump_data((u8 *)tst + offset, j);
				}
			}
		}

		/* full-size write and readback of the whole buffer memory */
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
			ret = -ENODEV;
			pr_err("%s: memory check failed\n", __func__);
			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
		}

		/* PTD-header transfers at 256 different word offsets */
		for (offset = 0; offset < 256; offset++) {
			int test_size = 0;

			yield();

			/* clear chip memory and verify both halves read back zero */
			memset(tst, 0, ISP1362_BUF_SIZE);
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
				   ISP1362_BUF_SIZE / 2)) {
				pr_err("%s: Failed to clear buffer\n", __func__);
				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
				break;
			}
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
					     offset * 2 + PTD_HEADER_SIZE, test_size);
			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
					    PTD_HEADER_SIZE + test_size);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
				/* retry once; a transient mismatch is only warned about */
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
						    PTD_HEADER_SIZE + test_size);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
					ret = -ENODEV;
					pr_err("%s: memory check with offset %02x failed\n",
					    __func__, offset);
					break;
				}
				pr_warning("%s: memory check with offset %02x ok after second read\n",
				     __func__, offset);
			}
		}
		kfree(ref);
	}
	return ret;
}
#endif
2518
/*
 * HCD start callback: verify the chip ID, optionally run the buffer
 * memory self test, program the hardware configuration register from
 * platform data, configure the buffer memory layout, set up the root
 * hub registers and switch the OHCI core to the operational state.
 * Returns 0 on success or a negative errno.
 */
static int isp1362_hc_start(struct usb_hcd *hcd)
{
	int ret;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct isp1362_platform_data *board = isp1362_hcd->board;
	u16 hwcfg;
	u16 chipid;
	unsigned long flags;

	pr_info("%s:\n", __func__);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	/* bail out early if this doesn't look like an ISP1362 */
	if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
		pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
		return -ENODEV;
	}

#ifdef CHIP_BUFFER_TEST
	ret = isp1362_chip_test(isp1362_hcd);
	if (ret)
		return -ENODEV;
#endif
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* clear interrupt status and disable all interrupt sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* HW conf: assemble HCHWCFG from the board's platform data flags */
	hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
	if (board->sel15Kres)
		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
	if (board->clknotstop)
		hwcfg |= HCHWCFG_CLKNOTSTOP;
	if (board->oc_enable)
		hwcfg |= HCHWCFG_ANALOG_OC;
	if (board->int_act_high)
		hwcfg |= HCHWCFG_INT_POL;
	if (board->int_edge_triggered)
		hwcfg |= HCHWCFG_INT_TRIGGER;
	if (board->dreq_act_high)
		hwcfg |= HCHWCFG_DREQ_POL;
	if (board->dack_act_high)
		hwcfg |= HCHWCFG_DACK_POL;
	isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
	isp1362_show_reg(isp1362_hcd, HCHWCFG);
	/* DMA is not used by this driver */
	isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	ret = isp1362_mem_config(hcd);
	if (ret)
		return ret;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* Root hub conf */
	isp1362_hcd->rhdesca = 0;
	if (board->no_power_switching)
		isp1362_hcd->rhdesca |= RH_A_NPS;
	if (board->power_switching_mode)
		isp1362_hcd->rhdesca |= RH_A_PSM;
	/* power-on-to-power-good time; default to 25 * 2ms units */
	if (board->potpg)
		isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
	else
		isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;

	/* write descriptor A twice (OCPM cleared, then set), then read back */
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
	isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);

	isp1362_hcd->rhdescb = RH_B_PPCM;
	isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
	isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);

	/* program frame interval and low-speed threshold */
	isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
	isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
	isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	hcd->state = HC_STATE_RUNNING;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* Set up interrupts */
	isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
	isp1362_hcd->intenb |= OHCI_INTR_RD;
	isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
	isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);

	/* Go operational */
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	/* enable global power */
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return 0;
}
2622
2623 /*-------------------------------------------------------------------------*/
2624
/* hc_driver method table for the ISP1362: a USB 1.1 (HCD_USB11) host
 * with memory-mapped register access (HCD_MEMORY) */
static struct hc_driver isp1362_hc_driver = {
	.description =		hcd_name,
	.product_desc =		"ISP1362 Host Controller",
	.hcd_priv_size =	sizeof(struct isp1362_hcd),

	.irq =			isp1362_irq,
	.flags =		HCD_USB11 | HCD_MEMORY,

	.reset =		isp1362_hc_reset,
	.start =		isp1362_hc_start,
	.stop =			isp1362_hc_stop,

	.urb_enqueue =		isp1362_urb_enqueue,
	.urb_dequeue =		isp1362_urb_dequeue,
	.endpoint_disable =	isp1362_endpoint_disable,

	.get_frame_number =	isp1362_get_frame,

	.hub_status_data =	isp1362_hub_status_data,
	.hub_control =		isp1362_hub_control,
	.bus_suspend =		isp1362_bus_suspend,
	.bus_resume =		isp1362_bus_resume,
};
2648
2649 /*-------------------------------------------------------------------------*/
2650
2651 static int __devexit isp1362_remove(struct platform_device *pdev)
2652 {
2653         struct usb_hcd *hcd = platform_get_drvdata(pdev);
2654         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2655         struct resource *res;
2656
2657         remove_debug_file(isp1362_hcd);
2658         DBG(0, "%s: Removing HCD\n", __func__);
2659         usb_remove_hcd(hcd);
2660
2661         DBG(0, "%s: Unmapping data_reg @ %p\n", __func__,
2662             isp1362_hcd->data_reg);
2663         iounmap(isp1362_hcd->data_reg);
2664
2665         DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__,
2666             isp1362_hcd->addr_reg);
2667         iounmap(isp1362_hcd->addr_reg);
2668
2669         res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2670         DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
2671         if (res)
2672                 release_mem_region(res->start, resource_size(res));
2673
2674         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2675         DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
2676         if (res)
2677                 release_mem_region(res->start, resource_size(res));
2678
2679         DBG(0, "%s: put_hcd\n", __func__);
2680         usb_put_hcd(hcd);
2681         DBG(0, "%s: Done\n", __func__);
2682
2683         return 0;
2684 }
2685
/*
 * Platform probe callback: map the two register windows (MEM resource
 * 0 = data register, MEM resource 1 = address register), create and
 * register the HCD with the IRQ trigger type derived from the IRQ
 * resource flags, and create the /proc debug entry.  Errors unwind
 * the acquired resources via the goto chain at the bottom.
 */
static int __devinit isp1362_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct isp1362_hcd *isp1362_hcd;
	struct resource *addr, *data;
	void __iomem *addr_reg;
	void __iomem *data_reg;
	int irq;
	int retval = 0;
	struct resource *irq_res;
	unsigned int irq_flags = 0;

	/* basic sanity checks first.  board-specific init logic should
	 * have initialized this the three resources and probably board
	 * specific platform_data.  we don't probe for IRQs, and do only
	 * minimal sanity checking.
	 */
	if (pdev->num_resources < 3) {
		retval = -ENODEV;
		goto err1;
	}

	data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!addr || !data || !irq_res) {
		retval = -ENODEV;
		goto err1;
	}
	irq = irq_res->start;

	/* this driver is PIO only; refuse devices configured for DMA */
	if (pdev->dev.dma_mask) {
		DBG(1, "won't do DMA");
		retval = -ENODEV;
		goto err1;
	}

	if (!request_mem_region(addr->start, resource_size(addr), hcd_name)) {
		retval = -EBUSY;
		goto err1;
	}
	addr_reg = ioremap(addr->start, resource_size(addr));
	if (addr_reg == NULL) {
		retval = -ENOMEM;
		goto err2;
	}

	if (!request_mem_region(data->start, resource_size(data), hcd_name)) {
		retval = -EBUSY;
		goto err3;
	}
	data_reg = ioremap(data->start, resource_size(data));
	if (data_reg == NULL) {
		retval = -ENOMEM;
		goto err4;
	}

	/* allocate and initialize hcd */
	hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		retval = -ENOMEM;
		goto err5;
	}
	hcd->rsrc_start = data->start;
	isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	isp1362_hcd->data_reg = data_reg;
	isp1362_hcd->addr_reg = addr_reg;

	isp1362_hcd->next_statechange = jiffies;
	spin_lock_init(&isp1362_hcd->lock);
	INIT_LIST_HEAD(&isp1362_hcd->async);
	INIT_LIST_HEAD(&isp1362_hcd->periodic);
	INIT_LIST_HEAD(&isp1362_hcd->isoc);
	INIT_LIST_HEAD(&isp1362_hcd->remove_list);
	isp1362_hcd->board = pdev->dev.platform_data;
#if USE_PLATFORM_DELAY
	/* platform delay mode requires the board to supply a delay hook */
	if (!isp1362_hcd->board->delay) {
		dev_err(hcd->self.controller, "No platform delay function given\n");
		retval = -ENODEV;
		goto err6;
	}
#endif

	/* translate the IRQ resource flags into request_irq trigger flags */
	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
		irq_flags |= IRQF_TRIGGER_RISING;
	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
		irq_flags |= IRQF_TRIGGER_FALLING;
	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
		irq_flags |= IRQF_TRIGGER_HIGH;
	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
		irq_flags |= IRQF_TRIGGER_LOW;

	retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_DISABLED | IRQF_SHARED);
	if (retval != 0)
		goto err6;
	pr_info("%s, irq %d\n", hcd->product_desc, irq);

	create_debug_file(isp1362_hcd);

	return 0;

 err6:
	DBG(0, "%s: Freeing dev %p\n", __func__, isp1362_hcd);
	usb_put_hcd(hcd);
 err5:
	DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, data_reg);
	iounmap(data_reg);
 err4:
	DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start);
	release_mem_region(data->start, resource_size(data));
 err3:
	DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg);
	iounmap(addr_reg);
 err2:
	DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start);
	release_mem_region(addr->start, resource_size(addr));
 err1:
	pr_err("%s: init error, %d\n", __func__, retval);

	return retval;
}
2807
2808 #ifdef  CONFIG_PM
2809 static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
2810 {
2811         struct usb_hcd *hcd = platform_get_drvdata(pdev);
2812         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2813         unsigned long flags;
2814         int retval = 0;
2815
2816         DBG(0, "%s: Suspending device\n", __func__);
2817
2818         if (state.event == PM_EVENT_FREEZE) {
2819                 DBG(0, "%s: Suspending root hub\n", __func__);
2820                 retval = isp1362_bus_suspend(hcd);
2821         } else {
2822                 DBG(0, "%s: Suspending RH ports\n", __func__);
2823                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2824                 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2825                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2826         }
2827         if (retval == 0)
2828                 pdev->dev.power.power_state = state;
2829         return retval;
2830 }
2831
2832 static int isp1362_resume(struct platform_device *pdev)
2833 {
2834         struct usb_hcd *hcd = platform_get_drvdata(pdev);
2835         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2836         unsigned long flags;
2837
2838         DBG(0, "%s: Resuming\n", __func__);
2839
2840         if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2841                 DBG(0, "%s: Resume RH ports\n", __func__);
2842                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2843                 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
2844                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2845                 return 0;
2846         }
2847
2848         pdev->dev.power.power_state = PMSG_ON;
2849
2850         return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
2851 }
2852 #else
2853 #define isp1362_suspend NULL
2854 #define isp1362_resume  NULL
2855 #endif
2856
/*
 * Platform driver glue.  Uses the legacy platform-bus .suspend/.resume
 * hooks (defined NULL when !CONFIG_PM) rather than dev_pm_ops.
 */
static struct platform_driver isp1362_driver = {
	.probe = isp1362_probe,
	.remove = __devexit_p(isp1362_remove),

	.suspend = isp1362_suspend,
	.resume = isp1362_resume,
	.driver = {
		/* cast drops const; .name is plain char* in this kernel */
		.name = (char *)hcd_name,
		.owner = THIS_MODULE,
	},
};
2868
2869 /*-------------------------------------------------------------------------*/
2870
2871 static int __init isp1362_init(void)
2872 {
2873         if (usb_disabled())
2874                 return -ENODEV;
2875         pr_info("driver %s, %s\n", hcd_name, DRIVER_VERSION);
2876         return platform_driver_register(&isp1362_driver);
2877 }
2878 module_init(isp1362_init);
2879
/* Module exit point: unregister the platform driver. */
static void __exit isp1362_cleanup(void)
{
	platform_driver_unregister(&isp1362_driver);
}
module_exit(isp1362_cleanup);