UHCI: Add macros for computing DMA values
[pandora-kernel.git] drivers/usb/host/uhci-q.c
/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2006 Alan Stern, stern@rowland.harvard.edu
 */


/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
        if (uhci->is_stopped)
                mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
        uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
        uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}


/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
 */
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
        uhci->fsbr_is_on = 1;
        uhci->skel_term_qh->link = LINK_TO_QH(uhci->skel_fs_control_qh);
}

static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
        uhci->fsbr_is_on = 0;
        uhci->skel_term_qh->link = UHCI_PTR_TERM;
}

static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = urb->hcpriv;

        if (!(urb->transfer_flags & URB_NO_FSBR))
                urbp->fsbr = 1;
}

static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
{
        if (urbp->fsbr) {
                uhci->fsbr_is_wanted = 1;
                if (!uhci->fsbr_is_on)
                        uhci_fsbr_on(uhci);
                else if (uhci->fsbr_expiring) {
                        uhci->fsbr_expiring = 0;
                        del_timer(&uhci->fsbr_timer);
                }
        }
}

static void uhci_fsbr_timeout(unsigned long _uhci)
{
        struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
        unsigned long flags;

        spin_lock_irqsave(&uhci->lock, flags);
        if (uhci->fsbr_expiring) {
                uhci->fsbr_expiring = 0;
                uhci_fsbr_off(uhci);
        }
        spin_unlock_irqrestore(&uhci->lock, flags);
}
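
/*
 * FSBR state summary: while fsbr_is_on is set, the terminating skeleton
 * QH loops back to the full-speed control queue, so the controller keeps
 * polling those queues instead of idling for the rest of each frame.
 * fsbr_is_wanted is set whenever an FSBR-marked URB's queue advances;
 * fsbr_expiring together with fsbr_timer provides the short grace
 * period before FSBR is finally switched off again.
 */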


static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
        dma_addr_t dma_handle;
        struct uhci_td *td;

        td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
        if (!td)
                return NULL;

        td->dma_handle = dma_handle;
        td->frame = -1;

        INIT_LIST_HEAD(&td->list);
        INIT_LIST_HEAD(&td->fl_list);

        return td;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
        if (!list_empty(&td->list))
                dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
        if (!list_empty(&td->fl_list))
                dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

        dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static inline void uhci_fill_td(struct uhci_td *td, u32 status,
                u32 token, u32 buffer)
{
        td->status = cpu_to_le32(status);
        td->token = cpu_to_le32(token);
        td->buffer = cpu_to_le32(buffer);
}
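
/*
 * The status, token, and buffer words are converted to little-endian
 * because the host controller reads TDs from memory in that format.
 * For example, uhci_submit_control() below builds its SETUP packet with
 * uhci_fill_td(td, status, destination | uhci_explen(8), urb->setup_dma).
 * Note that the link word is left untouched here; callers set it (after
 * a wmb()) when they chain the TD into a hardware-visible list.
 */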

static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
        list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
        list_del_init(&td->list);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
                struct uhci_td *td, unsigned framenum)
{
        framenum &= (UHCI_NUMFRAMES - 1);

        td->frame = framenum;

        /* Is there a TD already mapped there? */
        if (uhci->frame_cpu[framenum]) {
                struct uhci_td *ftd, *ltd;

                ftd = uhci->frame_cpu[framenum];
                ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

                list_add_tail(&td->fl_list, &ftd->fl_list);

                td->link = ltd->link;
                wmb();
                ltd->link = LINK_TO_TD(td);
        } else {
                td->link = uhci->frame[framenum];
                wmb();
                uhci->frame[framenum] = LINK_TO_TD(td);
                uhci->frame_cpu[framenum] = td;
        }
}
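
/*
 * In both branches above, the new TD's own link pointer is filled in
 * first, and the wmb() makes that store globally visible before the
 * previous tail TD (or the frame pointer itself) is redirected at the
 * new TD.  The controller therefore never follows a link into a TD
 * whose link word is still stale.
 */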

static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
                struct uhci_td *td)
{
        /* If it's not inserted, don't remove it */
        if (td->frame == -1) {
                WARN_ON(!list_empty(&td->fl_list));
                return;
        }

        if (uhci->frame_cpu[td->frame] == td) {
                if (list_empty(&td->fl_list)) {
                        uhci->frame[td->frame] = td->link;
                        uhci->frame_cpu[td->frame] = NULL;
                } else {
                        struct uhci_td *ntd;

                        ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
                        uhci->frame[td->frame] = LINK_TO_TD(ntd);
                        uhci->frame_cpu[td->frame] = ntd;
                }
        } else {
                struct uhci_td *ptd;

                ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
                ptd->link = td->link;
        }

        list_del_init(&td->fl_list);
        td->frame = -1;
}

static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
                unsigned int framenum)
{
        struct uhci_td *ftd, *ltd;

        framenum &= (UHCI_NUMFRAMES - 1);

        ftd = uhci->frame_cpu[framenum];
        if (ftd) {
                ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
                uhci->frame[framenum] = ltd->link;
                uhci->frame_cpu[framenum] = NULL;

                while (!list_empty(&ftd->fl_list))
                        list_del_init(ftd->fl_list.prev);
        }
}

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
        struct uhci_td *td;

        list_for_each_entry(td, &urbp->td_list, list)
                uhci_remove_td_from_frame_list(uhci, td);
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
                struct usb_device *udev, struct usb_host_endpoint *hep)
{
        dma_addr_t dma_handle;
        struct uhci_qh *qh;

        qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
        if (!qh)
                return NULL;

        memset(qh, 0, sizeof(*qh));
        qh->dma_handle = dma_handle;

        qh->element = UHCI_PTR_TERM;
        qh->link = UHCI_PTR_TERM;

        INIT_LIST_HEAD(&qh->queue);
        INIT_LIST_HEAD(&qh->node);

        if (udev) {             /* Normal QH */
                qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
                if (qh->type != USB_ENDPOINT_XFER_ISOC) {
                        qh->dummy_td = uhci_alloc_td(uhci);
                        if (!qh->dummy_td) {
                                dma_pool_free(uhci->qh_pool, qh, dma_handle);
                                return NULL;
                        }
                }
                qh->state = QH_STATE_IDLE;
                qh->hep = hep;
                qh->udev = udev;
                hep->hcpriv = qh;

                if (qh->type == USB_ENDPOINT_XFER_INT ||
                                qh->type == USB_ENDPOINT_XFER_ISOC)
                        qh->load = usb_calc_bus_time(udev->speed,
                                        usb_endpoint_dir_in(&hep->desc),
                                        qh->type == USB_ENDPOINT_XFER_ISOC,
                                        le16_to_cpu(hep->desc.wMaxPacketSize))
                                / 1000 + 1;

        } else {                /* Skeleton QH */
                qh->state = QH_STATE_ACTIVE;
                qh->type = -1;
        }
        return qh;
}

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
        if (!list_empty(&qh->queue))
                dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);

        list_del(&qh->node);
        if (qh->udev) {
                qh->hep->hcpriv = NULL;
                if (qh->dummy_td)
                        uhci_free_td(uhci, qh->dummy_td);
        }
        dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 *
 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 */
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
                struct urb *urb)
{
        struct urb_priv *urbp = urb->hcpriv;
        struct uhci_td *td;
        int ret = 1;

        /* Isochronous pipes don't use toggles and their TD link pointers
         * get adjusted during uhci_urb_dequeue().  But since their queues
         * cannot truly be stopped, we have to watch out for dequeues
         * occurring after the nominal unlink frame. */
        if (qh->type == USB_ENDPOINT_XFER_ISOC) {
                ret = (uhci->frame_number + uhci->is_stopped !=
                                qh->unlink_frame);
                goto done;
        }

        /* If the URB isn't first on its queue, adjust the link pointer
         * of the last TD in the previous URB.  The toggle doesn't need
         * to be saved since this URB can't be executing yet. */
        if (qh->queue.next != &urbp->node) {
                struct urb_priv *purbp;
                struct uhci_td *ptd;

                purbp = list_entry(urbp->node.prev, struct urb_priv, node);
                WARN_ON(list_empty(&purbp->td_list));
                ptd = list_entry(purbp->td_list.prev, struct uhci_td,
                                list);
                td = list_entry(urbp->td_list.prev, struct uhci_td,
                                list);
                ptd->link = td->link;
                goto done;
        }

        /* If the QH element pointer is UHCI_PTR_TERM then the currently
         * executing URB has already been unlinked, so this one isn't it. */
        if (qh_element(qh) == UHCI_PTR_TERM)
                goto done;
        qh->element = UHCI_PTR_TERM;

        /* Control pipes don't have to worry about toggles */
        if (qh->type == USB_ENDPOINT_XFER_CONTROL)
                goto done;

        /* Save the next toggle value */
        WARN_ON(list_empty(&urbp->td_list));
        td = list_entry(urbp->td_list.next, struct uhci_td, list);
        qh->needs_fixup = 1;
        qh->initial_toggle = uhci_toggle(td_token(td));

done:
        return ret;
}

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
        struct urb_priv *urbp = NULL;
        struct uhci_td *td;
        unsigned int toggle = qh->initial_toggle;
        unsigned int pipe;

        /* Fixups for a short transfer start with the second URB in the
         * queue (the short URB is the first). */
        if (skip_first)
                urbp = list_entry(qh->queue.next, struct urb_priv, node);

        /* When starting with the first URB, if the QH element pointer is
         * still valid then we know the URB's toggles are okay. */
        else if (qh_element(qh) != UHCI_PTR_TERM)
                toggle = 2;

        /* Fix up the toggle for the URBs in the queue.  Normally this
         * loop won't run more than once: When an error or short transfer
         * occurs, the queue usually gets emptied. */
        urbp = list_prepare_entry(urbp, &qh->queue, node);
        list_for_each_entry_continue(urbp, &qh->queue, node) {

                /* If the first TD has the right toggle value, we don't
                 * need to change any toggles in this URB */
                td = list_entry(urbp->td_list.next, struct uhci_td, list);
                if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
                        td = list_entry(urbp->td_list.prev, struct uhci_td,
                                        list);
                        toggle = uhci_toggle(td_token(td)) ^ 1;

                /* Otherwise all the toggles in the URB have to be switched */
                } else {
                        list_for_each_entry(td, &urbp->td_list, list) {
                                td->token ^= __constant_cpu_to_le32(
                                                        TD_TOKEN_TOGGLE);
                                toggle ^= 1;
                        }
                }
        }

        wmb();
        pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
        usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
                        usb_pipeout(pipe), toggle);
        qh->needs_fixup = 0;
}
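
/*
 * Worked example: suppose the first URB ends early with its last TD
 * carrying DATA0, so qh->initial_toggle becomes 1.  If the next URB's
 * first TD already uses DATA1 it is left alone and the expected toggle
 * is recomputed from its last TD; otherwise every TD in that URB has
 * its toggle bit flipped.  The value finally written back through
 * usb_settoggle() is whatever the TD following the queue's last one
 * should use.
 */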

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh;

        WARN_ON(list_empty(&qh->queue));

        /* Set the element pointer if it isn't set already.
         * This isn't needed for Isochronous queues, but it doesn't hurt. */
        if (qh_element(qh) == UHCI_PTR_TERM) {
                struct urb_priv *urbp = list_entry(qh->queue.next,
                                struct urb_priv, node);
                struct uhci_td *td = list_entry(urbp->td_list.next,
                                struct uhci_td, list);

                qh->element = LINK_TO_TD(td);
        }

        /* Treat the queue as if it has just advanced */
        qh->wait_expired = 0;
        qh->advance_jiffies = jiffies;

        if (qh->state == QH_STATE_ACTIVE)
                return;
        qh->state = QH_STATE_ACTIVE;

        /* Move the QH from its old list to the end of the appropriate
         * skeleton's list */
        if (qh == uhci->next_qh)
                uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
                                node);
        list_move_tail(&qh->node, &qh->skel->node);

        /* Link it into the schedule */
        pqh = list_entry(qh->node.prev, struct uhci_qh, node);
        qh->link = pqh->link;
        wmb();
        pqh->link = LINK_TO_QH(qh);
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh;

        if (qh->state == QH_STATE_UNLINKING)
                return;
        WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
        qh->state = QH_STATE_UNLINKING;

        /* Unlink the QH from the schedule and record when we did it */
        pqh = list_entry(qh->node.prev, struct uhci_qh, node);
        pqh->link = qh->link;
        mb();

        uhci_get_current_frame_number(uhci);
        qh->unlink_frame = uhci->frame_number;

        /* Force an interrupt so we know when the QH is fully unlinked */
        if (list_empty(&uhci->skel_unlink_qh->node))
                uhci_set_next_interrupt(uhci);

        /* Move the QH from its old list to the end of the unlinking list */
        if (qh == uhci->next_qh)
                uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
                                node);
        list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        WARN_ON(qh->state == QH_STATE_ACTIVE);

        if (qh == uhci->next_qh)
                uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
                                node);
        list_move(&qh->node, &uhci->idle_qh_list);
        qh->state = QH_STATE_IDLE;

        /* Now that the QH is idle, its post_td isn't being used */
        if (qh->post_td) {
                uhci_free_td(uhci, qh->post_td);
                qh->post_td = NULL;
        }

        /* If anyone is waiting for a QH to become idle, wake them up */
        if (uhci->num_waiting)
                wake_up_all(&uhci->waitqh);
}

/*
 * Find the highest existing bandwidth load for a given phase and period.
 */
static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
{
        int highest_load = uhci->load[phase];

        for (phase += period; phase < MAX_PHASE; phase += period)
                highest_load = max_t(int, highest_load, uhci->load[phase]);
        return highest_load;
}
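
/*
 * For example, with period 4 a transfer placed at phase 1 occupies
 * frames 1, 5, 9, ... of each MAX_PHASE-frame window, so its worst-case
 * cost is the largest of uhci->load[1], uhci->load[5], uhci->load[9],
 * and so on.  uhci_check_bandwidth() below evaluates each candidate
 * phase this way and keeps the one with the smallest maximum load.
 */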

/*
 * Set qh->phase to the optimal phase for a periodic transfer and
 * check whether the bandwidth requirement is acceptable.
 */
static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        int minimax_load;

        /* Find the optimal phase (unless it is already set) and get
         * its load value. */
        if (qh->phase >= 0)
                minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
        else {
                int phase, load;
                int max_phase = min_t(int, MAX_PHASE, qh->period);

                qh->phase = 0;
                minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
                for (phase = 1; phase < max_phase; ++phase) {
                        load = uhci_highest_load(uhci, phase, qh->period);
                        if (load < minimax_load) {
                                minimax_load = load;
                                qh->phase = phase;
                        }
                }
        }

        /* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
        if (minimax_load + qh->load > 900) {
                dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
                                "period %d, phase %d, %d + %d us\n",
                                qh->period, qh->phase, minimax_load, qh->load);
                return -ENOSPC;
        }
        return 0;
}

/*
 * Reserve a periodic QH's bandwidth in the schedule
 */
static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        int i;
        int load = qh->load;
        char *p = "??";

        for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
                uhci->load[i] += load;
                uhci->total_load += load;
        }
        uhci_to_hcd(uhci)->self.bandwidth_allocated =
                        uhci->total_load / MAX_PHASE;
        switch (qh->type) {
        case USB_ENDPOINT_XFER_INT:
                ++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
                p = "INT";
                break;
        case USB_ENDPOINT_XFER_ISOC:
                ++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
                p = "ISO";
                break;
        }
        qh->bandwidth_reserved = 1;
        dev_dbg(uhci_dev(uhci),
                        "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
                        "reserve", qh->udev->devnum,
                        qh->hep->desc.bEndpointAddress, p,
                        qh->period, qh->phase, load);
}

/*
 * Release a periodic QH's bandwidth reservation
 */
static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        int i;
        int load = qh->load;
        char *p = "??";

        for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
                uhci->load[i] -= load;
                uhci->total_load -= load;
        }
        uhci_to_hcd(uhci)->self.bandwidth_allocated =
                        uhci->total_load / MAX_PHASE;
        switch (qh->type) {
        case USB_ENDPOINT_XFER_INT:
                --uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
                p = "INT";
                break;
        case USB_ENDPOINT_XFER_ISOC:
                --uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
                p = "ISO";
                break;
        }
        qh->bandwidth_reserved = 0;
        dev_dbg(uhci_dev(uhci),
                        "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
                        "release", qh->udev->devnum,
                        qh->hep->desc.bEndpointAddress, p,
                        qh->period, qh->phase, load);
}

static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
                struct urb *urb)
{
        struct urb_priv *urbp;

        urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
        if (!urbp)
                return NULL;

        urbp->urb = urb;
        urb->hcpriv = urbp;

        INIT_LIST_HEAD(&urbp->node);
        INIT_LIST_HEAD(&urbp->td_list);

        return urbp;
}

static void uhci_free_urb_priv(struct uhci_hcd *uhci,
                struct urb_priv *urbp)
{
        struct uhci_td *td, *tmp;

        if (!list_empty(&urbp->node))
                dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
                                urbp->urb);

        list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
                uhci_remove_td_from_urbp(td);
                uhci_free_td(uhci, td);
        }

        urbp->urb->hcpriv = NULL;
        kmem_cache_free(uhci_up_cachep, urbp);
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
        if (!status)
                return 0;
        if (status & TD_CTRL_BITSTUFF)                  /* Bitstuff error */
                return -EPROTO;
        if (status & TD_CTRL_CRCTIMEO) {                /* CRC/Timeout */
                if (dir_out)
                        return -EPROTO;
                else
                        return -EILSEQ;
        }
        if (status & TD_CTRL_BABBLE)                    /* Babble */
                return -EOVERFLOW;
        if (status & TD_CTRL_DBUFERR)                   /* Buffer error */
                return -ENOSR;
        if (status & TD_CTRL_STALLED)                   /* Stalled */
                return -EPIPE;
        return 0;
}

/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        struct uhci_td *td;
        unsigned long destination, status;
        int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
        int len = urb->transfer_buffer_length;
        dma_addr_t data = urb->transfer_dma;
        __le32 *plink;
        struct urb_priv *urbp = urb->hcpriv;

        /* The "pipe" thing contains the destination in bits 8--18 */
        destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

        /* 3 errors, dummy TD remains inactive */
        status = uhci_maxerr(3);
        if (urb->dev->speed == USB_SPEED_LOW)
                status |= TD_CTRL_LS;

        /*
         * Build the TD for the control request setup packet
         */
        td = qh->dummy_td;
        uhci_add_td_to_urbp(td, urbp);
        uhci_fill_td(td, status, destination | uhci_explen(8),
                        urb->setup_dma);
        plink = &td->link;
        status |= TD_CTRL_ACTIVE;

        /*
         * If direction is "send", change the packet ID from SETUP (0x2D)
         * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
         * set Short Packet Detect (SPD) for all data packets.
         */
        if (usb_pipeout(urb->pipe))
                destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
        else {
                destination ^= (USB_PID_SETUP ^ USB_PID_IN);
                status |= TD_CTRL_SPD;
        }

        /*
         * Build the DATA TDs
         */
        while (len > 0) {
                int pktsze = min(len, maxsze);

                td = uhci_alloc_td(uhci);
                if (!td)
                        goto nomem;
                *plink = LINK_TO_TD(td);

                /* Alternate Data0/1 (start with Data1) */
                destination ^= TD_TOKEN_TOGGLE;

                uhci_add_td_to_urbp(td, urbp);
                uhci_fill_td(td, status, destination | uhci_explen(pktsze),
                                data);
                plink = &td->link;

                data += pktsze;
                len -= pktsze;
        }

        /*
         * Build the final TD for control status
         */
        td = uhci_alloc_td(uhci);
        if (!td)
                goto nomem;
        *plink = LINK_TO_TD(td);

        /*
         * It's IN if the pipe is an output pipe or we're not expecting
         * data back.
         */
        destination &= ~TD_TOKEN_PID_MASK;
        if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
                destination |= USB_PID_IN;
        else
                destination |= USB_PID_OUT;

        destination |= TD_TOKEN_TOGGLE;         /* End in Data1 */

        status &= ~TD_CTRL_SPD;

        uhci_add_td_to_urbp(td, urbp);
        uhci_fill_td(td, status | TD_CTRL_IOC,
                        destination | uhci_explen(0), 0);
        plink = &td->link;

        /*
         * Build the new dummy TD and activate the old one
         */
        td = uhci_alloc_td(uhci);
        if (!td)
                goto nomem;
        *plink = LINK_TO_TD(td);

        uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
        wmb();
        qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
        qh->dummy_td = td;

        /* Low-speed transfers get a different queue, and won't hog the bus.
         * Also, some devices enumerate better without FSBR; the easiest way
         * to do that is to put URBs on the low-speed queue while the device
         * isn't in the CONFIGURED state. */
        if (urb->dev->speed == USB_SPEED_LOW ||
                        urb->dev->state != USB_STATE_CONFIGURED)
                qh->skel = uhci->skel_ls_control_qh;
        else {
                qh->skel = uhci->skel_fs_control_qh;
                uhci_add_fsbr(uhci, urb);
        }

        urb->actual_length = -8;        /* Account for the SETUP packet */
        return 0;

nomem:
        /* Remove the dummy TD from the td_list so it doesn't get freed */
        uhci_remove_td_from_urbp(qh->dummy_td);
        return -ENOMEM;
}
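
/*
 * The resulting TD chain for a control URB is therefore:
 *
 *      SETUP (8 bytes, DATA0) -> DATA stage (DATA1, DATA0, ...) ->
 *      STATUS (opposite direction, DATA1) -> new inactive dummy TD
 *
 * The old dummy TD becomes the SETUP TD and is only marked ACTIVE
 * after the whole chain is in place, so the controller cannot start
 * executing a half-built transfer.
 */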

/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        struct uhci_td *td;
        unsigned long destination, status;
        int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
        int len = urb->transfer_buffer_length;
        dma_addr_t data = urb->transfer_dma;
        __le32 *plink;
        struct urb_priv *urbp = urb->hcpriv;
        unsigned int toggle;

        if (len < 0)
                return -EINVAL;

        /* The "pipe" thing contains the destination in bits 8--18 */
        destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
        toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                         usb_pipeout(urb->pipe));

        /* 3 errors, dummy TD remains inactive */
        status = uhci_maxerr(3);
        if (urb->dev->speed == USB_SPEED_LOW)
                status |= TD_CTRL_LS;
        if (usb_pipein(urb->pipe))
                status |= TD_CTRL_SPD;

        /*
         * Build the DATA TDs
         */
        plink = NULL;
        td = qh->dummy_td;
        do {    /* Allow zero length packets */
                int pktsze = maxsze;

                if (len <= pktsze) {            /* The last packet */
                        pktsze = len;
                        if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
                                status &= ~TD_CTRL_SPD;
                }

                if (plink) {
                        td = uhci_alloc_td(uhci);
                        if (!td)
                                goto nomem;
                        *plink = LINK_TO_TD(td);
                }
                uhci_add_td_to_urbp(td, urbp);
                uhci_fill_td(td, status,
                                destination | uhci_explen(pktsze) |
                                        (toggle << TD_TOKEN_TOGGLE_SHIFT),
                                data);
                plink = &td->link;
                status |= TD_CTRL_ACTIVE;

                data += pktsze;
                len -= maxsze;
                toggle ^= 1;
        } while (len > 0);

        /*
         * URB_ZERO_PACKET means adding a 0-length packet, if direction
         * is OUT and the transfer_length was an exact multiple of maxsze,
         * hence (len = transfer_length - N * maxsze) == 0
         * however, if transfer_length == 0, the zero packet was already
         * prepared above.
         */
        if ((urb->transfer_flags & URB_ZERO_PACKET) &&
                        usb_pipeout(urb->pipe) && len == 0 &&
                        urb->transfer_buffer_length > 0) {
                td = uhci_alloc_td(uhci);
                if (!td)
                        goto nomem;
                *plink = LINK_TO_TD(td);

                uhci_add_td_to_urbp(td, urbp);
                uhci_fill_td(td, status,
                                destination | uhci_explen(0) |
                                        (toggle << TD_TOKEN_TOGGLE_SHIFT),
                                data);
                plink = &td->link;

                toggle ^= 1;
        }

        /* Set the interrupt-on-completion flag on the last packet.
         * A more-or-less typical 4 KB URB (= size of one memory page)
         * will require about 3 ms to transfer; that's a little on the
         * fast side but not enough to justify delaying an interrupt
         * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
         * flag setting. */
        td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

        /*
         * Build the new dummy TD and activate the old one
         */
        td = uhci_alloc_td(uhci);
        if (!td)
                goto nomem;
        *plink = LINK_TO_TD(td);

        uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
        wmb();
        qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
        qh->dummy_td = td;

        usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                        usb_pipeout(urb->pipe), toggle);
        return 0;

nomem:
        /* Remove the dummy TD from the td_list so it doesn't get freed */
        uhci_remove_td_from_urbp(qh->dummy_td);
        return -ENOMEM;
}
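
/*
 * Note the Short Packet Detect handling above: SPD is set on every
 * input TD so that a short read stops the queue at once, but it is
 * cleared again on the final TD unless the caller passed
 * URB_SHORT_NOT_OK, because a short final packet is then just a
 * normal completion.
 */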

static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        int ret;

        /* Can't have low-speed bulk transfers */
        if (urb->dev->speed == USB_SPEED_LOW)
                return -EINVAL;

        qh->skel = uhci->skel_bulk_qh;
        ret = uhci_submit_common(uhci, urb, qh);
        if (ret == 0)
                uhci_add_fsbr(uhci, urb);
        return ret;
}

static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        int ret;

        /* USB 1.1 interrupt transfers only involve one packet per interval.
         * Drivers can submit URBs of any length, but longer ones will need
         * multiple intervals to complete.
         */

        if (!qh->bandwidth_reserved) {
                int exponent;

                /* Figure out which power-of-two queue to use */
                for (exponent = 7; exponent >= 0; --exponent) {
                        if ((1 << exponent) <= urb->interval)
                                break;
                }
                if (exponent < 0)
                        return -EINVAL;
                qh->period = 1 << exponent;
                qh->skel = uhci->skelqh[UHCI_SKEL_INDEX(exponent)];

                /* For now, interrupt phase is fixed by the layout
                 * of the QH lists. */
                qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
                ret = uhci_check_bandwidth(uhci, qh);
                if (ret)
                        return ret;
        } else if (qh->period > urb->interval)
                return -EINVAL;         /* Can't decrease the period */

        ret = uhci_submit_common(uhci, urb, qh);
        if (ret == 0) {
                urb->interval = qh->period;
                if (!qh->bandwidth_reserved)
                        uhci_reserve_bandwidth(uhci, qh);
        }
        return ret;
}
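
/*
 * Example of the rounding above: a requested interval of 10 ms falls
 * between 2^3 and 2^4, so the loop picks exponent 3 and the endpoint
 * is polled every 8 frames; urb->interval is then rewritten to 8 so
 * the driver sees the period actually granted.
 */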

/*
 * Fix up the data structures following a short transfer
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
                struct uhci_qh *qh, struct urb_priv *urbp)
{
        struct uhci_td *td;
        struct list_head *tmp;
        int ret;

        td = list_entry(urbp->td_list.prev, struct uhci_td, list);
        if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

                /* When a control transfer is short, we have to restart
                 * the queue at the status stage transaction, which is
                 * the last TD. */
                WARN_ON(list_empty(&urbp->td_list));
                qh->element = LINK_TO_TD(td);
                tmp = td->list.prev;
                ret = -EINPROGRESS;

        } else {

                /* When a bulk/interrupt transfer is short, we have to
                 * fix up the toggles of the following URBs on the queue
                 * before restarting the queue at the next URB. */
                qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
                uhci_fixup_toggles(qh, 1);

                if (list_empty(&urbp->td_list))
                        td = qh->post_td;
                qh->element = td->link;
                tmp = urbp->td_list.prev;
                ret = 0;
        }

        /* Remove all the TDs we skipped over, from tmp back to the start */
        while (tmp != &urbp->td_list) {
                td = list_entry(tmp, struct uhci_td, list);
                tmp = tmp->prev;

                uhci_remove_td_from_urbp(td);
                uhci_free_td(uhci, td);
        }
        return ret;
}

/*
 * Common result routine for control, bulk, and interrupt transfers.
 * Returns -EINPROGRESS while the URB is still active, 0 when it has
 * completed normally, a negative error code when the queue stopped on
 * an error, or the result of fixing up a short transfer.
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = urb->hcpriv;
        struct uhci_qh *qh = urbp->qh;
        struct uhci_td *td, *tmp;
        unsigned status;
        int ret = 0;

        list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
                unsigned int ctrlstat;
                int len;

                ctrlstat = td_status(td);
                status = uhci_status_bits(ctrlstat);
                if (status & TD_CTRL_ACTIVE)
                        return -EINPROGRESS;

                len = uhci_actual_length(ctrlstat);
                urb->actual_length += len;

                if (status) {
                        ret = uhci_map_status(status,
                                        uhci_packetout(td_token(td)));
                        if ((debug == 1 && ret != -EPIPE) || debug > 1) {
                                /* Some debugging code */
                                dev_dbg(&urb->dev->dev,
                                                "%s: failed with status %x\n",
                                                __FUNCTION__, status);

                                if (debug > 1 && errbuf) {
                                        /* Print the chain for debugging */
                                        uhci_show_qh(urbp->qh, errbuf,
                                                        ERRBUF_LEN, 0);
                                        lprintk(errbuf);
                                }
                        }

                } else if (len < uhci_expected_length(td_token(td))) {

                        /* We received a short packet */
                        if (urb->transfer_flags & URB_SHORT_NOT_OK)
                                ret = -EREMOTEIO;

                        /* Fixup needed only if this isn't the URB's last TD */
                        else if (&td->list != urbp->td_list.prev)
                                ret = 1;
                }

                uhci_remove_td_from_urbp(td);
                if (qh->post_td)
                        uhci_free_td(uhci, qh->post_td);
                qh->post_td = td;

                if (ret != 0)
                        goto err;
        }
        return ret;

err:
        if (ret < 0) {
                /* In case a control transfer gets an error
                 * during the setup stage */
                urb->actual_length = max(urb->actual_length, 0);

                /* Note that the queue has stopped and save
                 * the next toggle value */
                qh->element = UHCI_PTR_TERM;
                qh->is_stopped = 1;
                qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
                qh->initial_toggle = uhci_toggle(td_token(td)) ^
                                (ret == -EREMOTEIO);

        } else          /* Short packet received */
                ret = uhci_fixup_short_transfer(uhci, qh, urbp);
        return ret;
}

/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        struct uhci_td *td = NULL;      /* Since urb->number_of_packets > 0 */
        int i, frame;
        unsigned long destination, status;
        struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

        /* Values must not be too big (could overflow below) */
        if (urb->interval >= UHCI_NUMFRAMES ||
                        urb->number_of_packets >= UHCI_NUMFRAMES)
                return -EFBIG;

        /* Check the period and figure out the starting frame number */
        if (!qh->bandwidth_reserved) {
                qh->period = urb->interval;
                if (urb->transfer_flags & URB_ISO_ASAP) {
                        qh->phase = -1;         /* Find the best phase */
                        i = uhci_check_bandwidth(uhci, qh);
                        if (i)
                                return i;

                        /* Allow a little time to allocate the TDs */
                        uhci_get_current_frame_number(uhci);
                        frame = uhci->frame_number + 10;

                        /* Move forward to the first frame having the
                         * correct phase */
                        urb->start_frame = frame + ((qh->phase - frame) &
                                        (qh->period - 1));
                } else {
                        i = urb->start_frame - uhci->last_iso_frame;
                        if (i <= 0 || i >= UHCI_NUMFRAMES)
                                return -EINVAL;
                        qh->phase = urb->start_frame & (qh->period - 1);
                        i = uhci_check_bandwidth(uhci, qh);
                        if (i)
                                return i;
                }

        } else if (qh->period != urb->interval) {
                return -EINVAL;         /* Can't change the period */

        } else {        /* Pick up where the last URB leaves off */
                if (list_empty(&qh->queue)) {
                        frame = qh->iso_frame;
                } else {
                        struct urb *lurb;

                        lurb = list_entry(qh->queue.prev,
                                        struct urb_priv, node)->urb;
                        frame = lurb->start_frame +
                                        lurb->number_of_packets *
                                        lurb->interval;
                }
                if (urb->transfer_flags & URB_ISO_ASAP)
                        urb->start_frame = frame;
                else if (urb->start_frame != frame)
                        return -EINVAL;
        }

        /* Make sure we won't have to go too far into the future */
        if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
                        urb->start_frame + urb->number_of_packets *
                                urb->interval))
                return -EFBIG;

        status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
        destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

        for (i = 0; i < urb->number_of_packets; i++) {
                td = uhci_alloc_td(uhci);
                if (!td)
                        return -ENOMEM;

                uhci_add_td_to_urbp(td, urbp);
                uhci_fill_td(td, status, destination |
                                uhci_explen(urb->iso_frame_desc[i].length),
                                urb->transfer_dma +
                                        urb->iso_frame_desc[i].offset);
        }

        /* Set the interrupt-on-completion flag on the last packet. */
        td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

        /* Add the TDs to the frame list */
        frame = urb->start_frame;
        list_for_each_entry(td, &urbp->td_list, list) {
                uhci_insert_td_in_frame_list(uhci, td, frame);
                frame += qh->period;
        }

        if (list_empty(&qh->queue)) {
                qh->iso_packet_desc = &urb->iso_frame_desc[0];
                qh->iso_frame = urb->start_frame;
                qh->iso_status = 0;
        }

        qh->skel = uhci->skel_iso_qh;
        if (!qh->bandwidth_reserved)
                uhci_reserve_bandwidth(uhci, qh);
        return 0;
}
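
/*
 * The ASAP start-frame computation above rounds up to the chosen phase.
 * For example, with period 8, phase 3, and a current frame number of
 * 100, the earliest candidate is frame 110 and the expression
 * 110 + ((3 - 110) & 7) yields frame 115: the first frame at or after
 * 110 that is congruent to 3 modulo 8.
 */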

static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
        struct uhci_td *td, *tmp;
        struct urb_priv *urbp = urb->hcpriv;
        struct uhci_qh *qh = urbp->qh;

        list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
                unsigned int ctrlstat;
                int status;
                int actlength;

                if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
                        return -EINPROGRESS;

                uhci_remove_tds_from_frame(uhci, qh->iso_frame);

                ctrlstat = td_status(td);
                if (ctrlstat & TD_CTRL_ACTIVE) {
                        status = -EXDEV;        /* TD was added too late? */
                } else {
                        status = uhci_map_status(uhci_status_bits(ctrlstat),
                                        usb_pipeout(urb->pipe));
                        actlength = uhci_actual_length(ctrlstat);

                        urb->actual_length += actlength;
                        qh->iso_packet_desc->actual_length = actlength;
                        qh->iso_packet_desc->status = status;
                }

                if (status) {
                        urb->error_count++;
                        qh->iso_status = status;
                }

                uhci_remove_td_from_urbp(td);
                uhci_free_td(uhci, td);
                qh->iso_frame += qh->period;
                ++qh->iso_packet_desc;
        }
        return qh->iso_status;
}

static int uhci_urb_enqueue(struct usb_hcd *hcd,
                struct usb_host_endpoint *hep,
                struct urb *urb, gfp_t mem_flags)
{
        int ret;
        struct uhci_hcd *uhci = hcd_to_uhci(hcd);
        unsigned long flags;
        struct urb_priv *urbp;
        struct uhci_qh *qh;

        spin_lock_irqsave(&uhci->lock, flags);

        ret = urb->status;
        if (ret != -EINPROGRESS)                /* URB already unlinked! */
                goto done;

        ret = -ENOMEM;
        urbp = uhci_alloc_urb_priv(uhci, urb);
        if (!urbp)
                goto done;

        if (hep->hcpriv)
                qh = (struct uhci_qh *) hep->hcpriv;
        else {
                qh = uhci_alloc_qh(uhci, urb->dev, hep);
                if (!qh)
                        goto err_no_qh;
        }
        urbp->qh = qh;

        switch (qh->type) {
        case USB_ENDPOINT_XFER_CONTROL:
                ret = uhci_submit_control(uhci, urb, qh);
                break;
        case USB_ENDPOINT_XFER_BULK:
                ret = uhci_submit_bulk(uhci, urb, qh);
                break;
        case USB_ENDPOINT_XFER_INT:
                ret = uhci_submit_interrupt(uhci, urb, qh);
                break;
        case USB_ENDPOINT_XFER_ISOC:
                urb->error_count = 0;
                ret = uhci_submit_isochronous(uhci, urb, qh);
                break;
        }
        if (ret != 0)
                goto err_submit_failed;

        /* Add this URB to the QH */
        urbp->qh = qh;
        list_add_tail(&urbp->node, &qh->queue);

        /* If the new URB is the first and only one on this QH then either
         * the QH is new and idle or else it's unlinked and waiting to
         * become idle, so we can activate it right away.  But only if the
         * queue isn't stopped. */
        if (qh->queue.next == &urbp->node && !qh->is_stopped) {
                uhci_activate_qh(uhci, qh);
                uhci_urbp_wants_fsbr(uhci, urbp);
        }
        goto done;

err_submit_failed:
        if (qh->state == QH_STATE_IDLE)
                uhci_make_qh_idle(uhci, qh);    /* Reclaim unused QH */

err_no_qh:
        uhci_free_urb_priv(uhci, urbp);

done:
        spin_unlock_irqrestore(&uhci->lock, flags);
        return ret;
}

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
        struct uhci_hcd *uhci = hcd_to_uhci(hcd);
        unsigned long flags;
        struct urb_priv *urbp;
        struct uhci_qh *qh;

        spin_lock_irqsave(&uhci->lock, flags);
        urbp = urb->hcpriv;
        if (!urbp)                      /* URB was never linked! */
                goto done;
        qh = urbp->qh;

        /* Remove Isochronous TDs from the frame list ASAP */
        if (qh->type == USB_ENDPOINT_XFER_ISOC) {
                uhci_unlink_isochronous_tds(uhci, urb);
                mb();

                /* If the URB has already started, update the QH unlink time */
                uhci_get_current_frame_number(uhci);
                if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
                        qh->unlink_frame = uhci->frame_number;
        }

        uhci_unlink_qh(uhci, qh);

done:
        spin_unlock_irqrestore(&uhci->lock, flags);
        return 0;
}

/*
 * Finish unlinking an URB and give it back
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
                struct urb *urb)
__releases(uhci->lock)
__acquires(uhci->lock)
{
        struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

        /* When giving back the first URB in an Isochronous queue,
         * reinitialize the QH's iso-related members for the next URB. */
        if (qh->type == USB_ENDPOINT_XFER_ISOC &&
                        urbp->node.prev == &qh->queue &&
                        urbp->node.next != &qh->queue) {
                struct urb *nurb = list_entry(urbp->node.next,
                                struct urb_priv, node)->urb;

                qh->iso_packet_desc = &nurb->iso_frame_desc[0];
                qh->iso_frame = nurb->start_frame;
                qh->iso_status = 0;
        }

        /* Take the URB off the QH's queue.  If the queue is now empty,
         * this is a perfect time for a toggle fixup. */
        list_del_init(&urbp->node);
        if (list_empty(&qh->queue) && qh->needs_fixup) {
                usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                                usb_pipeout(urb->pipe), qh->initial_toggle);
                qh->needs_fixup = 0;
        }

        uhci_free_urb_priv(uhci, urbp);

        spin_unlock(&uhci->lock);
        usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb);
        spin_lock(&uhci->lock);

        /* If the queue is now empty, we can unlink the QH and give up its
         * reserved bandwidth. */
        if (list_empty(&qh->queue)) {
                uhci_unlink_qh(uhci, qh);
                if (qh->bandwidth_reserved)
                        uhci_release_bandwidth(uhci, qh);
        }
}

/*
 * Scan the URBs in a QH's queue
 */
#define QH_FINISHED_UNLINKING(qh)                       \
                (qh->state == QH_STATE_UNLINKING &&     \
                uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
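
/*
 * A QH counts as fully unlinked once at least one frame has elapsed
 * since uhci_unlink_qh() recorded qh->unlink_frame, presumably because
 * by then the controller can no longer hold a cached pointer to it
 * (compare the "more than one frame" rule in the comment above
 * uhci_make_qh_idle()).  When the controller is stopped, is_stopped is
 * nonzero and makes the inequality hold immediately.
 */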
1424
1425 static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
1426 {
1427         struct urb_priv *urbp;
1428         struct urb *urb;
1429         int status;
1430
1431         while (!list_empty(&qh->queue)) {
1432                 urbp = list_entry(qh->queue.next, struct urb_priv, node);
1433                 urb = urbp->urb;
1434
1435                 if (qh->type == USB_ENDPOINT_XFER_ISOC)
1436                         status = uhci_result_isochronous(uhci, urb);
1437                 else
1438                         status = uhci_result_common(uhci, urb);
1439                 if (status == -EINPROGRESS)
1440                         break;
1441
1442                 spin_lock(&urb->lock);
1443                 if (urb->status == -EINPROGRESS)        /* Not dequeued */
1444                         urb->status = status;
1445                 else
1446                         status = ECONNRESET;            /* Not -ECONNRESET */
1447                 spin_unlock(&urb->lock);
1448
1449                 /* Dequeued but completed URBs can't be given back unless
1450                  * the QH is stopped or has finished unlinking. */
1451                 if (status == ECONNRESET) {
1452                         if (QH_FINISHED_UNLINKING(qh))
1453                                 qh->is_stopped = 1;
1454                         else if (!qh->is_stopped)
1455                                 return;
1456                 }
1457
1458                 uhci_giveback_urb(uhci, qh, urb);
1459                 if (status < 0 && qh->type != USB_ENDPOINT_XFER_ISOC)
1460                         break;
1461         }
1462
1463         /* If the QH is neither stopped nor finished unlinking (normal case),
1464          * our work here is done. */
1465         if (QH_FINISHED_UNLINKING(qh))
1466                 qh->is_stopped = 1;
1467         else if (!qh->is_stopped)
1468                 return;
1469
1470         /* Otherwise give back each of the dequeued URBs */
1471 restart:
1472         list_for_each_entry(urbp, &qh->queue, node) {
1473                 urb = urbp->urb;
1474                 if (urb->status != -EINPROGRESS) {
1475
1476                         /* Fix up the TD links and save the toggles for
1477                          * non-Isochronous queues.  For Isochronous queues,
1478                          * test for too-recent dequeues. */
1479                         if (!uhci_cleanup_queue(uhci, qh, urb)) {
1480                                 qh->is_stopped = 0;
1481                                 return;
1482                         }
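                             /* Giving back the URB temporarily drops
                              * uhci->lock, so the queue may have changed;
                              * restart the scan from the top of the list. */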
1483                         uhci_giveback_urb(uhci, qh, urb);
1484                         goto restart;
1485                 }
1486         }
1487         qh->is_stopped = 0;
1488
1489         /* There are no more dequeued URBs.  If there are still URBs on the
1490          * queue, the QH can now be re-activated. */
1491         if (!list_empty(&qh->queue)) {
1492                 if (qh->needs_fixup)
1493                         uhci_fixup_toggles(qh, 0);
1494
1495                 /* If the first URB on the queue wants FSBR but its time
1496                  * limit has expired, set the next TD to interrupt on
1497                  * completion before reactivating the QH. */
1498                 urbp = list_entry(qh->queue.next, struct urb_priv, node);
1499                 if (urbp->fsbr && qh->wait_expired) {
1500                         struct uhci_td *td = list_entry(urbp->td_list.next,
1501                                         struct uhci_td, list);
1502
1503                         td->status |= cpu_to_le32(TD_CTRL_IOC);
1504                 }
1505
1506                 uhci_activate_qh(uhci, qh);
1507         }
1508
1509         /* The queue is empty.  The QH can become idle if it is fully
1510          * unlinked. */
1511         else if (QH_FINISHED_UNLINKING(qh))
1512                 uhci_make_qh_idle(uhci, qh);
1513 }
1514
1515 /*
1516  * Check for queues that have made some forward progress.
1517  * Returns 0 if the queue is not Isochronous, is ACTIVE, and
1518  * has not advanced since last examined; 1 otherwise.
1519  *
1520  * Early Intel controllers have a bug that sometimes causes qh->element
1521  * not to advance when a TD completes successfully.  The queue then
1522  * remains stuck on the inactive completed TD.  We detect such cases and
1523  * advance the element pointer by hand.
1524  */
1525 static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
1526 {
1527         struct urb_priv *urbp = NULL;
1528         struct uhci_td *td;
1529         int ret = 1;
1530         unsigned status;
1531
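             /* Isochronous TDs are linked directly into the frame list
              * rather than through the QH's element pointer, so an iso
              * queue can't get stuck; treat it as always advancing. */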
1532         if (qh->type == USB_ENDPOINT_XFER_ISOC)
1533                 goto done;
1534
1535         /* Treat an UNLINKING queue as though it hasn't advanced.
1536          * This is okay because reactivation will treat it as though
1537          * it has advanced, and if it is going to become IDLE then
1538          * this doesn't matter anyway.  Furthermore it's possible
1539          * for an UNLINKING queue not to have any URBs at all, or
1540          * for its first URB not to have any TDs (if it was dequeued
1541          * just as it completed).  So it's not easy in any case to
1542          * test whether such queues have advanced. */
1543         if (qh->state != QH_STATE_ACTIVE) {
1544                 urbp = NULL;
1545                 status = 0;
1546
1547         } else {
1548                 urbp = list_entry(qh->queue.next, struct urb_priv, node);
1549                 td = list_entry(urbp->td_list.next, struct uhci_td, list);
1550                 status = td_status(td);
1551                 if (!(status & TD_CTRL_ACTIVE)) {
1552
1553                         /* We're okay, the queue has advanced */
1554                         qh->wait_expired = 0;
1555                         qh->advance_jiffies = jiffies;
1556                         goto done;
1557                 }
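                     /* The first TD is still active: no forward progress */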
1558                 ret = 0;
1559         }
1560
1561         /* The queue hasn't advanced; check for timeout */
1562         if (qh->wait_expired)
1563                 goto done;
1564
1565         if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {
1566
1567                 /* Detect the Intel bug and work around it */
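                     /* (The element pointer is still aimed at the last TD
                      * to complete, qh->post_td, even though that TD is
                      * now inactive; copying its link pointer by hand
                      * advances the queue past it.) */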
1568                 if (qh->post_td && qh_element(qh) == LINK_TO_TD(qh->post_td)) {
1569                         qh->element = qh->post_td->link;
1570                         qh->advance_jiffies = jiffies;
1571                         ret = 1;
1572                         goto done;
1573                 }
1574
1575                 qh->wait_expired = 1;
1576
1577                 /* If the current URB wants FSBR, unlink it temporarily
1578                  * so that we can safely set the next TD to interrupt on
1579                  * completion.  That way we'll know as soon as the queue
1580                  * starts moving again. */
1581                 if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
1582                         uhci_unlink_qh(uhci, qh);
1583
1584         } else {
1585                 /* Unmoving but not-yet-expired queues keep FSBR alive */
1586                 if (urbp)
1587                         uhci_urbp_wants_fsbr(uhci, urbp);
1588         }
1589
1590 done:
1591         return ret;
1592 }
1593
1594 /*
1595  * Process events in the schedule, but only in one thread at a time
1596  */
1597 static void uhci_scan_schedule(struct uhci_hcd *uhci)
1598 {
1599         int i;
1600         struct uhci_qh *qh;
1601
1602         /* Don't allow re-entrant calls */
1603         if (uhci->scan_in_progress) {
1604                 uhci->need_rescan = 1;
1605                 return;
1606         }
1607         uhci->scan_in_progress = 1;
1608 rescan:
1609         uhci->need_rescan = 0;
1610         uhci->fsbr_is_wanted = 0;
1611
1612         uhci_clear_next_interrupt(uhci);
1613         uhci_get_current_frame_number(uhci);
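             /* Snapshot the frame the controller is working on now; iso
              * TDs scheduled for earlier frames have already been
              * executed by the hardware. */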
1614         uhci->cur_iso_frame = uhci->frame_number;
1615
1616         /* Go through all the QH queues and process the URBs in each one */
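             /* (The last skeleton QH is the terminating QH, which never
              * has URBs queued on it.  Note that scanning a QH may remove
              * it from its list, which is why its successor is always
              * saved in uhci->next_qh before each scan.) */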
1617         for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
1618                 uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
1619                                 struct uhci_qh, node);
1620                 while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
1621                         uhci->next_qh = list_entry(qh->node.next,
1622                                         struct uhci_qh, node);
1623
1624                         if (uhci_advance_check(uhci, qh)) {
1625                                 uhci_scan_qh(uhci, qh);
1626                                 if (qh->state == QH_STATE_ACTIVE) {
1627                                         uhci_urbp_wants_fsbr(uhci,
1628                                                 list_entry(qh->queue.next, struct urb_priv, node));
1629                                 }
1630                         }
1631                 }
1632         }
1633
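             /* Frames before cur_iso_frame have now been fully scanned;
              * remember how far we got for the next pass. */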
1634         uhci->last_iso_frame = uhci->cur_iso_frame;
1635         if (uhci->need_rescan)
1636                 goto rescan;
1637         uhci->scan_in_progress = 0;
1638
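             /* No queue asked for FSBR during this scan.  Rather than
              * switching FSBR off immediately, arm the timer to turn it
              * off after FSBR_OFF_DELAY in case a queue starts moving
              * again. */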
1639         if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
1640                         !uhci->fsbr_expiring) {
1641                 uhci->fsbr_expiring = 1;
1642                 mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
1643         }
1644
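             /* QHs still waiting to finish unlinking need the frame
              * number to advance once more; keep the IOC interrupt armed
              * until the unlink list drains. */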
1645         if (list_empty(&uhci->skel_unlink_qh->node))
1646                 uhci_clear_next_interrupt(uhci);
1647         else
1648                 uhci_set_next_interrupt(uhci);
1649 }