/*
 * Copyright (C) 2001-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/
/*
 * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
 *
 * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
 * buffers needed for the larger number).  We use one QH per endpoint, and
 * queue multiple urbs (all three types) per endpoint.  URBs may need
 * several qtds.
 *
 * ISO traffic uses "ISO TD" (itd and sitd) records, and (along with
 * interrupts) needs careful scheduling.  Performance improvements can be
 * an ongoing challenge.  That's in "ehci-sched.c".
 *
 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
 * (b) special fields in qh entries or (c) split iso entries.  TTs will
 * buffer low/full speed data so the host collects it at high speed.
 */

/*-------------------------------------------------------------------------*/
/* fill a qtd, returning how much of the buffer we were able to queue up */

static int
qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
                size_t len, int token, int maxpacket)
{
        int     i, count;
        u64     addr = buf;

        /* one buffer entry per 4K ... first might be short or unaligned */
        qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
        qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
        count = 0x1000 - (buf & 0x0fff);        /* rest of that page */
        if (likely (len < count))               /* ... iff needed */
                count = len;
        else {
                buf +=  0x1000;
                buf &= ~0x0fff;

                /* per-qtd limit: from 16K to 20K (best alignment) */
                for (i = 1; count < len && i < 5; i++) {
                        addr = buf;
                        qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
                        qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
                                        (u32)(addr >> 32));
                        buf += 0x1000;
                        if ((count + 0x1000) < len)
                                count += 0x1000;
                        else
                                count = len;
                }

                /* short packets may only terminate transfers */
                if (count != len)
                        count -= (count % maxpacket);
        }
        qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
        qtd->length = count;

        return count;
}
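/*
 * Worked example of the limits above (a sketch, just following the
 * arithmetic in qtd_fill): with a 4KB-aligned buffer, all five page
 * slots carry a full 0x1000 bytes, so one qtd can queue 20KB.  With a
 * worst-case start offset just below a page boundary, the first slot
 * carries almost nothing and the usable maximum drops toward 16KB;
 * that is the "max 16-20kB/entry" range noted in the header comment.
 */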
/*-------------------------------------------------------------------------*/

static void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
        /* writes to an active overlay are unsafe */
        BUG_ON(qh->qh_state != QH_STATE_IDLE);

        qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
        qh->hw_alt_next = EHCI_LIST_END(ehci);

        /* Except for control endpoints, we make hardware maintain data
         * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
         * and set the pseudo-toggle in udev.  Only usb_clear_halt() will
         * ever clear it.
         */
        if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
                unsigned        is_out, epnum;

                is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
                epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
                if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
                        qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
                        usb_settoggle (qh->dev, epnum, is_out, 1);
                }
        }

        /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
        wmb ();
        qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}
/* if it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        struct ehci_qtd *qtd;

        if (list_empty (&qh->qtd_list))
                qtd = qh->dummy;
        else {
                qtd = list_entry (qh->qtd_list.next,
                                struct ehci_qtd, qtd_list);
                /* first qtd may already be partially processed */
                if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw_current)
                        qtd = NULL;
        }

        if (qtd)
                qh_update (ehci, qh, qtd);
}
/*-------------------------------------------------------------------------*/

static int qtd_copy_status (
        struct ehci_hcd *ehci,
        struct urb *urb,
        size_t length,
        u32 token
)
{
        int     status = -EINPROGRESS;

        /* count IN/OUT bytes, not SETUP (even short packets) */
        if (likely (QTD_PID (token) != 2))
                urb->actual_length += length - QTD_LENGTH (token);

        /* don't modify error codes */
        if (unlikely(urb->unlinked))
                return status;

        /* force cleanup after short read; not always an error */
        if (unlikely (IS_SHORT_READ (token)))
                status = -EREMOTEIO;

        /* serious "can't proceed" faults reported by the hardware */
        if (token & QTD_STS_HALT) {
                if (token & QTD_STS_BABBLE) {
                        /* FIXME "must" disable babbling device's port too */
                        status = -EOVERFLOW;
                } else if (token & QTD_STS_MMF) {
                        /* fs/ls interrupt xfer missed the complete-split */
                        status = -EPROTO;
                } else if (token & QTD_STS_DBE) {
                        status = (QTD_PID (token) == 1) /* IN ? */
                                ? -ENOSR  /* hc couldn't read data */
                                : -ECOMM; /* hc couldn't write data */
                } else if (token & QTD_STS_XACT) {
                        /* timeout, bad crc, wrong PID, etc; retried */
                        if (QTD_CERR (token))
                                status = -EPIPE;
                        else {
                                ehci_dbg (ehci, "devpath %s ep%d%s 3strikes\n",
                                        urb->dev->devpath,
                                        usb_pipeendpoint (urb->pipe),
                                        usb_pipein (urb->pipe) ? "in" : "out");
                                status = -EPROTO;
                        }
                /* CERR nonzero + no errors + halt --> stall */
                } else if (QTD_CERR (token))
                        status = -EPIPE;
                else    /* unknown */
                        status = -EPROTO;

                ehci_vdbg (ehci,
                        "dev%d ep%d%s qtd token %08x --> status %d\n",
                        usb_pipedevice (urb->pipe),
                        usb_pipeendpoint (urb->pipe),
                        usb_pipein (urb->pipe) ? "in" : "out",
                        token, status);

                /* if async CSPLIT failed, try cleaning out the TT buffer */
                if (status != -EPIPE
                                && urb->dev->tt
                                && !usb_pipeint(urb->pipe)
                                && ((token & QTD_STS_MMF) != 0
                                        || QTD_CERR(token) == 0)
                                && (!ehci_is_TDI(ehci)
                                        || urb->dev->tt->hub !=
                                           ehci_to_hcd(ehci)->self.root_hub)) {
#ifdef DEBUG
                        struct usb_device *tt = urb->dev->tt->hub;
                        dev_dbg (&tt->dev,
                                "clear tt buffer port %d, a%d ep%d t%08x\n",
                                urb->dev->ttport, urb->dev->devnum,
                                usb_pipeendpoint (urb->pipe), token);
#endif /* DEBUG */
                        /* REVISIT ARC-derived cores don't clear the root
                         * hub TT buffer in this way...
                         */
                        usb_hub_tt_clear_buffer (urb->dev, urb->pipe);
                }
        }

        return status;
}
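/*
 * Reference summary of the fault mapping above (restating the code, not
 * changing it): BABBLE -> -EOVERFLOW; MMF -> -EPROTO; DBE -> -ENOSR for
 * IN, -ECOMM for OUT; XACT with CERR remaining -> -EPIPE, with CERR
 * exhausted -> -EPROTO ("3strikes"); plain halt with CERR nonzero is a
 * stall, -EPIPE; anything else -> -EPROTO.
 */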
static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
__releases(ehci->lock)
__acquires(ehci->lock)
{
        if (likely (urb->hcpriv != NULL)) {
                struct ehci_qh  *qh = (struct ehci_qh *) urb->hcpriv;

                /* S-mask in a QH means it's an interrupt urb */
                if ((qh->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {

                        /* ... update hc-wide periodic stats (for usbfs) */
                        ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
                }
                qh_put (qh);
        }

        if (unlikely(urb->unlinked)) {
                COUNT(ehci->stats.unlink);
        } else {
                if (likely(status == -EINPROGRESS))
                        status = 0;
                COUNT(ehci->stats.complete);
        }

#ifdef EHCI_URB_TRACE
        ehci_dbg (ehci,
                "%s %s urb %p ep%d%s status %d len %d/%d\n",
                __FUNCTION__, urb->dev->devpath, urb,
                usb_pipeendpoint (urb->pipe),
                usb_pipein (urb->pipe) ? "in" : "out",
                status,
                urb->actual_length, urb->transfer_buffer_length);
#endif

        /* complete() can reenter this HCD */
        usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
        spin_unlock (&ehci->lock);
        usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
        spin_lock (&ehci->lock);
}
static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);

static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);

/*
 * Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns number of completions called,
 * indicating how much "real" work we did.
 */
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        struct ehci_qtd         *last = NULL, *end = qh->dummy;
        struct list_head        *entry, *tmp;
        int                     last_status = -EINPROGRESS;
        int                     stopped;
        unsigned                count = 0;
        int                     do_status = 0;
        u8                      state;
        u32                     halt = HALT_BIT(ehci);

        if (unlikely (list_empty (&qh->qtd_list)))
                return count;

        /* completions (or tasks on other cpus) must never clobber HALT
         * till we've gone through and cleaned everything up, even when
         * they add urbs to this qh's queue or mark them for unlinking.
         *
         * NOTE:  unlinking expects to be done in queue order.
         */
        state = qh->qh_state;
        qh->qh_state = QH_STATE_COMPLETING;
        stopped = (state == QH_STATE_IDLE);

        /* remove de-activated QTDs from front of queue.
         * after faults (including short reads), cleanup this urb
         * then let the queue advance.
         * if queue is stopped, handles unlinks.
         */
        list_for_each_safe (entry, tmp, &qh->qtd_list) {
                struct ehci_qtd *qtd;
                struct urb      *urb;
                u32             token = 0;
                int             qtd_status;

                qtd = list_entry (entry, struct ehci_qtd, qtd_list);
                urb = qtd->urb;

                /* clean up any state from previous QTD ...*/
                if (last) {
                        if (likely (last->urb != urb)) {
                                ehci_urb_done(ehci, last->urb, last_status);
                                count++;
                                last_status = -EINPROGRESS;
                        }
                        ehci_qtd_free (ehci, last);
                        last = NULL;
                }

                /* ignore urbs submitted during completions we reported */
                if (qtd == end)
                        break;

                /* hardware copies qtd out of qh overlay */
                rmb ();
                token = hc32_to_cpu(ehci, qtd->hw_token);

                /* always clean up qtds the hc de-activated */
                if ((token & QTD_STS_ACTIVE) == 0) {

                        /* on STALL, error, and short reads this urb must
                         * complete and all its qtds must be recycled.
                         */
                        if ((token & QTD_STS_HALT) != 0) {
                                stopped = 1;

                        /* magic dummy for some short reads; qh won't advance.
                         * that silicon quirk can kick in with this dummy too.
                         *
                         * other short reads won't stop the queue, including
                         * control transfers (status stage handles that) or
                         * most other single-qtd reads ... the queue stops if
                         * URB_SHORT_NOT_OK was set so the driver submitting
                         * the urbs could clean it up.
                         */
                        } else if (IS_SHORT_READ (token)
                                        && !(qtd->hw_alt_next
                                                & EHCI_LIST_END(ehci))) {
                                stopped = 1;
                                goto halt;
                        }

                /* stop scanning when we reach qtds the hc is using */
                } else if (likely (!stopped
                                && HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) {
                        break;

                /* scan the whole queue for unlinks whenever it stops */
                } else {
                        stopped = 1;

                        /* cancel everything if we halt, suspend, etc */
                        if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))
                                last_status = -ESHUTDOWN;

                        /* this qtd is active; skip it unless a previous qtd
                         * for its urb faulted, or its urb was canceled.
                         */
                        else if (last_status == -EINPROGRESS && !urb->unlinked)
                                continue;

                        /* issue status after short control reads */
                        if (unlikely (do_status != 0)
                                        && QTD_PID (token) == 0 /* OUT */) {
                                do_status = 0;
                                continue;
                        }

                        /* qh unlinked; token in overlay may be most current */
                        if (state == QH_STATE_IDLE
                                        && cpu_to_hc32(ehci, qtd->qtd_dma)
                                                == qh->hw_current)
                                token = hc32_to_cpu(ehci, qh->hw_token);

                        /* force halt for unlinked or blocked qh, so we'll
                         * patch the qh later and so that completions can't
                         * activate it while we "know" it's stopped.
                         */
                        if ((halt & qh->hw_token) == 0) {
halt:
                                qh->hw_token |= halt;
                                wmb ();
                        }
                }

                /* remove it from the queue */
                qtd_status = qtd_copy_status(ehci, urb, qtd->length, token);
                if (unlikely(qtd_status == -EREMOTEIO)) {
                        do_status = (!urb->unlinked &&
                                        usb_pipecontrol(urb->pipe));
                        qtd_status = 0;
                }
                if (likely(last_status == -EINPROGRESS))
                        last_status = qtd_status;

                /* if we're removing something not at the queue head,
                 * patch the hardware queue pointer.
                 */
                if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
                        last = list_entry (qtd->qtd_list.prev,
                                        struct ehci_qtd, qtd_list);
                        last->hw_next = qtd->hw_next;
                }

                /* remove qtd; it's recycled after possible urb completion */
                list_del (&qtd->qtd_list);
                last = qtd;
        }

        /* last urb's completion might still need calling */
        if (likely (last != NULL)) {
                ehci_urb_done(ehci, last->urb, last_status);
                count++;
                ehci_qtd_free (ehci, last);
        }

        /* restore original state; caller must unlink or relink */
        qh->qh_state = state;

        /* be sure the hardware's done with the qh before refreshing
         * it after fault cleanup, or recovering from silicon wrongly
         * overlaying the dummy qtd (which reduces DMA chatter).
         */
        if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) {
                switch (state) {
                case QH_STATE_IDLE:
                        qh_refresh(ehci, qh);
                        break;
                case QH_STATE_LINKED:
                        /* We won't refresh a QH that's linked (after the HC
                         * stopped the queue).  That avoids a race:
                         *  - HC reads first part of QH;
                         *  - CPU updates that first part and the token;
                         *  - HC reads rest of that QH, including token
                         * Result:  HC gets an inconsistent image, and then
                         * DMAs to/from the wrong memory (corrupting it).
                         *
                         * That should be rare for interrupt transfers,
                         * except maybe high bandwidth ...
                         */
                        if ((cpu_to_hc32(ehci, QH_SMASK)
                                        & qh->hw_info2) != 0) {
                                intr_deschedule (ehci, qh);
                                (void) qh_schedule (ehci, qh);
                        } else
                                unlink_async (ehci, qh);
                        break;
                /* otherwise, unlink already started */
                }
        }

        return count;
}
/*-------------------------------------------------------------------------*/

// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
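/*
 * Example, following the macros above: a high speed, high bandwidth
 * endpoint with wMaxPacketSize 0x1400 decodes as max_packet() 0x400
 * (1024 bytes) and hb_mult() 3, i.e. up to three 1024 byte
 * transactions per microframe.
 */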
/*
 * reverse of qh_urb_transaction:  free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free (
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        struct list_head        *qtd_list
) {
        struct list_head        *entry, *temp;

        list_for_each_safe (entry, temp, qtd_list) {
                struct ehci_qtd *qtd;

                qtd = list_entry (entry, struct ehci_qtd, qtd_list);
                list_del (&qtd->qtd_list);
                ehci_qtd_free (ehci, qtd);
        }
}
/*
 * create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *
qh_urb_transaction (
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        struct list_head        *head,
        gfp_t                   flags
) {
        struct ehci_qtd         *qtd, *qtd_prev;
        dma_addr_t              buf;
        int                     len, maxpacket;
        int                     is_input;
        u32                     token;

        /*
         * URBs map to sequences of QTDs:  one logical transaction
         */
        qtd = ehci_qtd_alloc (ehci, flags);
        if (unlikely (!qtd))
                return NULL;
        list_add_tail (&qtd->qtd_list, head);
        qtd->urb = urb;

        token = QTD_STS_ACTIVE;
        token |= (EHCI_TUNE_CERR << 10);
        /* for split transactions, SplitXState initialized to zero */

        len = urb->transfer_buffer_length;
        is_input = usb_pipein (urb->pipe);
        if (usb_pipecontrol (urb->pipe)) {
                /* SETUP pid */
                qtd_fill(ehci, qtd, urb->setup_dma,
                                sizeof (struct usb_ctrlrequest),
                                token | (2 /* "setup" */ << 8), 8);

                /* ... and always at least one more pid */
                token ^= QTD_TOGGLE;
                qtd_prev = qtd;
                qtd = ehci_qtd_alloc (ehci, flags);
                if (unlikely (!qtd))
                        goto cleanup;
                qtd->urb = urb;
                qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
                list_add_tail (&qtd->qtd_list, head);

                /* for zero length DATA stages, STATUS is always IN */
                if (len == 0)
                        token |= (1 /* "in" */ << 8);
        }

        /*
         * data transfer stage:  buffer setup
         */
        buf = urb->transfer_dma;

        if (is_input)
                token |= (1 /* "in" */ << 8);
        /* else it's already initted to "out" pid (0 << 8) */

        maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

        /*
         * buffer gets wrapped in one or more qtds;
         * last one may be "short" (including zero len)
         * and may serve as a control status ack
         */
        for (;;) {
                int this_qtd_len;

                this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket);
                len -= this_qtd_len;
                buf += this_qtd_len;

                /*
                 * short reads advance to a "magic" dummy instead of the next
                 * qtd ... that forces the queue to stop, for manual cleanup.
                 * (this will usually be overridden later.)
                 */
                if (is_input)
                        qtd->hw_alt_next = ehci->async->hw_alt_next;

                /* qh makes control packets use qtd toggle; maybe switch it */
                if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
                        token ^= QTD_TOGGLE;

                if (likely (len <= 0))
                        break;

                qtd_prev = qtd;
                qtd = ehci_qtd_alloc (ehci, flags);
                if (unlikely (!qtd))
                        goto cleanup;
                qtd->urb = urb;
                qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
                list_add_tail (&qtd->qtd_list, head);
        }

        /*
         * unless the caller requires manual cleanup after short reads,
         * have the alt_next mechanism keep the queue running after the
         * last data qtd (the only one, for control and most other cases).
         */
        if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
                                || usb_pipecontrol (urb->pipe)))
                qtd->hw_alt_next = EHCI_LIST_END(ehci);

        /*
         * control requests may need a terminating data "status" ack;
         * bulk ones may need a terminating short packet (zero length).
         */
        if (likely (urb->transfer_buffer_length != 0)) {
                int     one_more = 0;

                if (usb_pipecontrol (urb->pipe)) {
                        one_more = 1;
                        token ^= 0x0100;        /* "in" <--> "out"  */
                        token |= QTD_TOGGLE;    /* force DATA1 */
                } else if (usb_pipebulk (urb->pipe)
                                && (urb->transfer_flags & URB_ZERO_PACKET)
                                && !(urb->transfer_buffer_length % maxpacket)) {
                        one_more = 1;
                }
                if (one_more) {
                        qtd_prev = qtd;
                        qtd = ehci_qtd_alloc (ehci, flags);
                        if (unlikely (!qtd))
                                goto cleanup;
                        qtd->urb = urb;
                        qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
                        list_add_tail (&qtd->qtd_list, head);

                        /* never any data in such packets */
                        qtd_fill(ehci, qtd, 0, 0, token, 0);
                }
        }

        /* by default, enable interrupt on urb completion */
        if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
                qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
        return head;

cleanup:
        qtd_list_free (ehci, urb, head);
        return NULL;
}
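/*
 * Shape of the list qh_urb_transaction builds for a control URB, as a
 * sketch: one SETUP qtd (the 8 byte request, DATA0), then one or more
 * DATA stage qtds wrapping the buffer, then a zero length STATUS qtd
 * with the opposite direction and DATA1 forced.  Bulk URBs get just
 * the buffer qtds, plus an extra zero length qtd when URB_ZERO_PACKET
 * asks for a terminating short packet.
 */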
/*-------------------------------------------------------------------------*/

// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established.  Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.7?)

/*
 * Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *
qh_make (
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        gfp_t                   flags
) {
        struct ehci_qh          *qh = ehci_qh_alloc (ehci, flags);
        u32                     info1 = 0, info2 = 0;
        int                     is_input, type;
        int                     maxp = 0;
        struct usb_tt           *tt = urb->dev->tt;

        if (!qh)
                return qh;

        /*
         * init endpoint/device data for this QH
         */
        info1 |= usb_pipeendpoint (urb->pipe) << 8;
        info1 |= usb_pipedevice (urb->pipe) << 0;

        is_input = usb_pipein (urb->pipe);
        type = usb_pipetype (urb->pipe);
        maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);

        /* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
         * acts like up to 3KB, but is built from smaller packets.
         */
        if (max_packet(maxp) > 1024) {
                ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
                goto done;
        }

        /* Compute interrupt scheduling parameters just once, and save.
         * - allowing for high bandwidth, how many nsec/uframe are used?
         * - split transactions need a second CSPLIT uframe; same question
         * - splits also need a schedule gap (for full/low speed I/O)
         * - qh has a polling interval
         *
         * For control/bulk requests, the HC or TT handles these.
         */
        if (type == PIPE_INTERRUPT) {
                qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
                                is_input, 0,
                                hb_mult(maxp) * max_packet(maxp)));
                qh->start = NO_FRAME;

                if (urb->dev->speed == USB_SPEED_HIGH) {
                        qh->c_usecs = 0;
                        qh->gap_uf = 0;

                        qh->period = urb->interval >> 3;
                        if (qh->period == 0 && urb->interval != 1) {
                                /* NOTE interval 2 or 4 uframes could work.
                                 * But interval 1 scheduling is simpler, and
                                 * includes high bandwidth.
                                 */
                                dbg ("intr period %d uframes, NYET!",
                                                urb->interval);
                                goto done;
                        }
                } else {
                        int             think_time;

                        /* gap is f(FS/LS transfer times) */
                        qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
                                        is_input, 0, maxp) / (125 * 1000);

                        /* FIXME this just approximates SPLIT/CSPLIT times */
                        if (is_input) {         // SPLIT, gap, CSPLIT+DATA
                                qh->c_usecs = qh->usecs + HS_USECS (0);
                                qh->usecs = HS_USECS (1);
                        } else {                // SPLIT+DATA, gap, CSPLIT
                                qh->usecs += HS_USECS (1);
                                qh->c_usecs = HS_USECS (0);
                        }

                        think_time = tt ? tt->think_time : 0;
                        qh->tt_usecs = NS_TO_US (think_time +
                                        usb_calc_bus_time (urb->dev->speed,
                                                is_input, 0, max_packet (maxp)));
                        qh->period = urb->interval;
                }
        }

        /* support for tt scheduling, and access to toggles */
        qh->dev = urb->dev;

        /* using TT? */
        switch (urb->dev->speed) {
        case USB_SPEED_LOW:
                info1 |= (1 << 12);     /* EPS "low" */
                /* FALL THROUGH */

        case USB_SPEED_FULL:
                /* EPS 0 means "full" */
                if (type != PIPE_INTERRUPT)
                        info1 |= (EHCI_TUNE_RL_TT << 28);
                if (type == PIPE_CONTROL) {
                        info1 |= (1 << 27);     /* for TT */
                        info1 |= 1 << 14;       /* toggle from qtd */
                }
                info1 |= maxp << 16;

                info2 |= (EHCI_TUNE_MULT_TT << 30);

                /* Some Freescale processors have an erratum in which the
                 * port number in the queue head was 0..N-1 instead of 1..N.
                 */
                if (ehci_has_fsl_portno_bug(ehci))
                        info2 |= (urb->dev->ttport-1) << 23;
                else
                        info2 |= urb->dev->ttport << 23;

                /* set the address of the TT; for TDI's integrated
                 * root hub tt, leave it zeroed.
                 */
                if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
                        info2 |= tt->hub->devnum << 16;

                /* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */

                break;

        case USB_SPEED_HIGH:            /* no TT involved */
                info1 |= (2 << 12);     /* EPS "high" */
                if (type == PIPE_CONTROL) {
                        info1 |= (EHCI_TUNE_RL_HS << 28);
                        info1 |= 64 << 16;      /* usb2 fixed maxpacket */
                        info1 |= 1 << 14;       /* toggle from qtd */
                        info2 |= (EHCI_TUNE_MULT_HS << 30);
                } else if (type == PIPE_BULK) {
                        info1 |= (EHCI_TUNE_RL_HS << 28);
                        /* The USB spec says that high speed bulk endpoints
                         * always use 512 byte maxpacket.  But some device
                         * vendors decided to ignore that, and MSFT is happy
                         * to help them do so.  So now people expect to use
                         * such nonconformant devices with Linux too; sigh.
                         */
                        info1 |= max_packet(maxp) << 16;
                        info2 |= (EHCI_TUNE_MULT_HS << 30);
                } else {                /* PIPE_INTERRUPT */
                        info1 |= max_packet (maxp) << 16;
                        info2 |= hb_mult (maxp) << 30;
                }
                break;
        default:
                dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
                qh_put (qh);
                return NULL;
        }

        /* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

        /* init as live, toggle clear, advance to dummy */
        qh->qh_state = QH_STATE_IDLE;
        qh->hw_info1 = cpu_to_hc32(ehci, info1);
        qh->hw_info2 = cpu_to_hc32(ehci, info2);
        usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
        qh_refresh (ehci, qh);
        return qh;
}
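/*
 * For reference, the hw_info1/hw_info2 layout built above follows the
 * EHCI spec's QH endpoint characteristics/capabilities words: info1
 * packs device address (bits 6:0), endpoint number (bits 11:8), EPS
 * (bits 13:12), "toggle from qtd" (bit 14), maxpacket (bits 26:16),
 * control endpoint flag (bit 27), and NAK reload (bits 31:28); info2
 * packs s-mask (bits 7:0), c-mask (bits 15:8), TT hub address
 * (bits 22:16), TT port (bits 29:23), and mult (bits 31:30).
 */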
/*-------------------------------------------------------------------------*/

/* move qh (and its qtds) onto async queue; maybe enable queue.  */

static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        __hc32          dma = QH_NEXT(ehci, qh->qh_dma);
        struct ehci_qh  *head;

        /* (re)start the async schedule? */
        head = ehci->async;
        timer_action_done (ehci, TIMER_ASYNC_OFF);
        if (!head->qh_next.qh) {
                u32     cmd = ehci_readl(ehci, &ehci->regs->command);

                if (!(cmd & CMD_ASE)) {
                        /* in case a clear of CMD_ASE didn't take yet */
                        (void)handshake(ehci, &ehci->regs->status,
                                        STS_ASS, 0, 150);
                        cmd |= CMD_ASE | CMD_RUN;
                        ehci_writel(ehci, cmd, &ehci->regs->command);
                        ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
                        /* posted write need not be known to HC yet ... */
                }
        }

        /* clear halt and/or toggle; and maybe recover from silicon quirk */
        if (qh->qh_state == QH_STATE_IDLE)
                qh_refresh (ehci, qh);

        /* splice right after start */
        qh->qh_next = head->qh_next;
        qh->hw_next = head->hw_next;
        wmb ();

        head->qh_next.qh = qh;
        head->hw_next = dma;

        qh->qh_state = QH_STATE_LINKED;
        /* qtd completions reported later by interrupt */
}
/*-------------------------------------------------------------------------*/

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds (
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        struct list_head        *qtd_list,
        int                     epnum,
        void                    **ptr
)
{
        struct ehci_qh          *qh = NULL;
        u32                     qh_addr_mask = cpu_to_hc32(ehci, 0x7f);

        qh = (struct ehci_qh *) *ptr;
        if (unlikely (qh == NULL)) {
                /* can't sleep here, we have ehci->lock... */
                qh = qh_make (ehci, urb, GFP_ATOMIC);
                *ptr = qh;
        }
        if (likely (qh != NULL)) {
                struct ehci_qtd *qtd;

                if (unlikely (list_empty (qtd_list)))
                        qtd = NULL;
                else
                        qtd = list_entry (qtd_list->next, struct ehci_qtd,
                                        qtd_list);

                /* control qh may need patching ... */
                if (unlikely (epnum == 0)) {

                        /* usb_reset_device() briefly reverts to address 0 */
                        if (usb_pipedevice (urb->pipe) == 0)
                                qh->hw_info1 &= ~qh_addr_mask;
                }

                /* just one way to queue requests: swap with the dummy qtd.
                 * only hc or qh_refresh() ever modify the overlay.
                 */
                if (likely (qtd != NULL)) {
                        struct ehci_qtd         *dummy;
                        dma_addr_t              dma;
                        __hc32                  token;

                        /* to avoid racing the HC, use the dummy td instead of
                         * the first td of our list (becomes new dummy).  both
                         * tds stay deactivated until we're done, when the
                         * HC is allowed to fetch the old dummy (4.10.2).
                         */
                        token = qtd->hw_token;
                        qtd->hw_token = HALT_BIT(ehci);
                        wmb ();
                        dummy = qh->dummy;

                        dma = dummy->qtd_dma;
                        *dummy = *qtd;
                        dummy->qtd_dma = dma;

                        list_del (&qtd->qtd_list);
                        list_add (&dummy->qtd_list, qtd_list);
                        __list_splice (qtd_list, qh->qtd_list.prev);

                        ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
                        qh->dummy = qtd;

                        /* hc must see the new dummy at list end */
                        dma = qtd->qtd_dma;
                        qtd = list_entry (qh->qtd_list.prev,
                                        struct ehci_qtd, qtd_list);
                        qtd->hw_next = QTD_NEXT(ehci, dma);

                        /* let the hc process these next qtds */
                        wmb ();
                        dummy->hw_token = token;

                        urb->hcpriv = qh_get (qh);
                }
        }
        return qh;
}
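/*
 * The dummy-swap protocol above, summarized: (1) halt the first new
 * qtd so nothing it describes can start; (2) copy its contents into
 * the old dummy, which already sits where the HC will look next;
 * (3) recycle the first qtd's memory as the queue's new dummy and
 * point the list tail at it; (4) only then store the real token,
 * activating the new work without the HC ever seeing a half-built qtd.
 */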
/*-------------------------------------------------------------------------*/

static int
submit_async (
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        struct list_head        *qtd_list,
        gfp_t                   mem_flags
) {
        struct ehci_qtd         *qtd;
        int                     epnum;
        unsigned long           flags;
        struct ehci_qh          *qh = NULL;
        int                     rc;

        qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
        epnum = urb->ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
        ehci_dbg (ehci,
                "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
                __FUNCTION__, urb->dev->devpath, urb,
                epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
                urb->transfer_buffer_length,
                qtd, urb->ep->hcpriv);
#endif

        spin_lock_irqsave (&ehci->lock, flags);
        if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
                        &ehci_to_hcd(ehci)->flags))) {
                rc = -ESHUTDOWN;
                goto done;
        }
        rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
        if (unlikely(rc))
                goto done;

        qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
        if (unlikely(qh == NULL)) {
                usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
                rc = -ENOMEM;
                goto done;
        }

        /* Control/bulk operations through TTs don't need scheduling,
         * the HC and TT handle it when the TT has a buffer ready.
         */
        if (likely (qh->qh_state == QH_STATE_IDLE))
                qh_link_async (ehci, qh_get (qh));
done:
        spin_unlock_irqrestore (&ehci->lock, flags);
        if (unlikely (qh == NULL))
                qtd_list_free (ehci, urb, qtd_list);
        return rc;
}
1014 /*-------------------------------------------------------------------------*/
1016 /* the async qh for the qtds being reclaimed are now unlinked from the HC */
1018 static void end_unlink_async (struct ehci_hcd *ehci)
1020 struct ehci_qh *qh = ehci->reclaim;
1021 struct ehci_qh *next;
1023 iaa_watchdog_done(ehci);
1025 // qh->hw_next = cpu_to_hc32(qh->qh_dma);
1026 qh->qh_state = QH_STATE_IDLE;
1027 qh->qh_next.qh = NULL;
1028 qh_put (qh); // refcount from reclaim
1030 /* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
1032 ehci->reclaim = next;
1035 qh_completions (ehci, qh);
1037 if (!list_empty (&qh->qtd_list)
1038 && HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
1039 qh_link_async (ehci, qh);
1041 qh_put (qh); // refcount from async list
1043 /* it's not free to turn the async schedule on/off; leave it
1044 * active but idle for a while once it empties.
1046 if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
1047 && ehci->async->qh_next.qh == NULL)
1048 timer_action (ehci, TIMER_ASYNC_OFF);
1052 ehci->reclaim = NULL;
1053 start_unlink_async (ehci, next);
/* makes sure the async qh will become idle */
/* caller must own ehci->lock */

static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        int             cmd = ehci_readl(ehci, &ehci->regs->command);
        struct ehci_qh  *prev;

#ifdef DEBUG
        assert_spin_locked(&ehci->lock);
        if (ehci->reclaim
                        || (qh->qh_state != QH_STATE_LINKED
                                && qh->qh_state != QH_STATE_UNLINK_WAIT)
                        )
                BUG ();
#endif

        /* stop async schedule right now? */
        if (unlikely (qh == ehci->async)) {
                /* can't get here without STS_ASS set */
                if (ehci_to_hcd(ehci)->state != HC_STATE_HALT
                                && !ehci->reclaim) {
                        /* ... and CMD_IAAD clear */
                        ehci_writel(ehci, cmd & ~CMD_ASE,
                                    &ehci->regs->command);
                        wmb ();
                        // handshake later, if we need to
                        timer_action_done (ehci, TIMER_ASYNC_OFF);
                }
                return;
        }

        qh->qh_state = QH_STATE_UNLINK;
        ehci->reclaim = qh = qh_get (qh);

        prev = ehci->async;
        while (prev->qh_next.qh != qh)
                prev = prev->qh_next.qh;

        prev->hw_next = qh->hw_next;
        prev->qh_next = qh->qh_next;
        wmb ();

        if (unlikely (ehci_to_hcd(ehci)->state == HC_STATE_HALT)) {
                /* if (unlikely (qh->reclaim != 0))
                 *      this will recurse, probably not much
                 */
                end_unlink_async (ehci);
                return;
        }

        cmd |= CMD_IAAD;
        ehci_writel(ehci, cmd, &ehci->regs->command);
        (void)ehci_readl(ehci, &ehci->regs->command);
        iaa_watchdog_start(ehci);
}
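/*
 * Note on the CMD_IAAD write above: it rings the EHCI "interrupt on
 * async advance" doorbell.  Once the HC has advanced past the unlinked
 * QH and can no longer be caching it, it raises STS_IAA; that IRQ (or
 * the iaa watchdog, covering silicon that loses the interrupt) calls
 * end_unlink_async() to finish the reclaim.
 */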
/*-------------------------------------------------------------------------*/

static void scan_async (struct ehci_hcd *ehci)
{
        struct ehci_qh          *qh;
        enum ehci_timer_action  action = TIMER_IO_WATCHDOG;

        if (!++(ehci->stamp))
                ehci->stamp++;
        timer_action_done (ehci, TIMER_ASYNC_SHRINK);
rescan:
        qh = ehci->async->qh_next.qh;
        if (likely (qh != NULL)) {
                do {
                        /* clean any finished work for this qh */
                        if (!list_empty (&qh->qtd_list)
                                        && qh->stamp != ehci->stamp) {
                                int temp;

                                /* unlinks could happen here; completion
                                 * reporting drops the lock.  rescan using
                                 * the latest schedule, but don't rescan
                                 * qhs we already finished (no looping).
                                 */
                                qh = qh_get (qh);
                                qh->stamp = ehci->stamp;
                                temp = qh_completions (ehci, qh);
                                qh_put (qh);
                                if (temp != 0)
                                        goto rescan;
                        }

                        /* unlink idle entries, reducing HC PCI usage as well
                         * as HCD schedule-scanning costs.  delay for any qh
                         * we just scanned, there's a not-unusual case that it
                         * doesn't stay idle for long.
                         * (plus, avoids some kind of re-activation race.)
                         */
                        if (list_empty (&qh->qtd_list)) {
                                if (qh->stamp == ehci->stamp)
                                        action = TIMER_ASYNC_SHRINK;
                                else if (!ehci->reclaim
                                            && qh->qh_state == QH_STATE_LINKED)
                                        start_unlink_async (ehci, qh);
                        }

                        qh = qh->qh_next.qh;
                } while (qh);
        }
        if (action == TIMER_ASYNC_SHRINK)
                timer_action (ehci, TIMER_ASYNC_SHRINK);
}