/*
 * Copyright (C) 2001-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
 *
 * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
 * buffers needed for the larger number).  We use one QH per endpoint, queue
 * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
 *
 * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
 * interrupts) needs careful scheduling.  Performance improvements can be
 * an ongoing challenge.  That's in "ehci-sched.c".
 *
 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
 * (b) special fields in qh entries or (c) split iso entries.  TTs will
 * buffer low/full speed data so the host collects it at high speed.
 */

/*-------------------------------------------------------------------------*/
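/* Terminology note: each QH contains an "overlay" area (hw_current,
 * hw_qtd_next, hw_alt_next, hw_token, hw_buf[]) that the controller loads
 * with a copy of whichever qtd it is currently executing and writes results
 * back into (EHCI spec, section 4.10).  The code below therefore only
 * touches a QH's overlay while the QH is idle or otherwise known to be
 * off the hardware schedule.
 */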
/* fill a qtd, returning how much of the buffer we were able to queue up */

static int
qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
		  size_t len, int token, int maxpacket)
{
	int	i, count;
	u64	addr = buf;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely (len < count))		/* ... iff needed */
		count = len;
	else {
		buf +=  0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
					(u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
	qtd->length = count;

	return count;
}
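/* For reference, the token dword written above packs (EHCI spec 3.5.3):
 * bit 31 data toggle; bits 30:16 total bytes to transfer; bit 15 IOC;
 * bits 14:12 current page; bits 11:10 error counter (CERR); bits 9:8 PID
 * (0 OUT, 1 IN, 2 SETUP); bits 7:0 status, including Active and Halted.
 * qtd_fill() supplies only the byte count; toggle, IOC, CERR, PID and
 * Active arrive from the caller in "token".
 */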
85 /*-------------------------------------------------------------------------*/
static inline void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	struct ehci_qh_hw *hw = qh->hw;

	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);

	hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
	hw->hw_alt_next = EHCI_LIST_END(ehci);

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
		unsigned	is_out, epnum;

		is_out = qh->is_out;
		epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
			hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
			usb_settoggle (qh->dev, epnum, is_out, 1);
		}
	}

	hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}
/* if it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	if (list_empty (&qh->qtd_list))
		qtd = qh->dummy;
	else {
		qtd = list_entry (qh->qtd_list.next,
				struct ehci_qtd, qtd_list);
		/*
		 * first qtd may already be partially processed.
		 * If we come here during unlink, the QH overlay region
		 * might have reference to the just unlinked qtd. The
		 * qtd is updated in qh_completions(). Update the QH
		 * overlay here.
		 */
		if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) {
			qh->hw->hw_qtd_next = qtd->hw_next;
			qtd = NULL;
		}
	}

	if (qtd)
		qh_update (ehci, qh, qtd);
}
148 /*-------------------------------------------------------------------------*/
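/* Both helpers above write the QH overlay directly, so they are only safe
 * on an idle QH: qh_refresh() runs when a QH is (re)linked into a schedule,
 * and from qh_completions() once the queue has stopped -- never while the
 * controller might still be fetching from that QH.
 */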
static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);

static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct ehci_qh		*qh = ep->hcpriv;
	unsigned long		flags;

	spin_lock_irqsave(&ehci->lock, flags);
	qh->clearing_tt = 0;
	if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
			&& ehci->rh_state == EHCI_RH_RUNNING)
		qh_link_async(ehci, qh);
	spin_unlock_irqrestore(&ehci->lock, flags);
}
static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
		struct urb *urb, u32 token)
{

	/* If an async split transaction gets an error or is unlinked,
	 * the TT buffer may be left in an indeterminate state.  We
	 * have to clear the TT buffer.
	 *
	 * Note: this routine is never called for Isochronous transfers.
	 */
	if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
#ifdef DEBUG
		struct usb_device *tt = urb->dev->tt->hub;
		dev_dbg(&tt->dev,
			"clear tt buffer port %d, a%d ep%d t%08x\n",
			urb->dev->ttport, urb->dev->devnum,
			usb_pipeendpoint(urb->pipe), token);
#endif /* DEBUG */
		if (!ehci_is_TDI(ehci)
				|| urb->dev->tt->hub !=
				   ehci_to_hcd(ehci)->self.root_hub) {
			if (usb_hub_clear_tt_buffer(urb) == 0)
				qh->clearing_tt = 1;
		} else {

			/* REVISIT ARC-derived cores don't clear the root
			 * hub TT buffer in this way...
			 */
		}
	}
}
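/* Background for the two helpers above: when a full/low speed control or
 * bulk transfer running through a high speed hub fails or is unlinked
 * mid-split, the hub's transaction translator may be left holding stale
 * state for that endpoint.  The Clear_TT_Buffer request (USB 2.0 spec,
 * section 11.17.5) tells the hub to discard it; usb_hub_clear_tt_buffer()
 * queues that request and ehci_clear_tt_buffer_complete() relinks the QH
 * once the hub has acknowledged it.
 */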
static int qtd_copy_status (
	struct ehci_hcd  *ehci,
	struct urb	*urb,
	size_t		length,
	u32		token
)
{
	int	status = -EINPROGRESS;

	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely (QTD_PID (token) != 2))
		urb->actual_length += length - QTD_LENGTH (token);

	/* don't modify error codes */
	if (unlikely(urb->unlinked))
		return status;

	/* force cleanup after short read; not always an error */
	if (unlikely (IS_SHORT_READ (token)))
		status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			status = -EOVERFLOW;
		/* CERR nonzero + halt --> stall */
		} else if (QTD_CERR(token)) {
			status = -EPIPE;

		/* In theory, more than one of the following bits can be set
		 * since they are sticky and the transaction is retried.
		 * Which to test first is rather arbitrary.
		 */
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			status = (QTD_PID (token) == 1) /* IN ? */
				? -ENOSR  /* hc couldn't read data */
				: -ECOMM; /* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad CRC, wrong PID, etc */
			ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
				urb->dev->devpath,
				usb_pipeendpoint(urb->pipe),
				usb_pipein(urb->pipe) ? "in" : "out");
			status = -EPROTO;
		} else {	/* unknown */
			status = -EPROTO;
		}

		ehci_vdbg (ehci,
			"dev%d ep%d%s qtd token %08x --> status %d\n",
			usb_pipedevice (urb->pipe),
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			token, status);
	}

	return status;
}
static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
__releases(ehci->lock)
__acquires(ehci->lock)
{
	if (likely (urb->hcpriv != NULL)) {
		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;

		/* S-mask in a QH means it's an interrupt urb */
		if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {

			/* ... update hc-wide periodic stats (for usbfs) */
			ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
		}
		qh_put (qh);
	}

	if (unlikely(urb->unlinked)) {
		COUNT(ehci->stats.unlink);
	} else {
		/* report non-error and short read status as zero */
		if (status == -EINPROGRESS || status == -EREMOTEIO)
			status = 0;
		COUNT(ehci->stats.complete);
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s status %d len %d/%d\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		status,
		urb->actual_length, urb->transfer_buffer_length);
#endif

	/* complete() can reenter this HCD */
	usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	spin_unlock (&ehci->lock);
	usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
	spin_lock (&ehci->lock);
}
305 static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
306 static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
308 static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
/*
 * Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns number of completions called,
 * indicating how much "real" work we did.
 */
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
318 struct ehci_qtd *last, *end = qh->dummy;
319 struct list_head *entry, *tmp;
324 struct ehci_qh_hw *hw = qh->hw;
326 if (unlikely (list_empty (&qh->qtd_list)))
	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE:  unlinking expects to be done in queue order.
	 *
	 * It's a bug for qh->qh_state to be anything other than
	 * QH_STATE_IDLE, unless our caller is scan_async() or
	 * scan_periodic().
	 */
339 state = qh->qh_state;
340 qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

 rescan:
	last = NULL;
	last_status = -EINPROGRESS;
346 qh->needs_rescan = 0;
348 /* remove de-activated QTDs from front of queue.
349 * after faults (including short reads), cleanup this urb
350 * then let the queue advance.
351 * if queue is stopped, handles unlinks.
353 list_for_each_safe (entry, tmp, &qh->qtd_list) {
354 struct ehci_qtd *qtd;
358 qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		/* clean up any state from previous QTD ...*/
		if (last) {
			if (likely (last->urb != urb)) {
				ehci_urb_done(ehci, last->urb, last_status);
				count++;
				last_status = -EINPROGRESS;
			}
			ehci_qtd_free (ehci, last);
			last = NULL;
		}
372 /* ignore urbs submitted during completions we reported */
376 /* hardware copies qtd out of qh overlay */
378 token = hc32_to_cpu(ehci, qtd->hw_token);
		/* always clean up qtds the hc de-activated */
 retry_xacterr:
		if ((token & QTD_STS_ACTIVE) == 0) {
384 /* on STALL, error, and short reads this urb must
385 * complete and all its qtds must be recycled.
387 if ((token & QTD_STS_HALT) != 0) {
389 /* retry transaction errors until we
390 * reach the software xacterr limit
				if ((token & QTD_STS_XACT) &&
						QTD_CERR(token) == 0 &&
						++qh->xacterrs < QH_XACTERR_MAX &&
						!urb->unlinked) {
					ehci_dbg(ehci,
	"detected XactErr len %zu/%zu retry %d\n",
	qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);

					/* reset the token in the qtd and the
					 * qh overlay (which still contains
					 * the qtd) so that we pick up from
					 * where we left off
					 */
					token &= ~QTD_STS_HALT;
					token |= QTD_STS_ACTIVE |
							(EHCI_TUNE_CERR << 10);
					qtd->hw_token = cpu_to_hc32(ehci,
							token);
					wmb();
					hw->hw_token = cpu_to_hc32(ehci,
							token);
					goto retry_xacterr;
				}
				stopped = 1;
417 /* magic dummy for some short reads; qh won't advance.
418 * that silicon quirk can kick in with this dummy too.
420 * other short reads won't stop the queue, including
421 * control transfers (status stage handles that) or
422 * most other single-qtd reads ... the queue stops if
423 * URB_SHORT_NOT_OK was set so the driver submitting
			 * the urbs could clean it up.
			 */
			} else if (IS_SHORT_READ (token)
					&& !(qtd->hw_alt_next
						& EHCI_LIST_END(ehci))) {
				stopped = 1;
			}
		/* stop scanning when we reach qtds the hc is using */
		} else if (likely (!stopped
				&& ehci->rh_state == EHCI_RH_RUNNING)) {
			break;

		/* scan the whole queue for unlinks whenever it stops */
		} else {
			stopped = 1;

			/* cancel everything if we halt, suspend, etc */
442 if (ehci->rh_state != EHCI_RH_RUNNING)
443 last_status = -ESHUTDOWN;
445 /* this qtd is active; skip it unless a previous qtd
446 * for its urb faulted, or its urb was canceled.
448 else if (last_status == -EINPROGRESS && !urb->unlinked)
451 /* qh unlinked; token in overlay may be most current */
452 if (state == QH_STATE_IDLE
453 && cpu_to_hc32(ehci, qtd->qtd_dma)
455 token = hc32_to_cpu(ehci, hw->hw_token);
457 /* An unlink may leave an incomplete
458 * async transaction in the TT buffer.
459 * We have to clear it.
461 ehci_clear_tt_buffer(ehci, qh, urb, token);
465 /* unless we already know the urb's status, collect qtd status
466 * and update count of bytes transferred. in common short read
467 * cases with only one data qtd (including control transfers),
468 * queue processing won't halt. but with two or more qtds (for
469 * example, with a 32 KB transfer), when the first qtd gets a
470 * short read the second must be removed by hand.
472 if (last_status == -EINPROGRESS) {
473 last_status = qtd_copy_status(ehci, urb,
475 if (last_status == -EREMOTEIO
477 & EHCI_LIST_END(ehci)))
478 last_status = -EINPROGRESS;
480 /* As part of low/full-speed endpoint-halt processing
481 * we must clear the TT buffer (11.17.5).
483 if (unlikely(last_status != -EINPROGRESS &&
484 last_status != -EREMOTEIO)) {
485 /* The TT's in some hubs malfunction when they
486 * receive this request following a STALL (they
487 * stop sending isochronous packets). Since a
488 * STALL can't leave the TT buffer in a busy
489 * state (if you believe Figures 11-48 - 11-51
490 * in the USB 2.0 spec), we won't clear the TT
491 * buffer in this case. Strictly speaking this
492 * is a violation of the spec.
494 if (last_status != -EPIPE)
495 ehci_clear_tt_buffer(ehci, qh, urb,
500 /* if we're removing something not at the queue head,
501 * patch the hardware queue pointer.
503 if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
504 last = list_entry (qtd->qtd_list.prev,
505 struct ehci_qtd, qtd_list);
506 last->hw_next = qtd->hw_next;
509 /* remove qtd; it's recycled after possible urb completion */
510 list_del (&qtd->qtd_list);
513 /* reinit the xacterr counter for the next qtd */
517 /* last urb's completion might still need calling */
518 if (likely (last != NULL)) {
		ehci_urb_done(ehci, last->urb, last_status);
		count++;
		ehci_qtd_free (ehci, last);
	}
524 /* Do we need to rescan for URBs dequeued during a giveback? */
525 if (unlikely(qh->needs_rescan)) {
526 /* If the QH is already unlinked, do the rescan now. */
		if (state == QH_STATE_IDLE)
			goto rescan;
530 /* Otherwise we have to wait until the QH is fully unlinked.
531 * Our caller will start an unlink if qh->needs_rescan is
		 * set.  But if an unlink has already started, nothing needs
		 * to be done.
		 */
		if (state != QH_STATE_LINKED)
			qh->needs_rescan = 0;
	}
539 /* restore original state; caller must unlink or relink */
540 qh->qh_state = state;
542 /* be sure the hardware's done with the qh before refreshing
543 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(ehci, qh);
			break;
551 case QH_STATE_LINKED:
552 /* We won't refresh a QH that's linked (after the HC
553 * stopped the queue). That avoids a race:
554 * - HC reads first part of QH;
555 * - CPU updates that first part and the token;
556 * - HC reads rest of that QH, including token
557 * Result: HC gets an inconsistent image, and then
558 * DMAs to/from the wrong memory (corrupting it).
			 * That should be rare for interrupt transfers,
			 * except maybe high bandwidth ...
			 */

			/* Tell the caller to start an unlink */
			qh->needs_rescan = 1;
			break;
		/* otherwise, unlink already started */
		}
	}

	return count;
}
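/* QH lifecycle, for orientation: qh_make() leaves a QH in QH_STATE_IDLE;
 * linking it into a schedule moves it to QH_STATE_LINKED; qh_completions()
 * marks it QH_STATE_COMPLETING while scanning; unlinking passes through
 * QH_STATE_UNLINK (or QH_STATE_UNLINK_WAIT for deferred unlinks) and back
 * to idle once the hardware is known to have let go of the QH.
 */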
574 /*-------------------------------------------------------------------------*/
576 // high bandwidth multiplier, as encoded in highspeed endpoint descriptors
577 #define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
578 // ... and packet size, for any kind of endpoint descriptor
579 #define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
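// Example: a high bandwidth interrupt endpoint reporting wMaxPacketSize
// 0x1400 gives max_packet() = 0x400 (1024 bytes per packet) and
// hb_mult() = 3 (bits 12:11 encode two extra transactions per microframe),
// i.e. up to 3KB per microframe.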
/*
 * reverse of qh_urb_transaction:  free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list
) {
	struct list_head	*entry, *temp;

	list_for_each_safe (entry, temp, qtd_list) {
		struct ehci_qtd	*qtd;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		list_del (&qtd->qtd_list);
		ehci_qtd_free (ehci, qtd);
	}
}
/*
 * create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *
qh_urb_transaction (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*head,
	gfp_t			flags
) {
	struct ehci_qtd		*qtd, *qtd_prev;
	dma_addr_t		buf;
	int			len, this_sg_len, maxpacket;
	int			is_input;
	u32			token;
	int			i;
	struct scatterlist	*sg;
620 * URBs map to sequences of QTDs: one logical transaction
622 qtd = ehci_qtd_alloc (ehci, flags);
625 list_add_tail (&qtd->qtd_list, head);
628 token = QTD_STS_ACTIVE;
629 token |= (EHCI_TUNE_CERR << 10);
630 /* for split transactions, SplitXState initialized to zero */
632 len = urb->transfer_buffer_length;
633 is_input = usb_pipein (urb->pipe);
634 if (usb_pipecontrol (urb->pipe)) {
636 qtd_fill(ehci, qtd, urb->setup_dma,
637 sizeof (struct usb_ctrlrequest),
638 token | (2 /* "setup" */ << 8), 8);
640 /* ... and always at least one more pid */
643 qtd = ehci_qtd_alloc (ehci, flags);
647 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
648 list_add_tail (&qtd->qtd_list, head);
650 /* for zero length DATA stages, STATUS is always IN */
652 token |= (1 /* "in" */ << 8);
656 * data transfer stage: buffer setup
658 i = urb->num_mapped_sgs;
659 if (len > 0 && i > 0) {
661 buf = sg_dma_address(sg);
663 /* urb->transfer_buffer_length may be smaller than the
		 * size of the scatterlist (or vice versa)
		 */
666 this_sg_len = min_t(int, sg_dma_len(sg), len);
669 buf = urb->transfer_dma;
674 token |= (1 /* "in" */ << 8);
675 /* else it's already initted to "out" pid (0 << 8) */
677 maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;
687 this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
689 this_sg_len -= this_qtd_len;
694 * short reads advance to a "magic" dummy instead of the next
695 * qtd ... that forces the queue to stop, for manual cleanup.
696 * (this will usually be overridden later.)
699 qtd->hw_alt_next = ehci->async->hw->hw_alt_next;
701 /* qh makes control packets use qtd toggle; maybe switch it */
702 if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
705 if (likely(this_sg_len <= 0)) {
706 if (--i <= 0 || len <= 0)
709 buf = sg_dma_address(sg);
710 this_sg_len = min_t(int, sg_dma_len(sg), len);
714 qtd = ehci_qtd_alloc (ehci, flags);
718 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
	}
	/*
	 * unless the caller requires manual cleanup after short reads,
	 * have the alt_next mechanism keep the queue running after the
	 * last data qtd (the only one, for control and most other cases).
	 */
	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol (urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END(ehci);
	/*
	 * control requests may need a terminating data "status" ack;
	 * other OUT ones may need a terminating short packet
	 * (zero length).
	 */
736 if (likely (urb->transfer_buffer_length != 0)) {
739 if (usb_pipecontrol (urb->pipe)) {
741 token ^= 0x0100; /* "in" <--> "out" */
742 token |= QTD_TOGGLE; /* force DATA1 */
743 } else if (usb_pipeout(urb->pipe)
744 && (urb->transfer_flags & URB_ZERO_PACKET)
745 && !(urb->transfer_buffer_length % maxpacket)) {
750 qtd = ehci_qtd_alloc (ehci, flags);
754 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
755 list_add_tail (&qtd->qtd_list, head);
757 /* never any data in such packets */
758 qtd_fill(ehci, qtd, 0, 0, token, 0);
	/* by default, enable interrupt on urb completion */
	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
	return head;

cleanup:
	qtd_list_free (ehci, urb, head);
	return NULL;
}
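/* Resulting chains, roughly: a control URB becomes a SETUP qtd, zero or
 * more DATA qtds, then a STATUS qtd in the opposite direction; bulk and
 * interrupt URBs become one qtd per 16-20KB of (possibly scatterlist)
 * buffer, plus an optional zero length OUT packet for URB_ZERO_PACKET.
 * The final qtd carries QTD_IOC so its completion raises an interrupt.
 */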
772 /*-------------------------------------------------------------------------*/
774 // Would be best to create all qh's from config descriptors,
775 // when each interface/altsetting is established. Unlink
776 // any previous qh and cancel its urbs first; endpoints are
777 // implicitly reset then (data toggle too).
778 // That'd mean updating how usbcore talks to HCDs. (2.7?)
/*
 * Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *
qh_make (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			flags
) {
	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
	u32			info1 = 0, info2 = 0;
	int			is_input, type;
	int			maxp = 0;
	struct usb_tt		*tt = urb->dev->tt;
	struct ehci_qh_hw	*hw;

	if (!qh)
		return qh;
	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint (urb->pipe) << 8;
	info1 |= usb_pipedevice (urb->pipe) << 0;

	is_input = usb_pipein (urb->pipe);
	type = usb_pipetype (urb->pipe);
	maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);

	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
	 * acts like up to 3KB, but is built from smaller packets.
	 */
	if (max_packet(maxp) > 1024) {
		ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
		goto done;
	}
823 /* Compute interrupt scheduling parameters just once, and save.
824 * - allowing for high bandwidth, how many nsec/uframe are used?
825 * - split transactions need a second CSPLIT uframe; same question
826 * - splits also need a schedule gap (for full/low speed I/O)
827 * - qh has a polling interval
	 * For control/bulk requests, the HC or TT handles these.
	 */
831 if (type == PIPE_INTERRUPT) {
832 qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
834 hb_mult(maxp) * max_packet(maxp)));
835 qh->start = NO_FRAME;
836 qh->stamp = ehci->periodic_stamp;
838 if (urb->dev->speed == USB_SPEED_HIGH) {
842 qh->period = urb->interval >> 3;
843 if (qh->period == 0 && urb->interval != 1) {
844 /* NOTE interval 2 or 4 uframes could work.
845 * But interval 1 scheduling is simpler, and
846 * includes high bandwidth.
849 } else if (qh->period > ehci->periodic_size) {
850 qh->period = ehci->periodic_size;
851 urb->interval = qh->period << 3;
856 /* gap is f(FS/LS transfer times) */
857 qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
858 is_input, 0, maxp) / (125 * 1000);
860 /* FIXME this just approximates SPLIT/CSPLIT times */
861 if (is_input) { // SPLIT, gap, CSPLIT+DATA
862 qh->c_usecs = qh->usecs + HS_USECS (0);
863 qh->usecs = HS_USECS (1);
864 } else { // SPLIT+DATA, gap, CSPLIT
865 qh->usecs += HS_USECS (1);
866 qh->c_usecs = HS_USECS (0);
869 think_time = tt ? tt->think_time : 0;
870 qh->tt_usecs = NS_TO_US (think_time +
871 usb_calc_bus_time (urb->dev->speed,
872 is_input, 0, max_packet (maxp)));
873 qh->period = urb->interval;
874 if (qh->period > ehci->periodic_size) {
875 qh->period = ehci->periodic_size;
876 urb->interval = qh->period;
	/* support for tt scheduling, and access to toggles */
	qh->dev = urb->dev;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= (1 << 12);	/* EPS "low" */
		/* FALL THROUGH */

	case USB_SPEED_FULL:
891 /* EPS 0 means "full" */
892 if (type != PIPE_INTERRUPT)
893 info1 |= (EHCI_TUNE_RL_TT << 28);
894 if (type == PIPE_CONTROL) {
895 info1 |= (1 << 27); /* for TT */
896 info1 |= 1 << 14; /* toggle from qtd */
900 info2 |= (EHCI_TUNE_MULT_TT << 30);
902 /* Some Freescale processors have an erratum in which the
903 * port number in the queue head was 0..N-1 instead of 1..N.
905 if (ehci_has_fsl_portno_bug(ehci))
			info2 |= (urb->dev->ttport-1) << 23;
		else
			info2 |= urb->dev->ttport << 23;
910 /* set the address of the TT; for TDI's integrated
911 * root hub tt, leave it zeroed.
913 if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
914 info2 |= tt->hub->devnum << 16;
916 /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
920 case USB_SPEED_HIGH: /* no TT involved */
921 info1 |= (2 << 12); /* EPS "high" */
922 if (type == PIPE_CONTROL) {
923 info1 |= (EHCI_TUNE_RL_HS << 28);
924 info1 |= 64 << 16; /* usb2 fixed maxpacket */
925 info1 |= 1 << 14; /* toggle from qtd */
926 info2 |= (EHCI_TUNE_MULT_HS << 30);
927 } else if (type == PIPE_BULK) {
928 info1 |= (EHCI_TUNE_RL_HS << 28);
929 /* The USB spec says that high speed bulk endpoints
930 * always use 512 byte maxpacket. But some device
931 * vendors decided to ignore that, and MSFT is happy
932 * to help them do so. So now people expect to use
			 * such nonconformant devices with Linux too; sigh.
			 */
			info1 |= max_packet(maxp) << 16;
936 info2 |= (EHCI_TUNE_MULT_HS << 30);
937 } else { /* PIPE_INTERRUPT */
938 info1 |= max_packet (maxp) << 16;
			info2 |= hb_mult (maxp) << 30;
		}
		break;
	default:
		dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
		qh_put (qh);
		return NULL;
	}
949 /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
951 /* init as live, toggle clear, advance to dummy */
952 qh->qh_state = QH_STATE_IDLE;
954 hw->hw_info1 = cpu_to_hc32(ehci, info1);
955 hw->hw_info2 = cpu_to_hc32(ehci, info2);
956 qh->is_out = !is_input;
957 usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
	qh_refresh (ehci, qh);
	return qh;
}
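/* For reference, the two hardware dwords filled in above (EHCI spec 3.6.2):
 * hw_info1 packs device address [6:0], endpoint number [11:8], endpoint
 * speed [13:12], "data toggle from qtd" [14], max packet length [26:16],
 * the control-endpoint flag [27] and NAK reload [31:28]; hw_info2 packs
 * the interrupt S-mask [7:0], C-mask [15:8], TT hub address [22:16],
 * TT port [29:23] and high bandwidth Mult [31:30].
 */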
962 /*-------------------------------------------------------------------------*/
964 /* move qh (and its qtds) onto async queue; maybe enable queue. */
966 static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
968 __hc32 dma = QH_NEXT(ehci, qh->qh_dma);
969 struct ehci_qh *head;
971 /* Don't link a QH if there's a Clear-TT-Buffer pending */
	if (unlikely(qh->clearing_tt))
		return;
975 WARN_ON(qh->qh_state != QH_STATE_IDLE);
	/* (re)start the async schedule? */
	head = ehci->async;
	timer_action_done (ehci, TIMER_ASYNC_OFF);
980 if (!head->qh_next.qh) {
981 u32 cmd = ehci_readl(ehci, &ehci->regs->command);
983 if (!(cmd & CMD_ASE)) {
984 /* in case a clear of CMD_ASE didn't take yet */
985 (void)handshake(ehci, &ehci->regs->status,
988 ehci_writel(ehci, cmd, &ehci->regs->command);
989 /* posted write need not be known to HC yet ... */
993 /* clear halt and/or toggle; and maybe recover from silicon quirk */
994 qh_refresh(ehci, qh);
996 /* splice right after start */
997 qh->qh_next = head->qh_next;
998 qh->hw->hw_next = head->hw->hw_next;
1001 head->qh_next.qh = qh;
	head->hw->hw_next = dma;

	qh_get(qh);
	qh->xacterrs = 0;
	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */
}
1010 /*-------------------------------------------------------------------------*/
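/* The async schedule is a circular list of QHs.  ehci->async is a
 * software-owned head QH (with the H bit set in hw_info1) that never
 * carries transfers; ASYNCLISTADDR points into the ring and qh_link_async()
 * splices new QHs right after that head.  Turning CMD_ASE off and on is
 * deliberately lazy (see TIMER_ASYNC_OFF) since restarting the schedule
 * is expensive.
 */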
/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			epnum,
	void			**ptr
)
{
	struct ehci_qh		*qh = NULL;
	__hc32			qh_addr_mask = cpu_to_hc32(ehci, 0x7f);
1029 qh = (struct ehci_qh *) *ptr;
1030 if (unlikely (qh == NULL)) {
1031 /* can't sleep here, we have ehci->lock... */
1032 qh = qh_make (ehci, urb, GFP_ATOMIC);
1035 if (likely (qh != NULL)) {
1036 struct ehci_qtd *qtd;
1038 if (unlikely (list_empty (qtd_list)))
1041 qtd = list_entry (qtd_list->next, struct ehci_qtd,
1044 /* control qh may need patching ... */
1045 if (unlikely (epnum == 0)) {
1047 /* usb_reset_device() briefly reverts to address 0 */
1048 if (usb_pipedevice (urb->pipe) == 0)
1049 qh->hw->hw_info1 &= ~qh_addr_mask;
1052 /* just one way to queue requests: swap with the dummy qtd.
1053 * only hc or qh_refresh() ever modify the overlay.
1055 if (likely (qtd != NULL)) {
1056 struct ehci_qtd *dummy;
1060 /* to avoid racing the HC, use the dummy td instead of
1061 * the first td of our list (becomes new dummy). both
1062 * tds stay deactivated until we're done, when the
1063 * HC is allowed to fetch the old dummy (4.10.2).
1065 token = qtd->hw_token;
			qtd->hw_token = HALT_BIT(ehci);
			wmb ();
			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;
1074 list_del (&qtd->qtd_list);
1075 list_add (&dummy->qtd_list, qtd_list);
1076 list_splice_tail(qtd_list, &qh->qtd_list);
1078 ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
1081 /* hc must see the new dummy at list end */
1083 qtd = list_entry (qh->qtd_list.prev,
1084 struct ehci_qtd, qtd_list);
1085 qtd->hw_next = QTD_NEXT(ehci, dma);
1087 /* let the hc process these next qtds */
1089 dummy->hw_token = token;
			urb->hcpriv = qh_get (qh);
		}
	}
	return qh;
}
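/* The dummy-qtd dance above is how new work is appended to a possibly
 * live queue without locking out the controller (EHCI 4.10.2): new qtds
 * are queued behind the permanently halted dummy the queue already points
 * at, the old dummy is overwritten with the first real qtd, and only after
 * a wmb() is its token made active -- so the HC can never fetch a
 * half-built qtd.
 */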
1097 /*-------------------------------------------------------------------------*/
static int
submit_async (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	int			epnum;
	unsigned long		flags;
	struct ehci_qh		*qh = NULL;
	int			rc;

	epnum = urb->ep->desc.bEndpointAddress;
1113 #ifdef EHCI_URB_TRACE
1115 struct ehci_qtd *qtd;
1116 qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
1118 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
1119 __func__, urb->dev->devpath, urb,
1120 epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
1121 urb->transfer_buffer_length,
1122 qtd, urb->ep->hcpriv);
1126 spin_lock_irqsave (&ehci->lock, flags);
1127 if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
1131 rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
1135 qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
1136 if (unlikely(qh == NULL)) {
1137 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
1142 /* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
1145 if (likely (qh->qh_state == QH_STATE_IDLE))
		qh_link_async(ehci, qh);
 done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (unlikely (qh == NULL))
		qtd_list_free (ehci, urb, qtd_list);
	return rc;
}
1154 /*-------------------------------------------------------------------------*/
1156 /* the async qh for the qtds being reclaimed are now unlinked from the HC */
1158 static void end_unlink_async (struct ehci_hcd *ehci)
1160 struct ehci_qh *qh = ehci->reclaim;
1161 struct ehci_qh *next;
1163 iaa_watchdog_done(ehci);
1165 // qh->hw_next = cpu_to_hc32(qh->qh_dma);
1166 qh->qh_state = QH_STATE_IDLE;
1167 qh->qh_next.qh = NULL;
1168 qh_put (qh); // refcount from reclaim
	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
	next = qh->reclaim;
	ehci->reclaim = next;
	qh->reclaim = NULL;
1175 qh_completions (ehci, qh);
	if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
		qh_link_async (ehci, qh);
	} else {
		/* it's not free to turn the async schedule on/off; leave it
		 * active but idle for a while once it empties.
		 */
1183 if (ehci->rh_state == EHCI_RH_RUNNING
1184 && ehci->async->qh_next.qh == NULL)
1185 timer_action (ehci, TIMER_ASYNC_OFF);
1187 qh_put(qh); /* refcount from async list */
1190 ehci->reclaim = NULL;
1191 start_unlink_async (ehci, next);
1194 if (ehci->has_synopsys_hc_bug)
		ehci_writel(ehci, (u32) ehci->async->qh_dma,
			    &ehci->regs->async_next);
}
1199 /* makes sure the async qh will become idle */
1200 /* caller must own ehci->lock */
1202 static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
1204 int cmd = ehci_readl(ehci, &ehci->regs->command);
1205 struct ehci_qh *prev;
1208 assert_spin_locked(&ehci->lock);
1210 || (qh->qh_state != QH_STATE_LINKED
1211 && qh->qh_state != QH_STATE_UNLINK_WAIT)
1216 /* stop async schedule right now? */
1217 if (unlikely (qh == ehci->async)) {
1218 /* can't get here without STS_ASS set */
1219 if (ehci->rh_state != EHCI_RH_HALTED
1220 && !ehci->reclaim) {
1221 /* ... and CMD_IAAD clear */
1222 ehci_writel(ehci, cmd & ~CMD_ASE,
1223 &ehci->regs->command);
1225 // handshake later, if we need to
1226 timer_action_done (ehci, TIMER_ASYNC_OFF);
1231 qh->qh_state = QH_STATE_UNLINK;
	ehci->reclaim = qh = qh_get (qh);

	prev = ehci->async;
1235 while (prev->qh_next.qh != qh)
1236 prev = prev->qh_next.qh;
1238 prev->hw->hw_next = qh->hw->hw_next;
1239 prev->qh_next = qh->qh_next;
1240 if (ehci->qh_scan_next == qh)
1241 ehci->qh_scan_next = qh->qh_next.qh;
1244 /* If the controller isn't running, we don't have to wait for it */
1245 if (unlikely(ehci->rh_state != EHCI_RH_RUNNING)) {
1246 /* if (unlikely (qh->reclaim != 0))
		 *	this will recurse, probably not much
		 */
		end_unlink_async (ehci);
		return;
	}
	cmd |= CMD_IAAD;
	ehci_writel(ehci, cmd, &ehci->regs->command);
	(void)ehci_readl(ehci, &ehci->regs->command);
	iaa_watchdog_start(ehci);
}
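/* Unlink handshake, in short: start_unlink_async() removes the QH from the
 * software list and its predecessor's hw_next, then sets CMD_IAAD to ring
 * the "interrupt on async advance" doorbell.  When the controller raises
 * STS_IAA it has dropped any cached pointers to the QH; the IRQ path then
 * calls end_unlink_async(), which completes the QH's qtds and relinks or
 * recycles it.  The IAA watchdog covers controllers that never deliver
 * that interrupt.
 */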
1259 /*-------------------------------------------------------------------------*/
static void scan_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;
	bool			stopped;
	enum ehci_timer_action	action = TIMER_IO_WATCHDOG;
1267 timer_action_done (ehci, TIMER_ASYNC_SHRINK);
1268 stopped = (ehci->rh_state != EHCI_RH_RUNNING);
1270 ehci->qh_scan_next = ehci->async->qh_next.qh;
1271 while (ehci->qh_scan_next) {
1272 qh = ehci->qh_scan_next;
1273 ehci->qh_scan_next = qh->qh_next.qh;
1275 /* clean any finished work for this qh */
1276 if (!list_empty(&qh->qtd_list)) {
1280 * Unlinks could happen here; completion reporting
1281 * drops the lock. That's why ehci->qh_scan_next
1282 * always holds the next qh to scan; if the next qh
1283 * gets unlinked then ehci->qh_scan_next is adjusted
			 * in start_unlink_async().
			 */
			qh = qh_get(qh);
			temp = qh_completions(ehci, qh);
1288 if (qh->needs_rescan)
1289 unlink_async(ehci, qh);
1290 qh->unlink_time = jiffies + EHCI_SHRINK_JIFFIES;
1296 /* unlink idle entries, reducing DMA usage as well
1297 * as HCD schedule-scanning costs. delay for any qh
1298 * we just scanned, there's a not-unusual case that it
1299 * doesn't stay idle for long.
		 * (plus, avoids some kind of re-activation race.)
		 */
1302 if (list_empty(&qh->qtd_list)
1303 && qh->qh_state == QH_STATE_LINKED) {
1304 if (!ehci->reclaim && (stopped ||
1305 time_after_eq(jiffies, qh->unlink_time)))
				start_unlink_async(ehci, qh);
			else
				action = TIMER_ASYNC_SHRINK;
		}
	}
1311 if (action == TIMER_ASYNC_SHRINK)
		timer_action (ehci, TIMER_ASYNC_SHRINK);
}