/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>

#include "xhci.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: the "usec" timeout has
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
                u32 mask, u32 done, int usec)
        result = xhci_readl(xhci, ptr);
        if (result == ~(u32)0)          /* card removed */
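/*
 * Illustrative sketch of the elided polling loop above (an assumption based
 * on the EHCI-style handshake pattern, not the verbatim driver body): spin
 * on the register, fail fast on an all-ones read, and give up when the
 * timeout is exhausted.
 *
 *      do {
 *              result = xhci_readl(xhci, ptr);
 *              if (result == ~(u32)0)
 *                      return -ENODEV;
 *              if ((result & mask) == done)
 *                      return 0;
 *              udelay(1);
 *      } while (--usec > 0);
 *      return -ETIMEDOUT;
 */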
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
        halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;

        cmd = xhci_readl(xhci, &xhci->op_regs->command);
        xhci_writel(xhci, cmd, &xhci->op_regs->command);
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
        xhci_dbg(xhci, "// Halt the HC\n");

        ret = handshake(xhci, &xhci->op_regs->status,
                        STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
        xhci->xhc_state |= XHCI_STATE_HALTED;
        xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
        xhci_warn(xhci, "Host not halted after %u microseconds.\n",
/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
        temp = xhci_readl(xhci, &xhci->op_regs->command);
        xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
        xhci_writel(xhci, temp, &xhci->op_regs->command);

        /*
         * Wait for the HCHalted Status bit to be 0 to indicate the host is
         * running.
         */
        ret = handshake(xhci, &xhci->op_regs->status,
                        STS_HALT, 0, XHCI_MAX_HALT_USEC);
        if (ret == -ETIMEDOUT)
                xhci_err(xhci, "Host took too long to start, "
                                "waited %u microseconds.\n",
        xhci->xhc_state &= ~XHCI_STATE_HALTED;
/*
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
        state = xhci_readl(xhci, &xhci->op_regs->status);
        if ((state & STS_HALT) == 0) {
                xhci_warn(xhci, "Host controller not halted, aborting reset.\n");

        xhci_dbg(xhci, "// Reset the HC\n");
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command |= CMD_RESET;
        xhci_writel(xhci, command, &xhci->op_regs->command);

        ret = handshake(xhci, &xhci->op_regs->command,
                        CMD_RESET, 0, 10 * 1000 * 1000);

        xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
        /*
         * xHCI cannot write to any doorbells or operational registers other
         * than status until the "Controller Not Ready" flag is cleared.
         */
        return handshake(xhci, &xhci->op_regs->status,
                        STS_CNR, 0, 10 * 1000 * 1000);
static int xhci_free_msi(struct xhci_hcd *xhci)
        if (!xhci->msix_entries)

        for (i = 0; i < xhci->msix_count; i++)
                if (xhci->msix_entries[i].vector)
                        free_irq(xhci->msix_entries[i].vector,

static int xhci_setup_msi(struct xhci_hcd *xhci)
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        ret = pci_enable_msi(pdev);
                xhci_dbg(xhci, "failed to allocate MSI entry\n");

        ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
                        0, "xhci_hcd", xhci_to_hcd(xhci));
                xhci_dbg(xhci, "disable MSI interrupt\n");
                pci_disable_msi(pdev);
/*
 * Free all IRQs that were requested.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        /* return if using legacy interrupt */
        if (xhci_to_hcd(xhci)->irq >= 0)

        ret = xhci_free_msi(xhci);
        free_irq(pdev->irq, xhci_to_hcd(xhci));
static int xhci_setup_msix(struct xhci_hcd *xhci)
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

        /*
         * Calculate the number of MSI-X vectors supported.
         * - HCS_MAX_INTRS: the maximum number of interrupts the host can
         *   handle, based on the number of interrupters in the xHCI
         *   HCSPARAMS1 register.
         * - num_online_cpus: one MSI-X vector per CPU core, plus one
         *   additional vector so that an interrupt is always available.
         */
        xhci->msix_count = min(num_online_cpus() + 1,
                                HCS_MAX_INTRS(xhci->hcs_params1));
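        /*
         * Worked example (illustrative numbers, not from the source): on a
         * 4-CPU system whose HCSPARAMS1 reports 8 interrupters,
         * msix_count = min(4 + 1, 8) = 5 MSI-X vectors are requested.
         */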
        xhci->msix_entries =
                kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
                        GFP_KERNEL);
        if (!xhci->msix_entries) {
                xhci_err(xhci, "Failed to allocate MSI-X entries\n");

        for (i = 0; i < xhci->msix_count; i++) {
                xhci->msix_entries[i].entry = i;
                xhci->msix_entries[i].vector = 0;

        ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
                xhci_dbg(xhci, "Failed to enable MSI-X\n");

        for (i = 0; i < xhci->msix_count; i++) {
                ret = request_irq(xhci->msix_entries[i].vector,
                                (irq_handler_t)xhci_msi_irq,
                                0, "xhci_hcd", xhci_to_hcd(xhci));

        hcd->msix_enabled = 1;

        xhci_dbg(xhci, "disable MSI-X interrupt\n");
        pci_disable_msix(pdev);
        kfree(xhci->msix_entries);
        xhci->msix_entries = NULL;
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

        if (xhci->msix_entries) {
                pci_disable_msix(pdev);
                kfree(xhci->msix_entries);
                xhci->msix_entries = NULL;
        } else {
                pci_disable_msi(pdev);

        hcd->msix_enabled = 0;
static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
        if (xhci->msix_entries) {
                for (i = 0; i < xhci->msix_count; i++)
                        synchronize_irq(xhci->msix_entries[i].vector);
static int xhci_try_enable_msi(struct usb_hcd *hcd)
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        /*
         * Some Fresco Logic host controllers advertise MSI, but fail to
         * generate interrupts.  Don't even try to enable MSI.
         */
        if (xhci->quirks & XHCI_BROKEN_MSI)

        /* unregister the legacy interrupt */
        free_irq(hcd->irq, hcd);

        ret = xhci_setup_msix(xhci);
        /* fall back to MSI */
        ret = xhci_setup_msi(xhci);

        /* hcd->irq is -1, we have MSI */

        xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");

        /* fall back to legacy interrupt */
        ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
                        hcd->irq_descr, hcd);
        xhci_err(xhci, "request interrupt %d failed\n",
        hcd->irq = pdev->irq;
static int xhci_try_enable_msi(struct usb_hcd *hcd)

static void xhci_cleanup_msix(struct xhci_hcd *xhci)

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
static void compliance_mode_recovery(unsigned long arg)
        struct xhci_hcd *xhci;

        xhci = (struct xhci_hcd *)arg;

        for (i = 0; i < xhci->num_usb3_ports; i++) {
                temp = xhci_readl(xhci, xhci->usb3_ports[i]);
                if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
                        /*
                         * Compliance Mode Detected. Letting USB Core
                         * handle the Warm Reset
                         */
                        xhci_dbg(xhci, "Compliance Mode Detected->Port %d!\n",
                        xhci_dbg(xhci, "Attempting Recovery routine!\n");
                        hcd = xhci->shared_hcd;

                        if (hcd->state == HC_STATE_SUSPENDED)
                                usb_hcd_resume_root_hub(hcd);

                        usb_hcd_poll_rh_status(hcd);

        if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
                mod_timer(&xhci->comp_mode_recovery_timer,
                        jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
/*
 * Quirk to work around an issue caused by the SN65LVPE502CP USB3.0 re-driver,
 * which sometimes makes ports behind that hardware enter compliance mode.
 * The quirk creates a timer that polls the link state of each host
 * controller port every 2 seconds and recovers a port by issuing a Warm
 * Reset if compliance mode is detected; otherwise the port becomes "dead"
 * (no device connections or disconnections will be detected anymore).
 * Because no status event is generated when entering compliance mode (per
 * the xHCI spec), this quirk is needed on systems that have the failing
 * hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
        xhci->port_status_u0 = 0;
        init_timer(&xhci->comp_mode_recovery_timer);
        xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
        xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
        xhci->comp_mode_recovery_timer.expires = jiffies +
                        msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

        set_timer_slack(&xhci->comp_mode_recovery_timer,
                        msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
        add_timer(&xhci->comp_mode_recovery_timer);
        xhci_dbg(xhci, "Compliance Mode Recovery Timer Initialized.\n");
/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 *
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool compliance_mode_recovery_timer_quirk_check(void)
        const char *dmi_product_name, *dmi_sys_vendor;

        dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
        dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
        if (!dmi_product_name || !dmi_sys_vendor)

        if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))

        if (strstr(dmi_product_name, "Z420") ||
                        strstr(dmi_product_name, "Z620") ||
                        strstr(dmi_product_name, "Z820") ||
                        strstr(dmi_product_name, "Z1 Workstation"))

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
        return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
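/*
 * Example reading of the bitmask above (illustrative, not from the source):
 * with num_usb3_ports == 2, (1 << 2) - 1 == 0b11, so port_status_u0 must
 * have a bit set for each USB3 port (each port seen in U0 at least once)
 * before the recovery timer is allowed to die.
 */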
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        xhci_dbg(xhci, "xhci_init\n");
        spin_lock_init(&xhci->lock);
        if (xhci->hci_version == 0x95 && link_quirk) {
                xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
                xhci->quirks |= XHCI_LINK_TRB_QUIRK;
                xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
        retval = xhci_mem_init(xhci, GFP_KERNEL);
        xhci_dbg(xhci, "Finished xhci_init\n");

        /* Initialize Compliance Mode Recovery data if needed */
        if (compliance_mode_recovery_timer_quirk_check()) {
                xhci->quirks |= XHCI_COMP_MODE_QUIRK;
                compliance_mode_recovery_timer_init(xhci);
/*-------------------------------------------------------------------------*/

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
        struct xhci_hcd *xhci = (struct xhci_hcd *) arg;

        xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

        spin_lock_irqsave(&xhci->lock, flags);
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
        if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
                        (xhci->xhc_state & XHCI_STATE_HALTED)) {
                xhci_dbg(xhci, "HW died, polling stopped.\n");
                spin_unlock_irqrestore(&xhci->lock, flags);

        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
        xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
        xhci->error_bitmask = 0;
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
        xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp_64 &= ~ERST_PTR_MASK;
        xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
        xhci_dbg(xhci, "Command ring:\n");
        xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);
        for (i = 0; i < MAX_HC_SLOTS; ++i) {
                for (j = 0; j < 31; ++j) {
                        xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);

        spin_unlock_irqrestore(&xhci->lock, flags);

        mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);

        xhci_dbg(xhci, "Quit polling the event ring.\n");
static int xhci_run_finished(struct xhci_hcd *xhci)
        if (xhci_start(xhci)) {
        xhci->shared_hcd->state = HC_STATE_RUNNING;
        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

        if (xhci->quirks & XHCI_NEC_HOST)
                xhci_ring_cmd_db(xhci);

        xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        /* Start the xHCI host controller running only after the USB 2.0
         * roothub is setup.
         */

        hcd->uses_new_polling = 1;
        if (!usb_hcd_is_primary_hcd(hcd))
                return xhci_run_finished(xhci);

        xhci_dbg(xhci, "xhci_run\n");

        ret = xhci_try_enable_msi(hcd);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
        init_timer(&xhci->event_ring_timer);
        xhci->event_ring_timer.data = (unsigned long) xhci;
        xhci->event_ring_timer.function = xhci_event_ring_work;
        /* Poll the event ring */
        xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
        xhci_dbg(xhci, "Setting event ring polling timer\n");
        add_timer(&xhci->event_ring_timer);

        xhci_dbg(xhci, "Command ring memory map follows:\n");
        xhci_debug_ring(xhci, xhci->cmd_ring);
        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);

        xhci_dbg(xhci, "ERST memory map follows:\n");
        xhci_dbg_erst(xhci, &xhci->erst);
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_ring(xhci, xhci->event_ring);
        xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp_64 &= ~ERST_PTR_MASK;
        xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

        xhci_dbg(xhci, "// Set the interrupt modulation register\n");
        temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
        temp &= ~ER_IRQ_INTERVAL_MASK;
        xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

        /* Set the HCD state before we enable the irqs */
        temp = xhci_readl(xhci, &xhci->op_regs->command);
        xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
        xhci_writel(xhci, temp, &xhci->op_regs->command);

        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
                        xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
        xhci_writel(xhci, ER_IRQ_ENABLE(temp),
                        &xhci->ir_set->irq_pending);
        xhci_print_ir_set(xhci, 0);

        if (xhci->quirks & XHCI_NEC_HOST)
                xhci_queue_vendor_command(xhci, 0, 0, 0,
                                TRB_TYPE(TRB_NEC_GET_FW));

        xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
static void xhci_only_stop_hcd(struct usb_hcd *hcd)
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        spin_lock_irq(&xhci->lock);

        /* The shared_hcd is going to be deallocated shortly (the USB core only
         * calls this function when allocation fails in usb_add_hcd(), or
         * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
         */
        xhci->shared_hcd = NULL;
        spin_unlock_irq(&xhci->lock);
/*
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        if (!usb_hcd_is_primary_hcd(hcd)) {
                xhci_only_stop_hcd(xhci->shared_hcd);

        spin_lock_irq(&xhci->lock);
        /* Make sure the xHC is halted for a USB3 roothub
         * (xhci_stop() could be called as part of failed init).
         */
        spin_unlock_irq(&xhci->lock);

        xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
        /* Tell the event ring poll function not to reschedule */
        del_timer_sync(&xhci->event_ring_timer);

        /* Deleting Compliance Mode Recovery Timer */
        if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
                        (!(xhci_all_ports_seen_u0(xhci))))
                del_timer_sync(&xhci->comp_mode_recovery_timer);

        if (xhci->quirks & XHCI_AMD_PLL_FIX)

        xhci_dbg(xhci, "// Disabling event ring interrupts\n");
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_writel(xhci, ER_IRQ_DISABLE(temp),
                        &xhci->ir_set->irq_pending);
        xhci_print_ir_set(xhci, 0);

        xhci_dbg(xhci, "cleaning up memory\n");
        xhci_mem_cleanup(xhci);
        xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
                        xhci_readl(xhci, &xhci->op_regs->status));
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
                usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

        spin_lock_irq(&xhci->lock);
        spin_unlock_irq(&xhci->lock);

        xhci_cleanup_msix(xhci);

        xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
                        xhci_readl(xhci, &xhci->op_regs->status));
#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
        xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
        xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
        xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
        xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
        xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
        xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
        xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);

static void xhci_restore_registers(struct xhci_hcd *xhci)
        xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
        xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
        xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
        xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
        xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
        xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
        xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
        xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
        xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
        /* step 2: initialize command ring buffer */
        val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
                (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                                      xhci->cmd_ring->dequeue) &
                 (u64) ~CMD_RING_RSVD_BITS) |
                xhci->cmd_ring->cycle_state;
        xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
                        (unsigned long long) val_64);
        xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
        struct xhci_ring *ring;
        struct xhci_segment *seg;

        ring = xhci->cmd_ring;
                        sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
                seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
                        cpu_to_le32(~TRB_CYCLE);
        } while (seg != ring->deq_seg);

        /* Reset the software enqueue and dequeue pointers */
        ring->deq_seg = ring->first_seg;
        ring->dequeue = ring->first_seg->trbs;
        ring->enq_seg = ring->deq_seg;
        ring->enqueue = ring->dequeue;

        /*
         * Ring is now zeroed, so the HW should look for change of ownership
         * when the cycle bit is set to 1.
         */
        ring->cycle_state = 1;

        /*
         * Reset the hardware dequeue pointer.
         * Yes, this will need to be re-written after resume, but we're paranoid
         * and want to make sure the hardware doesn't access bogus memory
         * because, say, the BIOS or an SMI started the host without changing
         * the command ring pointers.
         */
        xhci_set_cmd_ring_deq(xhci);
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci)
        struct usb_hcd *hcd = xhci_to_hcd(xhci);

        /* Don't poll the roothubs on bus suspend. */
        xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
        clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        del_timer_sync(&hcd->rh_timer);

        spin_lock_irq(&xhci->lock);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
        /* step 1: stop endpoint */
        /* skipped, assuming that port suspend has already been done */

        /* step 2: clear Run/Stop bit */
        command = xhci_readl(xhci, &xhci->op_regs->command);
        xhci_writel(xhci, command, &xhci->op_regs->command);
        if (handshake(xhci, &xhci->op_regs->status,
                        STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
                xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
                spin_unlock_irq(&xhci->lock);
        xhci_clear_command_ring(xhci);

        /* step 3: save registers */
        xhci_save_registers(xhci);

        /* step 4: set CSS flag */
        command = xhci_readl(xhci, &xhci->op_regs->command);
        xhci_writel(xhci, command, &xhci->op_regs->command);
        if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
                xhci_warn(xhci, "WARN: xHC save state timeout\n");
                spin_unlock_irq(&xhci->lock);
        spin_unlock_irq(&xhci->lock);

        /*
         * Deleting Compliance Mode Recovery Timer because the xHCI Host
         * is about to be suspended.
         */
        if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
                        (!(xhci_all_ports_seen_u0(xhci)))) {
                del_timer_sync(&xhci->comp_mode_recovery_timer);
                xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted!\n");

        /* step 5: remove core well power */
        /* synchronize irq when using MSI-X */
        xhci_msix_sync_irqs(xhci);
/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
        u32 command, temp = 0;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct usb_hcd *secondary_hcd;

        /* Wait a bit if either of the roothubs needs to settle from the
         * transition into bus suspend.
         */
        if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
                        time_before(jiffies,
                                xhci->bus_state[1].next_statechange))
953 spin_lock_irq(&xhci->lock);
954 if (xhci->quirks & XHCI_RESET_ON_RESUME)
958 /* step 1: restore register */
959 xhci_restore_registers(xhci);
960 /* step 2: initialize command ring buffer */
961 xhci_set_cmd_ring_deq(xhci);
962 /* step 3: restore state and start state*/
963 /* step 3: set CRS flag */
964 command = xhci_readl(xhci, &xhci->op_regs->command);
966 xhci_writel(xhci, command, &xhci->op_regs->command);
967 if (handshake(xhci, &xhci->op_regs->status,
968 STS_RESTORE, 0, 10 * 1000)) {
969 xhci_warn(xhci, "WARN: xHC restore state timeout\n");
970 spin_unlock_irq(&xhci->lock);
973 temp = xhci_readl(xhci, &xhci->op_regs->status);
976 /* If restore operation fails, re-initialize the HC during resume */
977 if ((temp & STS_SRE) || hibernated) {
978 /* Let the USB core know _both_ roothubs lost power. */
979 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
980 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
982 xhci_dbg(xhci, "Stop HCD\n");
985 spin_unlock_irq(&xhci->lock);
986 xhci_cleanup_msix(xhci);
988 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
989 /* Tell the event ring poll function not to reschedule */
991 del_timer_sync(&xhci->event_ring_timer);
994 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
995 temp = xhci_readl(xhci, &xhci->op_regs->status);
996 xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
997 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
998 xhci_writel(xhci, ER_IRQ_DISABLE(temp),
999 &xhci->ir_set->irq_pending);
1000 xhci_print_ir_set(xhci, 0);
1002 xhci_dbg(xhci, "cleaning up memory\n");
1003 xhci_mem_cleanup(xhci);
1004 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
1005 xhci_readl(xhci, &xhci->op_regs->status));
1007 /* USB core calls the PCI reinit and start functions twice:
1008 * first with the primary HCD, and then with the secondary HCD.
1009 * If we don't do the same, the host will never be started.
1011 if (!usb_hcd_is_primary_hcd(hcd))
1012 secondary_hcd = hcd;
1014 secondary_hcd = xhci->shared_hcd;
1016 xhci_dbg(xhci, "Initialize the xhci_hcd\n");
1017 retval = xhci_init(hcd->primary_hcd);
1020 xhci_dbg(xhci, "Start the primary HCD\n");
1021 retval = xhci_run(hcd->primary_hcd);
1023 xhci_dbg(xhci, "Start the secondary HCD\n");
1024 retval = xhci_run(secondary_hcd);
1026 hcd->state = HC_STATE_SUSPENDED;
1027 xhci->shared_hcd->state = HC_STATE_SUSPENDED;
1031 /* step 4: set Run/Stop bit */
1032 command = xhci_readl(xhci, &xhci->op_regs->command);
1034 xhci_writel(xhci, command, &xhci->op_regs->command);
1035 handshake(xhci, &xhci->op_regs->status, STS_HALT,
1038 /* step 5: walk topology and initialize portsc,
1039 * portpmsc and portli
1041 /* this is done in bus_resume */
1043 /* step 6: restart each of the previously
1044 * Running endpoints by ringing their doorbells
1047 spin_unlock_irq(&xhci->lock);
1051 usb_hcd_resume_root_hub(hcd);
1052 usb_hcd_resume_root_hub(xhci->shared_hcd);
        /*
         * If the system is subject to the quirk, the Compliance Mode Timer
         * must always be re-initialized after a system resume, since the
         * ports can suffer the compliance mode issue again.  It doesn't
         * matter whether the ports entered U0 before the system was
         * suspended.
         */
        if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
                compliance_mode_recovery_timer_init(xhci);

        /* Re-enable port polling. */
        xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
        set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        usb_hcd_poll_rh_status(hcd);
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/
/*
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the
 * core and HCDs.  Find the index for an endpoint given its descriptor.
 * Use the return value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
        if (usb_endpoint_xfer_control(desc))
                index = (unsigned int) (usb_endpoint_num(desc)*2);
        else
                index = (unsigned int) (usb_endpoint_num(desc)*2) +
                        (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
        return 1 << (xhci_get_endpoint_index(desc) + 1);

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
        return 1 << (ep_index + 1);
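/*
 * Worked example of the index/flag math above (illustrative): for ep 0x81
 * (IN, epnum 1), index = (1 * 2) + 1 - 1 = 2 and flag = 1 << (2 + 1) = 0x8;
 * for ep 0x01 (OUT, epnum 1), index = (1 * 2) + 0 - 1 = 1 and flag = 0x4;
 * for the default control endpoint, index = 0 and flag = 0x2 (EP0_FLAG).
 */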
/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
        return fls(added_ctxs) - 1;
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
        struct xhci_hcd *xhci;
        struct xhci_virt_device *virt_dev;

        if (!hcd || (check_ep && !ep) || !udev) {
                printk(KERN_DEBUG "xHCI %s called with invalid args\n",
        if (!udev->parent) {
                printk(KERN_DEBUG "xHCI %s called for root hub\n",

        xhci = hcd_to_xhci(hcd);
        if (xhci->xhc_state & XHCI_STATE_HALTED)

        if (check_virt_dev) {
                if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
                        printk(KERN_DEBUG "xHCI %s called with unaddressed "

                virt_dev = xhci->devs[udev->slot_id];
                if (virt_dev->udev != udev) {
                        printk(KERN_DEBUG "xHCI %s called with udev and "
                                "virt_dev that do not match\n", func);

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
                struct usb_device *udev, struct xhci_command *command,
                bool ctx_change, bool must_succeed);
/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
                unsigned int ep_index, struct urb *urb)
        struct xhci_container_ctx *in_ctx;
        struct xhci_container_ctx *out_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;
        int max_packet_size;
        int hw_max_packet_size;

        out_ctx = xhci->devs[slot_id]->out_ctx;
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
        max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
        if (hw_max_packet_size != max_packet_size) {
                xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
                xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
                xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
                                hw_max_packet_size);
                xhci_dbg(xhci, "Issuing evaluate context command.\n");

                /* Set up the modified control endpoint 0 */
                xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
                                xhci->devs[slot_id]->out_ctx, ep_index);
                in_ctx = xhci->devs[slot_id]->in_ctx;
                ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
                ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
                ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

                /* Set up the input context flags for the command */
                /* FIXME: This won't work if a non-default control endpoint
                 * changes max packet sizes.
                 */
                ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
                ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
                ctrl_ctx->drop_flags = 0;

                xhci_dbg(xhci, "Slot %d input context\n", slot_id);
                xhci_dbg_ctx(xhci, in_ctx, ep_index);
                xhci_dbg(xhci, "Slot %d output context\n", slot_id);
                xhci_dbg_ctx(xhci, out_ctx, ep_index);

                ret = xhci_configure_endpoint(xhci, urb->dev, NULL,

                /* Clean up the input context for later use by bandwidth
                 * functions.
                 */
                ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct xhci_td *buffer;
        unsigned long flags;
        unsigned int slot_id, ep_index;
        struct urb_priv *urb_priv;

        if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
                                        true, true, __func__) <= 0)

        slot_id = urb->dev->slot_id;
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);

        if (!HCD_HW_ACCESSIBLE(hcd)) {
                if (!in_interrupt())
                        xhci_dbg(xhci, "urb submitted during PCI suspend\n");

        if (usb_endpoint_xfer_isoc(&urb->ep->desc))
                size = urb->number_of_packets;

        urb_priv = kzalloc(sizeof(struct urb_priv) +
                                size * sizeof(struct xhci_td *), mem_flags);

        buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);

        for (i = 0; i < size; i++) {
                urb_priv->td[i] = buffer;

        urb_priv->length = size;
        urb_priv->td_cnt = 0;
        urb->hcpriv = urb_priv;

        if (usb_endpoint_xfer_control(&urb->ep->desc)) {
                /* Check to see if the max packet size for the default control
                 * endpoint changed during FS device enumeration
                 */
                if (urb->dev->speed == USB_SPEED_FULL) {
                        ret = xhci_check_maxpacket(xhci, slot_id,
                                xhci_urb_free_priv(xhci, urb_priv);

                /* We have a spinlock and interrupts disabled, so we must pass
                 * atomic context to this function, which may allocate memory.
                 */
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
                spin_unlock_irqrestore(&xhci->lock, flags);
        } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                if (xhci->devs[slot_id]->eps[ep_index].ep_state &
                                EP_GETTING_STREAMS) {
                        xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
                                        "is transitioning to using streams.\n");
                } else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
                                EP_GETTING_NO_STREAMS) {
                        xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
                                        "is transitioning to "
                                        "not having streams.\n");
                ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
                spin_unlock_irqrestore(&xhci->lock, flags);
        } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
                spin_unlock_irqrestore(&xhci->lock, flags);
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
                spin_unlock_irqrestore(&xhci->lock, flags);

        xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
                        "non-responsive xHCI host.\n",
                        urb->ep->desc.bEndpointAddress, urb);
        xhci_urb_free_priv(xhci, urb_priv);
        spin_unlock_irqrestore(&xhci->lock, flags);
/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
        unsigned int slot_id;
        unsigned int ep_index;
        unsigned int stream_id;
        struct xhci_virt_ep *ep;

        slot_id = urb->dev->slot_id;
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
        stream_id = urb->stream_id;
        ep = &xhci->devs[slot_id]->eps[ep_index];
        /* Common case: no streams */
        if (!(ep->ep_state & EP_HAS_STREAMS))

        if (stream_id == 0) {
                xhci_warn(xhci,
                                "WARN: Slot ID %u, ep index %u has streams, "
                                "but URB has no stream ID.\n",

        if (stream_id < ep->stream_info->num_streams)
                return ep->stream_info->stream_rings[stream_id];

        xhci_warn(xhci,
                        "WARN: Slot ID %u, ep index %u has "
                        "stream IDs 1 to %u allocated, "
                        "but stream ID %u is requested.\n",
                        ep->stream_info->num_streams - 1,
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are a few options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
        unsigned long flags;
        struct xhci_hcd *xhci;
        struct urb_priv *urb_priv;
        unsigned int ep_index;
        struct xhci_ring *ep_ring;
        struct xhci_virt_ep *ep;

        xhci = hcd_to_xhci(hcd);
        spin_lock_irqsave(&xhci->lock, flags);
        /* Make sure the URB hasn't completed or been unlinked already */
        ret = usb_hcd_check_unlink_urb(hcd, urb, status);
        if (ret || !urb->hcpriv)
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
                xhci_dbg(xhci, "HW died, freeing TD.\n");
                urb_priv = urb->hcpriv;
                for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
                        td = urb_priv->td[i];
                        if (!list_empty(&td->td_list))
                                list_del_init(&td->td_list);
                        if (!list_empty(&td->cancelled_td_list))
                                list_del_init(&td->cancelled_td_list);

                usb_hcd_unlink_urb_from_ep(hcd, urb);
                spin_unlock_irqrestore(&xhci->lock, flags);
                usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
                xhci_urb_free_priv(xhci, urb_priv);
        if ((xhci->xhc_state & XHCI_STATE_DYING) ||
                        (xhci->xhc_state & XHCI_STATE_HALTED)) {
                xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
                                "non-responsive xHCI host.\n",
                                urb->ep->desc.bEndpointAddress, urb);
                /* Let the stop endpoint command watchdog timer (which set this
                 * state) finish cleaning up the endpoint TD lists.  We must
                 * have caught it in the middle of dropping a lock and giving
                 * back an URB.
                 */

        xhci_dbg(xhci, "Cancel URB %p\n", urb);
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_ring(xhci, xhci->event_ring);
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
        ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);

        xhci_dbg(xhci, "Endpoint ring:\n");
        xhci_debug_ring(xhci, ep_ring);

        urb_priv = urb->hcpriv;

        for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
                td = urb_priv->td[i];
                list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);

        /* Queue a stop endpoint command, but only if this is
         * the first cancellation to be handled.
         */
        if (!(ep->ep_state & EP_HALT_PENDING)) {
                ep->ep_state |= EP_HALT_PENDING;
                ep->stop_cmds_pending++;
                ep->stop_cmd_timer.expires = jiffies +
                        XHCI_STOP_EP_CMD_TIMEOUT * HZ;
                add_timer(&ep->stop_cmd_timer);
                xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
                xhci_ring_cmd_db(xhci);

        spin_unlock_irqrestore(&xhci->lock, flags);
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep)
        struct xhci_hcd *xhci;
        struct xhci_container_ctx *in_ctx, *out_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_slot_ctx *slot_ctx;
        unsigned int last_ctx;
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;
        u32 new_add_flags, new_drop_flags, new_slot_info;

        ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
        xhci = hcd_to_xhci(hcd);
        if (xhci->xhc_state & XHCI_STATE_DYING)

        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
        drop_flag = xhci_get_endpoint_flag(&ep->desc);
        if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
                xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
                                __func__, drop_flag);

        in_ctx = xhci->devs[udev->slot_id]->in_ctx;
        out_ctx = xhci->devs[udev->slot_id]->out_ctx;
        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        /* If the HC already knows the endpoint is disabled,
         * or the HCD has noted it is disabled, ignore this request
         */
        if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
                        cpu_to_le32(EP_STATE_DISABLED)) ||
                        le32_to_cpu(ctrl_ctx->drop_flags) &
                        xhci_get_endpoint_flag(&ep->desc)) {
                xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",

        ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
        new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

        ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
        new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

        last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
        slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
        /* Update the last valid endpoint context, if we deleted the last one */
        if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
                        LAST_CTX(last_ctx)) {
                slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
                slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
        new_slot_info = le32_to_cpu(slot_ctx->dev_info);

        xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

        xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
                        (unsigned int) ep->desc.bEndpointAddress,
                        (unsigned int) new_drop_flags,
                        (unsigned int) new_add_flags,
                        (unsigned int) new_slot_info);
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep)
        struct xhci_hcd *xhci;
        struct xhci_container_ctx *in_ctx, *out_ctx;
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        unsigned int last_ctx;
        u32 new_add_flags, new_drop_flags, new_slot_info;
        struct xhci_virt_device *virt_dev;

        ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
        /* So we won't queue a reset ep command for a root hub */

        xhci = hcd_to_xhci(hcd);
        if (xhci->xhc_state & XHCI_STATE_DYING)

        added_ctxs = xhci_get_endpoint_flag(&ep->desc);
        last_ctx = xhci_last_valid_endpoint(added_ctxs);
        if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
                /* FIXME when we have to issue an evaluate endpoint command to
                 * deal with ep0 max packet size changing once we get the
                 * descriptors
                 */
                xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
                                __func__, added_ctxs);

        virt_dev = xhci->devs[udev->slot_id];
        in_ctx = virt_dev->in_ctx;
        out_ctx = virt_dev->out_ctx;
        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);

        /* If this endpoint is already in use, and the upper layers are trying
         * to add it again without dropping it, reject the addition.
         */
        if (virt_dev->eps[ep_index].ring &&
                        !(le32_to_cpu(ctrl_ctx->drop_flags) &
                                xhci_get_endpoint_flag(&ep->desc))) {
                xhci_warn(xhci, "Trying to add endpoint 0x%x "
                                "without dropping it.\n",
                                (unsigned int) ep->desc.bEndpointAddress);

        /* If the HCD has already noted the endpoint is enabled,
         * ignore this request.
         */
        if (le32_to_cpu(ctrl_ctx->add_flags) &
                        xhci_get_endpoint_flag(&ep->desc)) {
                xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",

        /*
         * Configuration and alternate setting changes must be done in
         * process context, not interrupt context (or so documentation
         * for usb_set_interface() and usb_set_configuration() claim).
         */
        if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
                dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
                                __func__, ep->desc.bEndpointAddress);

        ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
        new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

        /* If xhci_endpoint_disable() was called for this endpoint, but the
         * xHC hasn't been notified yet through the check_bandwidth() call,
         * this re-adds a new state for the endpoint from the new endpoint
         * descriptors.  We must drop and re-add this endpoint, so we leave the
         * drop flags alone.
         */
        new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

        slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
        /* Update the last valid endpoint context, if we just added one past */
        if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
                        LAST_CTX(last_ctx)) {
                slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
                slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
        new_slot_info = le32_to_cpu(slot_ctx->dev_info);

        /* Store the usb_device pointer for later use */

        xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
                        (unsigned int) ep->desc.bEndpointAddress,
                        (unsigned int) new_drop_flags,
                        (unsigned int) new_add_flags,
                        (unsigned int) new_slot_info);
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;

        /* When a device's add flag and drop flag are zero, any subsequent
         * configure endpoint command will leave that endpoint's state
         * untouched.  Make sure we don't leave any old state in the input
         * endpoint contexts.
         */
        ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
        ctrl_ctx->drop_flags = 0;
        ctrl_ctx->add_flags = 0;
        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
        slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
        /* Endpoint 0 is always valid */
        slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
        for (i = 1; i < 31; ++i) {
                ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
                ep_ctx->ep_info = 0;
                ep_ctx->ep_info2 = 0;
                ep_ctx->tx_info = 0;
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
                struct usb_device *udev, u32 *cmd_status)
        switch (*cmd_status) {
                dev_warn(&udev->dev, "Not enough host controller resources "
                                "for new device state.\n");
                /* FIXME: can we allocate more resources for the HC? */
        case COMP_2ND_BW_ERR:
                dev_warn(&udev->dev, "Not enough bandwidth "
                                "for new device state.\n");
                /* FIXME: can we go back to the old state? */
                /* the HCD set up something wrong */
                dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
                                "and endpoint is not disabled.\n");
                dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
                                "configure command.\n");
                dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
                xhci_err(xhci, "ERROR: unexpected command completion "
                                "code 0x%x.\n", *cmd_status);
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
                struct usb_device *udev, u32 *cmd_status)
        struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

        switch (*cmd_status) {
                dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
                                "context command.\n");
                dev_warn(&udev->dev, "WARN: slot not enabled for "
                                "evaluate context command.\n");
        case COMP_CTX_STATE:
                dev_warn(&udev->dev, "WARN: invalid context state for "
                                "evaluate context command.\n");
                xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
                dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
                                "context command.\n");
                /* Max Exit Latency too large error */
                dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
                dev_dbg(&udev->dev, "Successful evaluate context command\n");
                xhci_err(xhci, "ERROR: unexpected command completion "
                                "code 0x%x.\n", *cmd_status);
static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx)
        struct xhci_input_control_ctx *ctrl_ctx;
        u32 valid_add_flags;
        u32 valid_drop_flags;

        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        /* Ignore the slot flag (bit 0), and the default control endpoint flag
         * (bit 1).  The default control endpoint is added during the Address
         * Device command and is never removed until the slot is disabled.
         */
        valid_add_flags = ctrl_ctx->add_flags >> 2;
        valid_drop_flags = ctrl_ctx->drop_flags >> 2;

        /* Use hweight32 to count the number of ones in the add flags, or
         * number of endpoints added.  Don't count endpoints that are changed
         * (both added and dropped).
         */
        return hweight32(valid_add_flags) -
                hweight32(valid_add_flags & valid_drop_flags);
static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx)
        struct xhci_input_control_ctx *ctrl_ctx;
        u32 valid_add_flags;
        u32 valid_drop_flags;

        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        valid_add_flags = ctrl_ctx->add_flags >> 2;
        valid_drop_flags = ctrl_ctx->drop_flags >> 2;

        return hweight32(valid_drop_flags) -
                hweight32(valid_add_flags & valid_drop_flags);
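/*
 * Worked example for the two counting helpers above (illustrative values):
 * suppose the input context adds endpoints with indices 1 and 2 (add_flags
 * bits 2 and 3 set) and drops endpoint index 1 (drop_flags bit 2 set).
 * Then valid_add_flags = 0b11 and valid_drop_flags = 0b01, so one endpoint
 * counts as "new" (added but not also dropped) and zero endpoints count as
 * "dropped" (dropped but not also added).
 */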
/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes.  We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes because we can oversubscribe
 * the host in this case:
 *
 * - the first configure endpoint command drops more endpoints than it adds
 * - a second configure endpoint command that adds more endpoints is queued
 * - the first configure endpoint command fails, so the config is unchanged
 * - the second command may succeed, even though there aren't enough resources
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx)
        added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
        if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
                xhci_dbg(xhci, "Not enough ep ctxs: "
                                "%u active, need to add %u, limit is %u.\n",
                                xhci->num_active_eps, added_eps,
                                xhci->limit_active_eps);
        xhci->num_active_eps += added_eps;
        xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
                        xhci->num_active_eps);
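/*
 * For example (illustrative numbers, not from the source): with
 * limit_active_eps == 64, num_active_eps == 62, and a command adding 4 new
 * endpoints, the check above fails (62 + 4 > 64) and the reservation is
 * refused before the configure endpoint command is ever issued.
 */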
/*
 * The configure endpoint command was failed by the xHC for some other reason,
 * so we need to revert the resources that the failed configuration would have
 * used.
 *
 * Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx)
        num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
        xhci->num_active_eps -= num_failed_eps;
        xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
                        xhci->num_active_eps);

/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx)
        u32 num_dropped_eps;

        num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
        xhci->num_active_eps -= num_dropped_eps;
        if (num_dropped_eps)
                xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
                                xhci->num_active_eps);
unsigned int xhci_get_block_size(struct usb_device *udev)
        switch (udev->speed) {
        case USB_SPEED_FULL:
        case USB_SPEED_HIGH:
        case USB_SPEED_SUPER:
        case USB_SPEED_UNKNOWN:
        case USB_SPEED_WIRELESS:
                /* Should never happen */

unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
        if (interval_bw->overhead[LS_OVERHEAD_TYPE])
        if (interval_bw->overhead[FS_OVERHEAD_TYPE])
/* If we are changing a LS/FS device under a HS hub,
 * make sure (if we are activating a new TT) that the HS bus has enough
 * bandwidth for this new TT.
 */
static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
        struct xhci_interval_bw_table *bw_table;
        struct xhci_tt_bw_info *tt_info;

        /* Find the bandwidth table for the root port this TT is attached to. */
        bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
        tt_info = virt_dev->tt_info;
        /* If this TT already had active endpoints, the bandwidth for this TT
         * has already been added.  Removing all periodic endpoints (and thus
         * making the TT inactive) will only decrease the bandwidth used.
         */
        if (old_active_eps == 0 && tt_info->active_eps != 0) {
                if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)

        /* Not sure why we would have no new active endpoints...
         *
         * Maybe because of an Evaluate Context change for a hub update or a
         * control endpoint 0 max packet size change?
         * FIXME: skip the bandwidth calculation in that case.
         */
static int xhci_check_ss_bw(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev)
{
	unsigned int bw_reserved;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED * SS_BW_LIMIT_IN, 100);
	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
		return -ENOMEM;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED * SS_BW_LIMIT_OUT, 100);
	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
		return -ENOMEM;

	return 0;
}
/*
 * This algorithm is a very conservative estimate of the worst-case scheduling
 * scenario for any one interval.  The hardware dynamically schedules the
 * packets, so we can't tell which microframe could be the limiting factor in
 * the bandwidth scheduling.  This only takes into account periodic endpoints.
 *
 * Obviously, we can't solve an NP complete problem to find the minimum worst
 * case scenario.  Instead, we come up with an estimate that is no less than
 * the worst case bandwidth used for any one microframe, but may be an
 * overestimate.
 *
 * We walk the requirements for each endpoint by interval, starting with the
 * smallest interval, and place packets in the schedule where there is only one
 * possible way to schedule packets for that interval.  In order to simplify
 * this algorithm, we record the largest max packet size for each interval, and
 * assume all packets will be that size.
 *
 * For interval 0, we obviously must schedule all packets for each interval.
 * The bandwidth for interval 0 is just the amount of data to be transmitted
 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
 * the number of packets).
 *
 * For interval 1, we have two possible microframes to schedule those packets
 * in.  For this algorithm, if we can schedule the same number of packets for
 * each possible scheduling opportunity (each microframe), we will do so.  The
 * remaining number of packets will be saved to be transmitted in the gaps in
 * the next interval's scheduling sequence.
 *
 * As we move those remaining packets to be scheduled with interval 2 packets,
 * we have to double the number of remaining packets to transmit.  This is
 * because the intervals are actually powers of 2, and we would be transmitting
 * the previous interval's packets twice in this interval.  We also have to be
 * sure that when we look at the largest max packet size for this interval, we
 * also look at the largest max packet size for the remaining packets and take
 * the greater of the two.
 *
 * The algorithm continues to evenly distribute packets in each scheduling
 * opportunity, and push the remaining packets out, until we get to the last
 * interval.  Then those packets and their associated overhead are just added
 * to the bandwidth used.
 */
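/*
 * A worked example of the carry-over rule above (illustrative numbers only):
 * at interval index i = 1 there are (1 << (i + 1)) = 4 scheduling
 * opportunities.  If one packet was left over from the previous interval and
 * interval_bw[1].num_packets = 3, then packets_remaining becomes
 * 2 * 1 + 3 = 5, packets_transmitted = 5 >> 2 = 1 per opportunity, and
 * packets_remaining = 5 % 4 = 1 is pushed to the next interval, carrying the
 * largest max packet size and overhead seen so far along with it.
 */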
static int xhci_check_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	unsigned int bw_reserved;
	unsigned int max_bandwidth;
	unsigned int bw_used;
	unsigned int block_size;
	struct xhci_interval_bw_table *bw_table;
	unsigned int packet_size = 0;
	unsigned int overhead = 0;
	unsigned int packets_transmitted = 0;
	unsigned int packets_remaining = 0;
	unsigned int i;

	if (virt_dev->udev->speed == USB_SPEED_SUPER)
		return xhci_check_ss_bw(xhci, virt_dev);
	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
		max_bandwidth = HS_BW_LIMIT;
		/* Convert percent of bus BW reserved to blocks reserved */
		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
	} else {
		max_bandwidth = FS_BW_LIMIT;
		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
	}

	bw_table = virt_dev->bw_table;
	/* We need to translate the max packet size and max ESIT payloads into
	 * the units the hardware uses.
	 */
	block_size = xhci_get_block_size(virt_dev->udev);
	/* If we are manipulating a LS/FS device under a HS hub, double check
	 * that the HS bus has enough bandwidth if we are activating a new TT.
	 */
	if (virt_dev->tt_info) {
		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
				virt_dev->real_port);
		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
					"newly activated TT.\n");
			return -ENOMEM;
		}
		xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
				virt_dev->tt_info->slot_id,
				virt_dev->tt_info->ttport);
	} else {
		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
				virt_dev->real_port);
	}
	/* Add in how much bandwidth will be used for interval zero, or the
	 * rounded max ESIT payload + number of packets * largest overhead.
	 */
	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
		bw_table->interval_bw[0].num_packets *
		xhci_get_largest_overhead(&bw_table->interval_bw[0]);
	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
		unsigned int bw_added;
		unsigned int largest_mps;
		unsigned int interval_overhead;

		/*
		 * How many packets could we transmit in this interval?
		 * If packets didn't fit in the previous interval, we will need
		 * to transmit that many packets twice within this interval.
		 */
		packets_remaining = 2 * packets_remaining +
			bw_table->interval_bw[i].num_packets;

		/* Find the largest max packet size of this or the previous
		 * interval.
		 */
		if (list_empty(&bw_table->interval_bw[i].endpoints))
			largest_mps = 0;
		else {
			struct xhci_virt_ep *virt_ep;
			struct list_head *ep_entry;

			ep_entry = bw_table->interval_bw[i].endpoints.next;
			virt_ep = list_entry(ep_entry,
					struct xhci_virt_ep, bw_endpoint_list);
			/* Convert to blocks, rounding up */
			largest_mps = DIV_ROUND_UP(
					virt_ep->bw_info.max_packet_size,
					block_size);
		}
		if (largest_mps > packet_size)
			packet_size = largest_mps;

		/* Use the larger overhead of this or the previous interval. */
		interval_overhead = xhci_get_largest_overhead(
				&bw_table->interval_bw[i]);
		if (interval_overhead > overhead)
			overhead = interval_overhead;

		/* How many packets can we evenly distribute across
		 * (1 << (i + 1)) possible scheduling opportunities?
		 */
		packets_transmitted = packets_remaining >> (i + 1);

		/* Add in the bandwidth used for those scheduled packets */
		bw_added = packets_transmitted * (overhead + packet_size);

		/* How many packets do we have remaining to transmit? */
		packets_remaining = packets_remaining % (1 << (i + 1));

		/* What largest max packet size should those packets have? */
		/* If we've transmitted all packets, don't carry over the
		 * largest packet size.
		 */
		if (packets_remaining == 0) {
			packet_size = 0;
			overhead = 0;
		} else if (packets_transmitted > 0) {
			/* Otherwise if we do have remaining packets, and we've
			 * scheduled some packets in this interval, take the
			 * largest max packet size from endpoints with this
			 * interval.
			 */
			packet_size = largest_mps;
			overhead = interval_overhead;
		}
		/* Otherwise carry over packet_size and overhead from the last
		 * time we had a remainder.
		 */
		bw_used += bw_added;
		if (bw_used > max_bandwidth) {
			xhci_warn(xhci, "Not enough bandwidth. "
					"Proposed: %u, Max: %u\n",
					bw_used, max_bandwidth);
			return -ENOMEM;
		}
	}
	/*
	 * Ok, we know we have some packets left over after even-handedly
	 * scheduling interval 15.  We don't know which microframes they will
	 * fit into, so we over-schedule and say they will be scheduled every
	 * microframe.
	 */
	if (packets_remaining > 0)
		bw_used += overhead + packet_size;

	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
		unsigned int port_index = virt_dev->real_port - 1;

		/* OK, we're manipulating a HS device attached to a
		 * root port bandwidth domain.  Include the number of active TTs
		 * in the bandwidth used.
		 */
		bw_used += TT_HS_OVERHEAD *
			xhci->rh_bw[port_index].num_active_tts;
	}

	xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
			"Available: %u percent\n",
			bw_used, max_bandwidth, bw_reserved,
			(max_bandwidth - bw_used - bw_reserved) * 100 /
			max_bandwidth);

	bw_used += bw_reserved;
	if (bw_used > max_bandwidth) {
		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
				bw_used, max_bandwidth);
		return -ENOMEM;
	}

	bw_table->bw_used = bw_used;
	return 0;
}
static bool xhci_is_async_ep(unsigned int ep_type)
{
	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP);
}

static bool xhci_is_sync_in_ep(unsigned int ep_type)
{
	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
}
static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
{
	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);

	if (ep_bw->ep_interval == 0)
		return SS_OVERHEAD_BURST +
			(ep_bw->mult * ep_bw->num_packets *
					(SS_OVERHEAD + mps));
	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
			(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
			1 << ep_bw->ep_interval);
}
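/*
 * A worked example of the formula above (illustrative numbers only): with
 * mult = 1, num_packets = 2, and ep_interval = 1, the per-ESIT cost
 * 2 * (SS_OVERHEAD + mps + SS_OVERHEAD_BURST) is averaged over
 * 1 << 1 = 2 service intervals, rounding up.
 */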
void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed == USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}
	/* SuperSpeed endpoints never get added to intervals in the table, so
	 * this check is only valid for HS/FS/LS devices.
	 */
	if (list_empty(&virt_ep->bw_endpoint_list))
		return;
	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
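	/* For example, ep_interval is a power-of-2 exponent in microframes,
	 * and 2^3 microframes make one frame, so a FS endpoint with
	 * ep_interval = 3 (one frame) maps to frame-based interval 0.
	 */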
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets -= ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}
	if (tt_info)
		tt_info->active_eps -= 1;
	list_del_init(&virt_ep->bw_endpoint_list);
}
static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	struct xhci_virt_ep *smaller_ep;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed == USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}
	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets += ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}

	if (tt_info)
		tt_info->active_eps += 1;
	/* Insert the endpoint into the list, largest max packet size first. */
	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
			bw_endpoint_list) {
		if (ep_bw->max_packet_size >=
				smaller_ep->bw_info.max_packet_size) {
			/* Add the new ep before the smaller endpoint */
			list_add_tail(&virt_ep->bw_endpoint_list,
					&smaller_ep->bw_endpoint_list);
			return;
		}
	}
	/* Add the new endpoint at the end of the list. */
	list_add_tail(&virt_ep->bw_endpoint_list,
			&interval_bw->endpoints);
}
void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_root_port_bw_info *rh_bw_info;
	if (!virt_dev->tt_info)
		return;

	rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
	if (old_active_eps == 0 &&
			virt_dev->tt_info->active_eps != 0) {
		rh_bw_info->num_active_tts += 1;
		rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
	} else if (old_active_eps != 0 &&
			virt_dev->tt_info->active_eps == 0) {
		rh_bw_info->num_active_tts -= 1;
		rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
	}
}
static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_bw_info ep_bw_info[31];
	int i;
	struct xhci_input_control_ctx *ctrl_ctx;
	int old_active_eps = 0;

	if (virt_dev->tt_info)
		old_active_eps = virt_dev->tt_info->active_eps;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);

	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Make a copy of the BW info in case we need to revert this */
		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
				sizeof(ep_bw_info[i]));
		/* Drop the endpoint from the interval table if the endpoint is
		 * being dropped or changed.
		 */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	/* Overwrite the information stored in the endpoints' bw_info */
	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
	for (i = 0; i < 31; i++) {
		/* Add any changed or added endpoints to the interval table */
		if (EP_IS_ADDED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}

	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
		/* Ok, this fits in the bandwidth we have.
		 * Update the number of active TTs.
		 */
		xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
		return 0;
	}

	/* We don't have enough bandwidth for this, revert the stored info. */
	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Drop the new copies of any added or changed endpoints from
		 * the interval table.
		 */
		if (EP_IS_ADDED(ctrl_ctx, i)) {
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		}
		/* Revert the endpoint back to its old information */
		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
				sizeof(ep_bw_info[i]));
		/* Add any changed or dropped endpoints back into the table */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	return -ENOMEM;
}
/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct xhci_command *command,
		bool ctx_change, bool must_succeed)
{
	int ret;
	int timeleft;
	unsigned long flags;
	struct xhci_container_ctx *in_ctx;
	struct completion *cmd_completion;
	u32 *cmd_status;
	struct xhci_virt_device *virt_dev;
	union xhci_trb *cmd_trb;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];

	if (command)
		in_ctx = command->in_ctx;
	else
		in_ctx = virt_dev->in_ctx;

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
			xhci_reserve_host_resources(xhci, in_ctx)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough host resources, "
				"active endpoint contexts = %u\n",
				xhci->num_active_eps);
		return -ENOMEM;
	}
	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
			xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough bandwidth\n");
		return -ENOMEM;
	}

	if (command) {
		cmd_completion = command->completion;
		cmd_status = &command->status;
		command->command_trb = xhci->cmd_ring->enqueue;

		/* Enqueue pointer can be left pointing to the link TRB,
		 * we must handle that
		 */
		if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
			command->command_trb =
				xhci->cmd_ring->enq_seg->next->trbs;

		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
	} else {
		cmd_completion = &virt_dev->cmd_completion;
		cmd_status = &virt_dev->cmd_status;
	}
	init_completion(cmd_completion);

	cmd_trb = xhci->cmd_ring->dequeue;
	if (!ctx_change)
		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	else
		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	if (ret < 0) {
		if (command)
			list_del(&command->cmd_list);
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			cmd_completion,
			XHCI_CMD_DEFAULT_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for %s command\n",
				timeleft == 0 ? "Timeout" : "Signal",
				ctx_change == 0 ?
					"configure endpoint" :
					"evaluate context");
		/* cancel the configure endpoint command */
		ret = xhci_cancel_cmd(xhci, command, cmd_trb);
		if (ret < 0)
			return ret;
		return -ETIME;
	}

	if (!ctx_change)
		ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
	else
		ret = xhci_evaluate_context_result(xhci, udev, cmd_status);

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* If the command failed, remove the reserved resources.
		 * Otherwise, clean up the estimate to include dropped eps.
		 */
		if (ret)
			xhci_free_host_resources(xhci, in_ctx);
		else
			xhci_finish_resource_reservation(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}
/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));

	/* Don't issue the command if there are no endpoints to update. */
	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
			ctrl_ctx->drop_flags == 0)
		return 0;

	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	ret = xhci_configure_endpoint(xhci, udev, NULL,
			false, false);
	if (ret) {
		/* Caller should call reset_bandwidth() */
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	/* Free any rings that were dropped, but not changed. */
	for (i = 1; i < 31; ++i) {
		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
				!(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
	}
	xhci_zero_in_ctx(xhci, virt_dev);
	/*
	 * Install any rings for completely new endpoints or changed endpoints,
	 * and free or cache any old rings from changed endpoints.
	 */
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].new_ring)
			continue;
		/* Only cache or free the old ring if it exists.
		 * It may not if this is the first add of an endpoint.
		 */
		if (virt_dev->eps[i].ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		}
		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
		virt_dev->eps[i].new_ring = NULL;
	}

	return ret;
}
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
			virt_dev->eps[i].new_ring = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		u32 add_flags, u32 drop_flags)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
	xhci_slot_copy(xhci, in_ctx, out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);

	xhci_dbg(xhci, "Input Context:\n");
	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	dma_addr_t addr;

	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	in_ctx = xhci->devs[slot_id]->in_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);

	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
}
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;

	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_stream, ep->stopped_td,
			&deq_state);

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg(xhci, "Queueing new dequeue state\n");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep->stopped_stream, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are enabled.
		 */
		xhci_dbg(xhci, "Setting up input context for "
				"configure endpoint command\n");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}
/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;
	struct xhci_virt_ep *virt_ep;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint())
	 */
	if (!ep->hcpriv)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	if (!virt_ep->stopped_td) {
		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
				ep->desc.bEndpointAddress);
		return;
	}
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
		return;
	}

	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	/*
	 * Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command.  Better hope that last command worked!
	 */
	if (!ret) {
		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
		kfree(virt_ep->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
	virt_ep->stopped_td = NULL;
	virt_ep->stopped_trb = NULL;
	virt_ep->stopped_stream = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	if (!ep)
		return -EINVAL;
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ret <= 0)
		return -EINVAL;
	if (ep->ss_ep_comp.bmAttributes == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
			ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
				"already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
				"dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}
static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int max_streams;

	/* The stream context array size must be a power of two */
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	/*
	 * Find out how many primary stream array entries the host controller
	 * supports.  Later we may use secondary stream arrays (similar to 2nd
	 * level page entries), but that's an optional feature for xHCI host
	 * controllers.  xHCs must support at least 4 stream IDs.
	 */
	max_streams = HCC_MAX_PSA(xhci->hcc_params);
	if (*num_stream_ctxs > max_streams) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
				max_streams);
		*num_stream_ctxs = max_streams;
		*num_streams = max_streams;
	}
}
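/*
 * For example, a driver that asks for 5 streams needs 6 IDs once stream 0 is
 * included, and roundup_pow_of_two(6) = 8 stream context array entries,
 * subject to the HCC_MAX_PSA() limit checked above.
 */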
/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams + 1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}
static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already\n",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
				!(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!\n",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called "
					"with non-streams endpoint\n");
			return 0;
		}

		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}
/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used
 * to coordinate mass storage command queueing across multiple endpoints
 * (basically a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get fewer stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
 */
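/*
 * Sketch of the caller side through the USB core wrapper (illustrative only;
 * the exact fallback policy is up to the class driver):
 *
 *	streams = usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);
 *	if (streams <= 0)
 *		... fall back to non-streams operation ...
 *	else
 *		... up to "streams" stream IDs may be used per endpoint ...
 */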
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	unsigned int ep_index;
	unsigned int num_stream_ctxs;
	unsigned long flags;
	u32 changed_ep_bitmask = 0;

	if (!eps)
		return -EINVAL;

	/* Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI usage.
	 */
	num_streams += 1;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
			num_streams);

	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	/* Check to make sure all endpoints are not already configured for
	 * streams.  While we're at it, find the maximum number of streams that
	 * all the endpoints will support and check for duplicate endpoints.
	 */
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
			num_eps, &num_streams, &changed_ep_bitmask);
	if (ret < 0) {
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	if (num_streams <= 1) {
		xhci_warn(xhci, "WARN: endpoints can't handle "
				"more than one stream.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	vdev = xhci->devs[udev->slot_id];
	/* Mark each endpoint as being in transition, so
	 * xhci_urb_enqueue() will reject all URBs.
	 */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Setup internal data structures and allocate HW data structures for
	 * streams (but don't install the HW structures in the input context
	 * until we're sure all memory allocation succeeded).
	 */
	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
			num_stream_ctxs, num_streams);

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
				num_stream_ctxs,
				num_streams, mem_flags);
		if (!vdev->eps[ep_index].stream_info)
			goto cleanup;
		/* Set maxPstreams in endpoint context and update deq ptr to
		 * point to stream context array.  FIXME
		 */
	}

	/* Set up the input context for a configure endpoint command. */
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);

		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
				vdev->eps[ep_index].stream_info);
	}
	/* Tell the HW to drop its old copy of the endpoint context info
	 * and add the updated copy from the input context.
	 */
	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);

	/* Issue and wait for the configure endpoint command */
	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */
	if (ret < 0)
		goto cleanup;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
				udev->slot_id, ep_index);
		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
	}
	xhci_free_command(xhci, config_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Subtract 1 for stream 0, which drivers can't use */
	return num_streams - 1;

cleanup:
	/* If it didn't work, free the streams! */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
		xhci_endpoint_zero(xhci, vdev, eps[i]);
	}
	xhci_free_command(xhci, config_cmd);
	return -ENOMEM;
}
/* Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */
int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *command;
	unsigned int ep_index;
	unsigned long flags;
	u32 changed_ep_bitmask;

	xhci = hcd_to_xhci(hcd);
	vdev = xhci->devs[udev->slot_id];

	/* Set up a configure endpoint command to remove the streams rings */
	spin_lock_irqsave(&xhci->lock, flags);
	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
			udev, eps, num_eps);
	if (changed_ep_bitmask == 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}

	/* Use the xhci_command structure from the first endpoint.  We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
	command = vdev->eps[ep_index].stream_info->free_streams_command;
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
			EP_GETTING_NO_STREAMS;

		xhci_endpoint_copy(xhci, command->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
				&vdev->eps[ep_index]);
	}
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the streams rings intact.
	 */
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}
/*
 * Deletes endpoint resources for endpoints that were active before a Reset
 * Device command, or a Disable Slot command.  The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev, bool drop_control_ep)
{
	int i;
	unsigned int num_dropped_eps = 0;
	unsigned int drop_flags = 0;

	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
		if (virt_dev->eps[i].ring) {
			drop_flags |= 1 << i;
			num_dropped_eps++;
		}
	}
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
				"%u now active.\n",
				num_dropped_eps, drop_flags,
				xhci->num_active_eps);
}
/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.  If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.  Clear the input device
 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * it means the device is lost, possibly due to the xHC restore error and
 * re-initialization during S3/S4.  In this case, call xhci_alloc_dev() to
 * re-allocate the device.
 */
int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	int timeleft;
	int last_freed_endpoint;
	struct xhci_slot_ctx *slot_ctx;
	int old_active_eps = 0;

	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not exist. Re-allocate the device\n", slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev do not match, this virt_dev
		 * may belong to another udev.
		 * Re-allocate the device.
		 */
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not match the udev. Re-allocate the device\n",
				slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	/* If device is not setup, there is no point in resetting it */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
						SLOT_STATE_DISABLED)
		return 0;

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.  Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}

	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);
	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;

	/* Enqueue pointer can be left pointing to the link TRB,
	 * we must handle that
	 */
	if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
		reset_device_cmd->command_trb =
			xhci->cmd_ring->enq_seg->next->trbs;

	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
	ret = xhci_queue_reset_device(xhci, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the Reset Device command to finish */
	timeleft = wait_for_completion_interruptible_timeout(
			reset_device_cmd->completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for reset device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		spin_lock_irqsave(&xhci->lock, flags);
		/* The timeout might have raced with the event ring handler, so
		 * only delete from the list if the item isn't poisoned.
		 */
		if (reset_device_cmd->cmd_list.next != LIST_POISON1)
			list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		ret = -ETIME;
		goto command_cleanup;
	}

	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
	 * unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
	ret = reset_device_cmd->status;
	switch (ret) {
	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
		xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
				slot_id,
				xhci_get_slot_state(xhci, virt_dev->out_ctx));
		xhci_info(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error.  May change my mind later. */
		ret = 0;
		goto command_cleanup;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful reset device command.\n");
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, ret))
			break;
		xhci_warn(xhci, "Unknown completion code %u for "
				"reset device command.\n", ret);
		ret = -EINVAL;
		goto command_cleanup;
	}

	/* Free up host controller endpoint resources */
	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* Don't delete the default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}

	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
	last_freed_endpoint = 1;
	for (i = 1; i < 31; ++i) {
		struct xhci_virt_ep *ep = &virt_dev->eps[i];

		if (ep->ep_state & EP_HAS_STREAMS) {
			xhci_free_stream_info(xhci, ep->stream_info);
			ep->stream_info = NULL;
			ep->ep_state &= ~EP_HAS_STREAMS;
		}

		if (ep->ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
			last_freed_endpoint = i;
		}
		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
	}
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);

	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
	ret = 0;

command_cleanup:
	xhci_free_command(xhci, reset_device_cmd);
	return ret;
}
/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev;
	unsigned long flags;
	u32 state;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	/* If the host is halted due to driver unload, we still need to free the
	 * device.
	 */
	if (ret <= 0 && ret != -ENODEV)
		return;

	virt_dev = xhci->devs[udev->slot_id];

	/* Stop any wayward timer functions (which may grab the lock) */
	for (i = 0; i < 31; ++i) {
		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
	}

	if (udev->usb2_hw_lpm_enabled) {
		xhci_set_usb2_hardware_lpm(hcd, udev, 0);
		udev->usb2_hw_lpm_enabled = 0;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
	state = xhci_readl(xhci, &xhci->op_regs->status);
	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_free_virt_device(xhci, udev->slot_id);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot.  XXX Can free sleep?
	 */
}
/*
 * Checks if we have enough host controller resources for the default control
 * endpoint.
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
{
	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
		xhci_dbg(xhci, "Not enough ep ctxs: "
				"%u active, need to add 1, limit is %u.\n",
				xhci->num_active_eps, xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += 1;
	xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
			xhci->num_active_eps);
	return 0;
}
/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;
	int ret;
	union xhci_trb *cmd_trb;

	spin_lock_irqsave(&xhci->lock, flags);
	cmd_trb = xhci->cmd_ring->dequeue;
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			XHCI_CMD_DEFAULT_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* cancel the enable slot request */
		return xhci_cancel_cmd(xhci, NULL, cmd_trb);
	}

	if (!xhci->slot_id) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		return 0;
	}

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_reserve_host_control_ep_resources(xhci);
		if (ret) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_warn(xhci, "Not enough host resources, "
					"active endpoint contexts = %u\n",
					xhci->num_active_eps);
			goto disable_slot;
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	/* Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		goto disable_slot;
	}
	udev->slot_id = xhci->slot_id;
	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */

	return 1;

disable_slot:
	/* Disable slot, if we can do it without mem alloc */
	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
		xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}
/*
 * Issue an Address Device command (which will issue a SetAddress request to
 * the device).
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
 * we should only issue and wait on one address command at the same time.
 *
 * We add one to the device address issued by the hardware because the USB core
 * uses address 1 for the root hubs (even though they're not really devices).
 */
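/*
 * For example, if the xHC assigns hardware address 1 to the device, the
 * driver records virt_dev->address = 2, matching the core's numbering in
 * which address 1 belongs to the root hub.
 */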
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;
	union xhci_trb *cmd_trb;

	if (!udev->slot_id) {
		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
		return -EINVAL;
	}

	virt_dev = xhci->devs[udev->slot_id];

	if (WARN_ON(!virt_dev)) {
		/*
		 * In plug/unplug torture test with an NEC controller,
		 * a zero-dereference was observed once due to virt_dev = 0.
		 * Print useful debug rather than crash if it is observed again!
		 */
		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
				udev->slot_id);
		return -EINVAL;
	}

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	/*
	 * If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
	 * then set up the slot context.
	 */
	if (!slot_ctx->dev_info)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, update the control endpoint ring enqueue pointer. */
	else
		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);

	spin_lock_irqsave(&xhci->lock, flags);
	cmd_trb = xhci->cmd_ring->dequeue;
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
					udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			XHCI_CMD_DEFAULT_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout.
	 */
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for address device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* cancel the address device command */
		ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
		if (ret < 0)
			return ret;
		return -ETIME;
	}

	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
				udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		ret = -EPROTO;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for address "
				"device command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful Address Device command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		ret = -EINVAL;
		break;
	}
	if (ret)
		return ret;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
			udev->slot_id,
			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
			(unsigned long long)
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	/* Use kernel assigned address for devices; store xHC assigned
	 * address locally. */
	virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
		+ 1;
	/* Zero the input context control for later use */
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);

	return 0;
}
#ifdef CONFIG_USB_SUSPEND

/* BESL to HIRD Encoding array for USB2 LPM */
static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
/* Calculate HIRD/BESL for USB2 PORTPMSC */
static int xhci_calculate_hird_besl(int u2del, bool use_besl)
{
	int hird;

	if (use_besl) {
		for (hird = 0; hird < 16; hird++) {
			if (xhci_besl_encoding[hird] >= u2del)
				break;
		}
	} else {
		if (u2del <= 50)
			hird = 0;
		else
			hird = (u2del - 51) / 75 + 1;

		if (hird > 15)
			hird = 15;
	}

	return hird;
}
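/*
 * For example, with BESL encoding and u2del = 300 microseconds, the loop
 * stops at hird = 3, the first entry in xhci_besl_encoding[] that is at
 * least 300; the non-BESL formula gives (300 - 51) / 75 + 1 = 4.
 */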
3793 static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
3794 struct usb_device *udev)
3796 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3797 struct dev_info *dev_info;
3798 __le32 __iomem **port_array;
3799 __le32 __iomem *addr, *pm_addr;
3801 unsigned int port_num;
3802 unsigned long flags;
3806 if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
3810 /* we only support lpm for non-hub device connected to root hub yet */
3811 if (!udev->parent || udev->parent->parent ||
3812 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3815 spin_lock_irqsave(&xhci->lock, flags);
3817 /* Look for devices in lpm_failed_devs list */
3818 dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
3819 le16_to_cpu(udev->descriptor.idProduct);
3820 list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
3821 if (dev_info->dev_id == dev_id) {
3827 port_array = xhci->usb2_ports;
3828 port_num = udev->portnum - 1;
3830 if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
3831 xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
	/*
	 * Test USB 2.0 software LPM.
	 * FIXME: some xHCI 1.0 hosts may implement a new register to set up
	 * hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1
	 * in the June 2011 errata release.
	 */
	xhci_dbg(xhci, "test port %d software LPM\n", port_num);
	/*
	 * Set L1 Device Slot and HIRD/BESL.
	 * Check the device's USB 2.0 extension descriptor to determine whether
	 * HIRD or BESL should be used. See USB 2.0 LPM errata.
	 */
	pm_addr = port_array[port_num] + 1;
	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
	if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2))
		hird = xhci_calculate_hird_besl(u2del, 1);
	else
		hird = xhci_calculate_hird_besl(u2del, 0);

	temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
	xhci_writel(xhci, temp, pm_addr);

	/* Set port link state to U2(L1) */
	addr = port_array[port_num];
	xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);

	spin_unlock_irqrestore(&xhci->lock, flags);
	msleep(10);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Check L1 Status */
	ret = handshake(xhci, pm_addr, PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
	if (ret != -ETIMEDOUT) {
		/* enter L1 successfully */
		temp = xhci_readl(xhci, addr);
		xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
				port_num, temp);
		ret = 0;
	} else {
		temp = xhci_readl(xhci, pm_addr);
		xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
				port_num, temp & PORT_L1S_MASK);
		ret = -EINVAL;
	}

	/* Resume the port */
	xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);

	spin_unlock_irqrestore(&xhci->lock, flags);
	msleep(10);
	spin_lock_irqsave(&xhci->lock, flags);
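	/*
	 * Note (editorial): the lock is dropped around each 10 ms settle
	 * delay above because msleep() may sleep, which is not allowed
	 * while holding a spinlock with interrupts disabled.
	 */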
	/* Clear PLC */
	xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);

	/* Check PORTSC to make sure the device is in the right state */
	if (!ret) {
		temp = xhci_readl(xhci, addr);
		xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
		if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
				(temp & PORT_PLS_MASK) != XDEV_U0) {
			xhci_dbg(xhci, "port L1 resume fail\n");
			ret = -EINVAL;
		}
	}

	if (ret) {
		/* Insert dev to lpm_failed_devs list */
		xhci_warn(xhci, "device LPM test failed, may disconnect and "
				"re-enumerate\n");
		dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
		if (!dev_info) {
			ret = -ENOMEM;
			goto finish;
		}
		dev_info->dev_id = dev_id;
		INIT_LIST_HEAD(&dev_info->list);
		list_add(&dev_info->list, &xhci->lpm_failed_devs);
	} else {
		xhci_ring_device(xhci, udev->slot_id);
	}

finish:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
			struct usb_device *udev, int enable)
{
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
	__le32 __iomem	**port_array;
	__le32 __iomem	*pm_addr;
	u32		temp;
	unsigned int	port_num;
	unsigned long	flags;
	int		u2del, hird;

	if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
			!udev->lpm_capable)
		return -EPERM;

	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return -EPERM;

	if (udev->usb2_hw_lpm_capable != 1)
		return -EPERM;

	spin_lock_irqsave(&xhci->lock, flags);

	port_array = xhci->usb2_ports;
	port_num = udev->portnum - 1;
	pm_addr = port_array[port_num] + 1;
	temp = xhci_readl(xhci, pm_addr);

	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
			enable ? "enable" : "disable", port_num);

	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
	if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2))
		hird = xhci_calculate_hird_besl(u2del, 1);
	else
		hird = xhci_calculate_hird_besl(u2del, 0);

	if (enable) {
		temp &= ~PORT_HIRD_MASK;
		temp |= PORT_HIRD(hird) | PORT_RWE;
		xhci_writel(xhci, temp, pm_addr);
		temp = xhci_readl(xhci, pm_addr);
		temp |= PORT_HLE;
		xhci_writel(xhci, temp, pm_addr);
	} else {
		temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
		xhci_writel(xhci, temp, pm_addr);
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}
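
/*
 * Note (editorial): when enabling hardware LPM above, HIRD and RWE are
 * programmed in one register write and HLE is set by a second write,
 * presumably so the latency and remote-wake parameters are in place before
 * the host may begin initiating L1 entry.
 */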
int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
	int		ret;

	ret = xhci_usb2_software_lpm_test(hcd, udev);
	if (!ret) {
		xhci_dbg(xhci, "software LPM test succeeded\n");
		if (xhci->hw_lpm_support == 1) {
			udev->usb2_hw_lpm_capable = 1;
			ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
			if (!ret)
				udev->usb2_hw_lpm_enabled = 1;
		}
	}

	return 0;
}
#else

int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
				struct usb_device *udev, int enable)
{
	return 0;
}

int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return 0;
}

#endif /* CONFIG_USB_SUSPEND */
/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * high-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
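		/*
		 * Worked example (editorial note): the USB core stores the
		 * hub's TT think time in nanoseconds, so a hub reporting 16
		 * FS bit times has tt->think_time = 1332, and
		 * (1332 / 666) - 1 = 1, the encoding the slot context expects.
		 */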
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}
int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size. Why? */
	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}
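
/*
 * Example (editorial note): MFINDEX counts 125-microsecond microframes, so
 * a reading of 1600 corresponds to 1 ms frame number 1600 >> 3 = 200.
 */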
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd		*xhci;
	struct device		*dev = hcd->self.controller;
	int			retval;
	u32			temp;

	hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
		if (!xhci)
			return -ENOMEM;
		*((struct xhci_hcd **) hcd->hcd_priv) = xhci;
		xhci->main_hcd = hcd;
		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
		/*
		 * USB 2.0 roothub under xHCI has an integrated TT,
		 * (rate matching hub) as opposed to having an OHCI/UHCI
		 * companion controller.
		 */
		hcd->has_tt = 1;
	} else {
		/* xHCI private pointer was set in xhci_pci_probe for the second
		 * registered roothub.
		 */
		xhci = hcd_to_xhci(hcd);
		temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
		if (HCC_64BIT_ADDR(temp)) {
			xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
		} else {
			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
		}
		return 0;
	}
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	xhci_print_registers(xhci);

	get_quirks(dev, xhci);

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		goto error;

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	if (retval)
		goto error;
	xhci_dbg(xhci, "Reset complete\n");

	temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	if (HCC_64BIT_ADDR(temp)) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
	} else {
		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		goto error;
	xhci_dbg(xhci, "Called HCD init\n");
	return 0;
error:
	kfree(xhci);
	return retval;
}
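
/*
 * Note (editorial): hcc_params is deliberately loaded twice in
 * xhci_gen_setup() above: the first read of hc_capbase carries HCIVERSION
 * in its upper 16 bits for HC_VERSION(), after which hcc_params is re-read
 * from the real HCCPARAMS register.
 */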
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
static int __init xhci_hcd_init(void)
{
	int retval;

	retval = xhci_register_pci();
	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.\n");
		return retval;
	}

	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8*32)*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
	return 0;
}
module_init(xhci_hcd_init);
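
/*
 * Example (editorial note): struct xhci_slot_ctx is eight 32-bit fields,
 * so 8*32/8 = 32 bytes; the BUILD_BUG_ON()s above fail the build if
 * compiler padding ever changes these hardware-mandated layouts.
 */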
static void __exit xhci_hcd_cleanup(void)
{
	xhci_unregister_pci();
}
module_exit(xhci_hcd_cleanup);