/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS  (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
                      u32 mask, u32 done, int usec)
{
        u32     result;

        do {
                result = xhci_readl(xhci, ptr);
                if (result == ~(u32)0)          /* card removed */
                        return -ENODEV;
                result &= mask;
                if (result == done)
                        return 0;
                udelay(1);
                usec--;
        } while (usec > 0);
        return -ETIMEDOUT;
}

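/*
 * Usage sketch (illustrative, not part of this driver): a caller polls an
 * operational register until the masked bits reach the expected value; on
 * failure it sees -ENODEV or -ETIMEDOUT.  FOO_READY is a hypothetical
 * status bit used only for this example:
 *
 *      ret = handshake(xhci, &xhci->op_regs->status, FOO_READY, FOO_READY, 100);
 *      if (ret)
 *              return ret;
 */
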
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
        u32 halted;
        u32 cmd;
        u32 mask;

        mask = ~(XHCI_IRQS);
        halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
        if (!halted)
                mask &= ~CMD_RUN;

        cmd = xhci_readl(xhci, &xhci->op_regs->command);
        cmd &= mask;
        xhci_writel(xhci, cmd, &xhci->op_regs->command);
}
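
/*
 * Worked example for xhci_quiesce() above (illustrative): XHCI_IRQS covers
 * the interrupt enable bits in the command register, so masking with
 * ~XHCI_IRQS always disables interrupts; CMD_RUN is cleared as well only
 * when the host is not already halted, which begins the halt without
 * rewriting the run/stop state of a stopped controller.
 */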

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
        int ret;
        xhci_dbg(xhci, "// Halt the HC\n");
        xhci_quiesce(xhci);

        ret = handshake(xhci, &xhci->op_regs->status,
                        STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
        if (!ret) {
                xhci->xhc_state |= XHCI_STATE_HALTED;
                xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
        } else {
                xhci_warn(xhci, "Host not halted after %u microseconds.\n",
                                XHCI_MAX_HALT_USEC);
        }
        return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
        u32 temp;
        int ret;

        temp = xhci_readl(xhci, &xhci->op_regs->command);
        temp |= (CMD_RUN);
        xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
                        temp);
        xhci_writel(xhci, temp, &xhci->op_regs->command);

        /*
         * Wait for the HCHalted Status bit to be 0 to indicate the host is
         * running.
         */
        ret = handshake(xhci, &xhci->op_regs->status,
                        STS_HALT, 0, XHCI_MAX_HALT_USEC);
        if (ret == -ETIMEDOUT)
                xhci_err(xhci, "Host took too long to start, "
                                "waited %u microseconds.\n",
                                XHCI_MAX_HALT_USEC);
        if (!ret)
                xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);

        return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
        u32 command;
        u32 state;
        int ret;

        state = xhci_readl(xhci, &xhci->op_regs->status);
        if ((state & STS_HALT) == 0) {
                xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
                return 0;
        }

        xhci_dbg(xhci, "// Reset the HC\n");
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command |= CMD_RESET;
        xhci_writel(xhci, command, &xhci->op_regs->command);

        /* Existing Intel xHCI controllers require a delay of 1 ms after
         * setting the CMD_RESET bit and before accessing any HC registers.
         * This allows the HC to complete the reset operation and be ready
         * for HC register access.  Without this delay, the subsequent HC
         * register access may very rarely result in a system hang.
         */
        if (xhci->quirks & XHCI_INTEL_HOST)
                udelay(1000);

        ret = handshake(xhci, &xhci->op_regs->command,
                        CMD_RESET, 0, 10 * 1000 * 1000);
        if (ret)
                return ret;

        xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
        /*
         * xHCI cannot write to any doorbells or operational registers other
         * than status until the "Controller Not Ready" flag is cleared.
         */
        return handshake(xhci, &xhci->op_regs->status,
                         STS_CNR, 0, 10 * 1000 * 1000);
}
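
/*
 * Illustrative ordering sketch (mirrors xhci_stop() and xhci_shutdown()
 * below): since xhci_reset() refuses to run on a non-halted controller,
 * callers halt first, under the xHCI lock:
 *
 *      spin_lock_irq(&xhci->lock);
 *      xhci_halt(xhci);
 *      xhci_reset(xhci);
 *      spin_unlock_irq(&xhci->lock);
 */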

#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
        int i;

        if (!xhci->msix_entries)
                return -EINVAL;

        for (i = 0; i < xhci->msix_count; i++)
                if (xhci->msix_entries[i].vector)
                        free_irq(xhci->msix_entries[i].vector,
                                        xhci_to_hcd(xhci));
        return 0;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
        int ret;
        struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        ret = pci_enable_msi(pdev);
        if (ret) {
                xhci_dbg(xhci, "failed to allocate MSI entry\n");
                return ret;
        }

        ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
                                0, "xhci_hcd", xhci_to_hcd(xhci));
        if (ret) {
                xhci_dbg(xhci, "disable MSI interrupt\n");
                pci_disable_msi(pdev);
        }

        return ret;
}

/*
 * Free all IRQs that were requested.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        int ret;

        /* return if using legacy interrupt */
        if (xhci_to_hcd(xhci)->irq >= 0)
                return;

        ret = xhci_free_msi(xhci);
        if (!ret)
                return;
        if (pdev->irq >= 0)
                free_irq(pdev->irq, xhci_to_hcd(xhci));
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
        int i, ret = 0;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

        /*
         * Calculate the number of MSI-X vectors to use:
         * - HCS_MAX_INTRS: the maximum number of interrupters the host
         *   supports, read from the xHCI HCSPARAMS1 register.
         * - num_online_cpus + 1: one MSI-X vector per CPU core, plus one
         *   extra vector so an interrupt is always available.
         * E.g. with 4 online CPUs and HCSPARAMS1 reporting 8 interrupters,
         * 5 vectors are used.
         */
        xhci->msix_count = min(num_online_cpus() + 1,
                                HCS_MAX_INTRS(xhci->hcs_params1));

        xhci->msix_entries =
                kmalloc(sizeof(struct msix_entry) * xhci->msix_count,
                                GFP_KERNEL);
        if (!xhci->msix_entries) {
                xhci_err(xhci, "Failed to allocate MSI-X entries\n");
                return -ENOMEM;
        }

        for (i = 0; i < xhci->msix_count; i++) {
                xhci->msix_entries[i].entry = i;
                xhci->msix_entries[i].vector = 0;
        }

        ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
        if (ret) {
                xhci_dbg(xhci, "Failed to enable MSI-X\n");
                goto free_entries;
        }

        for (i = 0; i < xhci->msix_count; i++) {
                ret = request_irq(xhci->msix_entries[i].vector,
                                (irq_handler_t)xhci_msi_irq,
                                0, "xhci_hcd", xhci_to_hcd(xhci));
                if (ret)
                        goto disable_msix;
        }

        hcd->msix_enabled = 1;
        return ret;

disable_msix:
        xhci_dbg(xhci, "disable MSI-X interrupt\n");
        xhci_free_irq(xhci);
        pci_disable_msix(pdev);
free_entries:
        kfree(xhci->msix_entries);
        xhci->msix_entries = NULL;
        return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

        xhci_free_irq(xhci);

        if (xhci->msix_entries) {
                pci_disable_msix(pdev);
                kfree(xhci->msix_entries);
                xhci->msix_entries = NULL;
        } else {
                pci_disable_msi(pdev);
        }

        hcd->msix_enabled = 0;
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
        int i;

        if (xhci->msix_entries) {
                for (i = 0; i < xhci->msix_count; i++)
                        synchronize_irq(xhci->msix_entries[i].vector);
        }
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        int ret;

        /*
         * Some Fresco Logic host controllers advertise MSI, but fail to
         * generate interrupts.  Don't even try to enable MSI.
         */
        if (xhci->quirks & XHCI_BROKEN_MSI)
                goto legacy_irq;

        /* unregister the legacy interrupt */
        if (hcd->irq)
                free_irq(hcd->irq, hcd);
        hcd->irq = -1;

        ret = xhci_setup_msix(xhci);
        if (ret)
                /* fall back to MSI */
                ret = xhci_setup_msi(xhci);

        if (!ret)
                /* hcd->irq is -1, we have MSI */
                return 0;

        if (!pdev->irq) {
                xhci_err(xhci, "No MSI-X/MSI found and no IRQ in BIOS\n");
                return -EINVAL;
        }

 legacy_irq:
        /* fall back to a legacy interrupt */
        ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
                        hcd->irq_descr, hcd);
        if (ret) {
                xhci_err(xhci, "request interrupt %d failed\n",
                                pdev->irq);
                return ret;
        }
        hcd->irq = pdev->irq;
        return 0;
}

#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
        return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

static void compliance_mode_recovery(unsigned long arg)
{
        struct xhci_hcd *xhci;
        struct usb_hcd *hcd;
        u32 temp;
        int i;

        xhci = (struct xhci_hcd *)arg;

        for (i = 0; i < xhci->num_usb3_ports; i++) {
                temp = xhci_readl(xhci, xhci->usb3_ports[i]);
                if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
                        /*
                         * Compliance Mode Detected. Letting USB Core
                         * handle the Warm Reset
                         */
                        xhci_dbg(xhci, "Compliance Mode Detected->Port %d!\n",
                                        i + 1);
                        xhci_dbg(xhci, "Attempting Recovery routine!\n");
                        hcd = xhci->shared_hcd;

                        if (hcd->state == HC_STATE_SUSPENDED)
                                usb_hcd_resume_root_hub(hcd);

                        usb_hcd_poll_rh_status(hcd);
                }
        }

        if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
                mod_timer(&xhci->comp_mode_recovery_timer,
                        jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue caused by the SN65LVPE502CP USB3.0 re-driver,
 * which sometimes makes ports behind that hardware enter compliance mode.
 * The quirk creates a timer that polls the link state of each host
 * controller port every 2 seconds and recovers a port by issuing a warm
 * reset if compliance mode is detected; otherwise the port would become
 * "dead" (no device connections or disconnections would be detected
 * anymore).  Because no status event is generated when entering compliance
 * mode (per the xHCI spec), this quirk is needed on systems that have the
 * failing hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
        xhci->port_status_u0 = 0;
        init_timer(&xhci->comp_mode_recovery_timer);

        xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
        xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
        xhci->comp_mode_recovery_timer.expires = jiffies +
                        msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

        set_timer_slack(&xhci->comp_mode_recovery_timer,
                        msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
        add_timer(&xhci->comp_mode_recovery_timer);
        xhci_dbg(xhci, "Compliance Mode Recovery Timer Initialized.\n");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool compliance_mode_recovery_timer_quirk_check(void)
{
        const char *dmi_product_name, *dmi_sys_vendor;

        dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
        dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
        if (!dmi_product_name || !dmi_sys_vendor)
                return false;

        if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
                return false;

        if (strstr(dmi_product_name, "Z420") ||
                        strstr(dmi_product_name, "Z620") ||
                        strstr(dmi_product_name, "Z820") ||
                        strstr(dmi_product_name, "Z1 Workstation"))
                return true;

        return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
        return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
}
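
/*
 * Worked example for xhci_all_ports_seen_u0() (illustrative): with 4 USB3
 * ports, all ports have been seen in U0 once port_status_u0 equals
 * (1 << 4) - 1 == 0b1111, i.e. one bit per port; until then the
 * compliance mode recovery timer keeps re-arming itself.
 */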

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        int retval = 0;

        xhci_dbg(xhci, "xhci_init\n");
        spin_lock_init(&xhci->lock);
        if (xhci->hci_version == 0x95 && link_quirk) {
                xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
                xhci->quirks |= XHCI_LINK_TRB_QUIRK;
        } else {
                xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
        }
        retval = xhci_mem_init(xhci, GFP_KERNEL);
        xhci_dbg(xhci, "Finished xhci_init\n");

        /* Initialize compliance mode recovery data if needed */
        if (compliance_mode_recovery_timer_quirk_check()) {
                xhci->quirks |= XHCI_COMP_MODE_QUIRK;
                compliance_mode_recovery_timer_init(xhci);
        }

        return retval;
}

/*-------------------------------------------------------------------------*/


#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
        unsigned long flags;
        int temp;
        u64 temp_64;
        struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
        int i, j;

        xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

        spin_lock_irqsave(&xhci->lock, flags);
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
        if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
                        (xhci->xhc_state & XHCI_STATE_HALTED)) {
                xhci_dbg(xhci, "HW died, polling stopped.\n");
                spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }

        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
        xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
        xhci->error_bitmask = 0;
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
        xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp_64 &= ~ERST_PTR_MASK;
        xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (unsigned long) temp_64);
        xhci_dbg(xhci, "Command ring:\n");
        xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);
        for (i = 0; i < MAX_HC_SLOTS; ++i) {
                if (!xhci->devs[i])
                        continue;
                for (j = 0; j < 31; ++j) {
                        xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
                }
        }
        spin_unlock_irqrestore(&xhci->lock, flags);

        if (!xhci->zombie)
                mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
        else
                xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

static int xhci_run_finished(struct xhci_hcd *xhci)
{
        if (xhci_start(xhci)) {
                xhci_halt(xhci);
                return -ENODEV;
        }
        xhci->shared_hcd->state = HC_STATE_RUNNING;
        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

        if (xhci->quirks & XHCI_NEC_HOST)
                xhci_ring_cmd_db(xhci);

        xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
        return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Set up MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
        u32 temp;
        u64 temp_64;
        int ret;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        /* Start the xHCI host controller running only after the USB 2.0 roothub
         * is set up.
         */

        hcd->uses_new_polling = 1;
        if (!usb_hcd_is_primary_hcd(hcd))
                return xhci_run_finished(xhci);

        xhci_dbg(xhci, "xhci_run\n");

        ret = xhci_try_enable_msi(hcd);
        if (ret)
                return ret;

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
        init_timer(&xhci->event_ring_timer);
        xhci->event_ring_timer.data = (unsigned long) xhci;
        xhci->event_ring_timer.function = xhci_event_ring_work;
        /* Poll the event ring */
        xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
        xhci->zombie = 0;
        xhci_dbg(xhci, "Setting event ring polling timer\n");
        add_timer(&xhci->event_ring_timer);
#endif

        xhci_dbg(xhci, "Command ring memory map follows:\n");
        xhci_debug_ring(xhci, xhci->cmd_ring);
        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);

        xhci_dbg(xhci, "ERST memory map follows:\n");
        xhci_dbg_erst(xhci, &xhci->erst);
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_ring(xhci, xhci->event_ring);
        xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp_64 &= ~ERST_PTR_MASK;
        xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (unsigned long) temp_64);

        xhci_dbg(xhci, "// Set the interrupt modulation register\n");
        temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
        temp &= ~ER_IRQ_INTERVAL_MASK;
        temp |= (u32) 160;
        xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

        /* Set the HCD state before we enable the irqs */
        temp = xhci_readl(xhci, &xhci->op_regs->command);
        temp |= (CMD_EIE);
        xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
                        temp);
        xhci_writel(xhci, temp, &xhci->op_regs->command);

        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
                        xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
        xhci_writel(xhci, ER_IRQ_ENABLE(temp),
                        &xhci->ir_set->irq_pending);
        xhci_print_ir_set(xhci, 0);

        if (xhci->quirks & XHCI_NEC_HOST)
                xhci_queue_vendor_command(xhci, 0, 0, 0,
                                TRB_TYPE(TRB_NEC_GET_FW));

        xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
        return 0;
}

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);

        /* The shared_hcd is going to be deallocated shortly (the USB core only
         * calls this function when allocation fails in usb_add_hcd(), or
         * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
         */
        xhci->shared_hcd = NULL;
        spin_unlock_irq(&xhci->lock);
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and clean up memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
        u32 temp;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        if (!usb_hcd_is_primary_hcd(hcd)) {
                xhci_only_stop_hcd(xhci->shared_hcd);
                return;
        }

        spin_lock_irq(&xhci->lock);
        /* Make sure the xHC is halted for a USB3 roothub
         * (xhci_stop() could be called as part of failed init).
         */
        xhci_halt(xhci);
        xhci_reset(xhci);
        spin_unlock_irq(&xhci->lock);

        xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
        /* Tell the event ring poll function not to reschedule */
        xhci->zombie = 1;
        del_timer_sync(&xhci->event_ring_timer);
#endif

        /* Delete the compliance mode recovery timer */
        if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
                        (!(xhci_all_ports_seen_u0(xhci))))
                del_timer_sync(&xhci->comp_mode_recovery_timer);

        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                usb_amd_dev_put();

        xhci_dbg(xhci, "// Disabling event ring interrupts\n");
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_writel(xhci, ER_IRQ_DISABLE(temp),
                        &xhci->ir_set->irq_pending);
        xhci_print_ir_set(xhci, 0);

        xhci_dbg(xhci, "cleaning up memory\n");
        xhci_mem_cleanup(xhci);
        xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
                    xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
                usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);
        /* Workaround for spurious wakeups at shutdown with HSW */
        if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
                xhci_reset(xhci);
        spin_unlock_irq(&xhci->lock);

        xhci_cleanup_msix(xhci);

        xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
                    xhci_readl(xhci, &xhci->op_regs->status));

        /* Yet another workaround for spurious wakeups at shutdown with HSW */
        if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
                pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
        xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
        xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
        xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
        xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
        xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
        xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
        xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
        xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
        xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
        xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
        xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
        xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
        xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
        xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
        xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
        xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
        u64     val_64;

        /* step 2: initialize command ring buffer */
        val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
                (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                                      xhci->cmd_ring->dequeue) &
                 (u64) ~CMD_RING_RSVD_BITS) |
                xhci->cmd_ring->cycle_state;
        xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
                        (unsigned long long) val_64);
        xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
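
/*
 * Worked example for xhci_set_cmd_ring_deq() (illustrative, assuming the
 * usual CMD_RING_RSVD_BITS value of 0x3f): with the dequeue TRB at DMA
 * address 0x10000040 (64-byte aligned, so its low 6 bits are clear) and
 * cycle_state 1, the value written to the command ring control register
 * is 0x10000041, with any reserved flag bits from the previous register
 * value preserved.
 */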

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend power well,
 * so we need to re-program it on resume.  Unfortunately, the pointer must
 * be 64-byte aligned, because of the reserved bits in the command ring
 * dequeue pointer register.  Therefore, we can't just set the dequeue
 * pointer back in the middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
        struct xhci_ring *ring;
        struct xhci_segment *seg;

        ring = xhci->cmd_ring;
        seg = ring->deq_seg;
        do {
                memset(seg->trbs, 0,
                        sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
                seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
                        cpu_to_le32(~TRB_CYCLE);
                seg = seg->next;
        } while (seg != ring->deq_seg);

        /* Reset the software enqueue and dequeue pointers */
        ring->deq_seg = ring->first_seg;
        ring->dequeue = ring->first_seg->trbs;
        ring->enq_seg = ring->deq_seg;
        ring->enqueue = ring->dequeue;

        /*
         * Ring is now zeroed, so the HW should look for change of ownership
         * when the cycle bit is set to 1.
         */
        ring->cycle_state = 1;

        /*
         * Reset the hardware dequeue pointer.
         * Yes, this will need to be re-written after resume, but we're paranoid
         * and want to make sure the hardware doesn't access bogus memory
         * because, say, the BIOS or an SMI started the host without changing
         * the command ring pointers.
         */
        xhci_set_cmd_ring_deq(xhci);
}

static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
{
        int port_index;
        __le32 __iomem **port_array;
        unsigned long flags;
        u32 t1, t2;

        spin_lock_irqsave(&xhci->lock, flags);

        /* disable USB3 ports' wake bits */
        port_index = xhci->num_usb3_ports;
        port_array = xhci->usb3_ports;
        while (port_index--) {
                t1 = readl(port_array[port_index]);
                t1 = xhci_port_state_to_neutral(t1);
                t2 = t1 & ~PORT_WAKE_BITS;
                if (t1 != t2)
                        writel(t2, port_array[port_index]);
        }

        /* disable USB2 ports' wake bits */
        port_index = xhci->num_usb2_ports;
        port_array = xhci->usb2_ports;
        while (port_index--) {
                t1 = readl(port_array[port_index]);
                t1 = xhci_port_state_to_neutral(t1);
                t2 = t1 & ~PORT_WAKE_BITS;
                if (t1 != t2)
                        writel(t2, port_array[port_index]);
        }

        spin_unlock_irqrestore(&xhci->lock, flags);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
        int                     rc = 0;
        unsigned int            delay = XHCI_MAX_HALT_USEC;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        u32                     command;

        /* Clear root port wake on bits if wakeup not allowed. */
        if (!do_wakeup)
                xhci_disable_port_wake_on_bits(xhci);

        /* Don't poll the roothubs on bus suspend. */
        xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
        clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        del_timer_sync(&hcd->rh_timer);

        spin_lock_irq(&xhci->lock);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
        /* step 1: stop endpoint */
        /* skipped, assuming port suspend has already been done */

        /* step 2: clear Run/Stop bit */
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command &= ~CMD_RUN;
        xhci_writel(xhci, command, &xhci->op_regs->command);

        /* Some chips from Fresco Logic need an extraordinary delay */
        delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

        if (handshake(xhci, &xhci->op_regs->status,
                      STS_HALT, STS_HALT, delay)) {
                xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
        xhci_clear_command_ring(xhci);

        /* step 3: save registers */
        xhci_save_registers(xhci);

        /* step 4: set CSS flag */
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command |= CMD_CSS;
        xhci_writel(xhci, command, &xhci->op_regs->command);
        if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
                xhci_warn(xhci, "WARN: xHC save state timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
        spin_unlock_irq(&xhci->lock);

        /*
         * Delete the compliance mode recovery timer because the xHCI host
         * is about to be suspended.
         */
        if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
                        (!(xhci_all_ports_seen_u0(xhci)))) {
                del_timer_sync(&xhci->comp_mode_recovery_timer);
                xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted!\n");
        }

        /* step 5: remove core well power */
        /* synchronize irq when using MSI-X */
        xhci_msix_sync_irqs(xhci);

        return rc;
}

/*
 * Start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
        u32                     command, temp = 0, status;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        struct usb_hcd          *secondary_hcd;
        int                     retval = 0;
        bool                    comp_timer_running = false;

        /* Wait a bit if either of the roothubs need to settle from the
         * transition into bus suspend.
         */
        if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
                        time_before(jiffies,
                                xhci->bus_state[1].next_statechange))
                msleep(100);

        set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
        set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

        spin_lock_irq(&xhci->lock);
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                hibernated = true;

        if (!hibernated) {
                /* step 1: restore registers */
                xhci_restore_registers(xhci);
                /* step 2: initialize command ring buffer */
                xhci_set_cmd_ring_deq(xhci);
                /* step 3: restore state and start state */
                /* step 3: set CRS flag */
                command = xhci_readl(xhci, &xhci->op_regs->command);
                command |= CMD_CRS;
                xhci_writel(xhci, command, &xhci->op_regs->command);
                if (handshake(xhci, &xhci->op_regs->status,
                              STS_RESTORE, 0, 10 * 1000)) {
                        xhci_warn(xhci, "WARN: xHC restore state timeout\n");
                        spin_unlock_irq(&xhci->lock);
                        return -ETIMEDOUT;
                }
                temp = xhci_readl(xhci, &xhci->op_regs->status);
        }

        /* If restore operation fails, re-initialize the HC during resume */
        if ((temp & STS_SRE) || hibernated) {

                if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
                                !(xhci_all_ports_seen_u0(xhci))) {
                        del_timer_sync(&xhci->comp_mode_recovery_timer);
                        xhci_dbg(xhci, "Compliance Mode Recovery Timer deleted!\n");
                }

                /* Let the USB core know _both_ roothubs lost power. */
                usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
                usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

                xhci_dbg(xhci, "Stop HCD\n");
                xhci_halt(xhci);
                xhci_reset(xhci);
                spin_unlock_irq(&xhci->lock);
                xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
                /* Tell the event ring poll function not to reschedule */
                xhci->zombie = 1;
                del_timer_sync(&xhci->event_ring_timer);
#endif

                xhci_dbg(xhci, "// Disabling event ring interrupts\n");
                temp = xhci_readl(xhci, &xhci->op_regs->status);
                xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
                temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
                xhci_writel(xhci, ER_IRQ_DISABLE(temp),
                                &xhci->ir_set->irq_pending);
                xhci_print_ir_set(xhci, 0);

                xhci_dbg(xhci, "cleaning up memory\n");
                xhci_mem_cleanup(xhci);
                xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
                            xhci_readl(xhci, &xhci->op_regs->status));

                /* USB core calls the PCI reinit and start functions twice:
                 * first with the primary HCD, and then with the secondary HCD.
                 * If we don't do the same, the host will never be started.
                 */
                if (!usb_hcd_is_primary_hcd(hcd))
                        secondary_hcd = hcd;
                else
                        secondary_hcd = xhci->shared_hcd;

                xhci_dbg(xhci, "Initialize the xhci_hcd\n");
                retval = xhci_init(hcd->primary_hcd);
                if (retval)
                        return retval;
                comp_timer_running = true;

                xhci_dbg(xhci, "Start the primary HCD\n");
                retval = xhci_run(hcd->primary_hcd);
                if (!retval) {
                        xhci_dbg(xhci, "Start the secondary HCD\n");
                        retval = xhci_run(secondary_hcd);
                }
                hcd->state = HC_STATE_SUSPENDED;
                xhci->shared_hcd->state = HC_STATE_SUSPENDED;
                goto done;
        }

        /* step 4: set Run/Stop bit */
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command |= CMD_RUN;
        xhci_writel(xhci, command, &xhci->op_regs->command);
        handshake(xhci, &xhci->op_regs->status, STS_HALT,
                  0, 250 * 1000);

        /* step 5: walk topology and initialize portsc,
         * portpmsc and portli
         */
        /* this is done in bus_resume */

        /* step 6: restart each of the previously
         * Running endpoints by ringing their doorbells
         */

        spin_unlock_irq(&xhci->lock);

 done:
        if (retval == 0) {
                /* Resume root hubs only when there are pending events. */
                status = readl(&xhci->op_regs->status);
                if (status & STS_EINT) {
                        usb_hcd_resume_root_hub(hcd);
                        usb_hcd_resume_root_hub(xhci->shared_hcd);
                }
        }

        /*
         * If the system is subject to the quirk, the compliance mode timer
         * always needs to be re-initialized after a system resume: the ports
         * may suffer the compliance mode issue again, regardless of whether
         * they entered U0 before the system was suspended.
         */
        if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
                compliance_mode_recovery_timer_init(xhci);

        /* Re-enable port polling. */
        xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
        set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        usb_hcd_poll_rh_status(hcd);

        return retval;
}
#endif  /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - used for passing endpoint bitmasks between the
 * core and HCDs.  Find the index for an endpoint given its descriptor.
 * Use the return value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
        unsigned int index;
        if (usb_endpoint_xfer_control(desc))
                index = (unsigned int) (usb_endpoint_num(desc)*2);
        else
                index = (unsigned int) (usb_endpoint_num(desc)*2) +
                        (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
        return index;
}
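
/*
 * Worked example (illustrative): for bulk endpoint 0x81 (epnum 1, IN),
 * index = (1 * 2) + 1 - 1 = 2; for bulk endpoint 0x02 (epnum 2, OUT),
 * index = (2 * 2) + 0 - 1 = 3; for the default control endpoint 0,
 * index = 0 * 2 = 0.
 */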

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
        return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
        return 1 << (ep_index + 1);
}
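
/*
 * Worked example (illustrative): endpoint 0x81 has endpoint index 2 (see
 * above), so its flag is 1 << (2 + 1) = 0b1000, matching the added_ctxs
 * example in the comment below.
 */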

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
        return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
                const char *func)
{
        struct xhci_hcd *xhci;
        struct xhci_virt_device *virt_dev;

        if (!hcd || (check_ep && !ep) || !udev) {
                printk(KERN_DEBUG "xHCI %s called with invalid args\n",
                                func);
                return -EINVAL;
        }
        if (!udev->parent) {
                printk(KERN_DEBUG "xHCI %s called for root hub\n",
                                func);
                return 0;
        }

        xhci = hcd_to_xhci(hcd);
        if (check_virt_dev) {
                if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
                        printk(KERN_DEBUG "xHCI %s called with unaddressed "
                                                "device\n", func);
                        return -EINVAL;
                }

                virt_dev = xhci->devs[udev->slot_id];
                if (virt_dev->udev != udev) {
                        printk(KERN_DEBUG "xHCI %s called with udev and "
                                          "virt_dev that do not match\n", func);
                        return -EINVAL;
                }
        }

        if (xhci->xhc_state & XHCI_STATE_HALTED)
                return -ENODEV;

        return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
                struct usb_device *udev, struct xhci_command *command,
                bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
                unsigned int ep_index, struct urb *urb)
{
        struct xhci_container_ctx *in_ctx;
        struct xhci_container_ctx *out_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;
        int max_packet_size;
        int hw_max_packet_size;
        int ret = 0;

        out_ctx = xhci->devs[slot_id]->out_ctx;
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
        max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
        if (hw_max_packet_size != max_packet_size) {
                xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
                xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
                                max_packet_size);
                xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
                                hw_max_packet_size);
                xhci_dbg(xhci, "Issuing evaluate context command.\n");

                /* Set up the modified control endpoint 0 */
                xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
                                xhci->devs[slot_id]->out_ctx, ep_index);
                in_ctx = xhci->devs[slot_id]->in_ctx;
                ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
                ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
                ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

                /* Set up the input context flags for the command */
                /* FIXME: This won't work if a non-default control endpoint
                 * changes max packet sizes.
                 */
                ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
                ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
                ctrl_ctx->drop_flags = 0;

                xhci_dbg(xhci, "Slot %d input context\n", slot_id);
                xhci_dbg_ctx(xhci, in_ctx, ep_index);
                xhci_dbg(xhci, "Slot %d output context\n", slot_id);
                xhci_dbg_ctx(xhci, out_ctx, ep_index);

                ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
                                true, false);

                /* Clean up the input context for later use by bandwidth
                 * functions.
                 */
                ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
        }
        return ret;
}

/*
 * Non-error returns are a promise to giveback() the urb later;
 * we drop ownership so the next owner (or urb unlink) can get it.
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct xhci_td *buffer;
        unsigned long flags;
        int ret = 0;
        unsigned int slot_id, ep_index;
        struct urb_priv *urb_priv;
        int size, i;

        if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
                                        true, true, __func__) <= 0)
                return -EINVAL;

        slot_id = urb->dev->slot_id;
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);

        if (!HCD_HW_ACCESSIBLE(hcd)) {
                if (!in_interrupt())
                        xhci_dbg(xhci, "urb submitted during PCI suspend\n");
                ret = -ESHUTDOWN;
                goto exit;
        }

        if (usb_endpoint_xfer_isoc(&urb->ep->desc))
                size = urb->number_of_packets;
        else
                size = 1;

        urb_priv = kzalloc(sizeof(struct urb_priv) +
                                  size * sizeof(struct xhci_td *), mem_flags);
        if (!urb_priv)
                return -ENOMEM;

        buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
        if (!buffer) {
                kfree(urb_priv);
                return -ENOMEM;
        }

        for (i = 0; i < size; i++) {
                urb_priv->td[i] = buffer;
                buffer++;
        }

        urb_priv->length = size;
        urb_priv->td_cnt = 0;
        urb->hcpriv = urb_priv;

        if (usb_endpoint_xfer_control(&urb->ep->desc)) {
                /* Check to see if the max packet size for the default control
                 * endpoint changed during FS device enumeration
                 */
                if (urb->dev->speed == USB_SPEED_FULL) {
                        ret = xhci_check_maxpacket(xhci, slot_id,
                                        ep_index, urb);
                        if (ret < 0) {
                                xhci_urb_free_priv(xhci, urb_priv);
                                urb->hcpriv = NULL;
                                return ret;
                        }
                }

                /* We have a spinlock and interrupts disabled, so we must pass
                 * atomic context to this function, which may allocate memory.
                 */
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
                if (ret)
                        goto free_priv;
                spin_unlock_irqrestore(&xhci->lock, flags);
        } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                if (xhci->devs[slot_id]->eps[ep_index].ep_state &
                                EP_GETTING_STREAMS) {
                        xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
                                        "is transitioning to using streams.\n");
                        ret = -EINVAL;
                } else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
                                EP_GETTING_NO_STREAMS) {
                        xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
                                        "is transitioning to "
                                        "not having streams.\n");
                        ret = -EINVAL;
                } else {
                        ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
                                        slot_id, ep_index);
                }
                if (ret)
                        goto free_priv;
                spin_unlock_irqrestore(&xhci->lock, flags);
        } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
                if (ret)
                        goto free_priv;
                spin_unlock_irqrestore(&xhci->lock, flags);
        } else {
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
                if (ret)
                        goto free_priv;
                spin_unlock_irqrestore(&xhci->lock, flags);
        }
exit:
        return ret;
dying:
        xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
                        "non-responsive xHCI host.\n",
                        urb->ep->desc.bEndpointAddress, urb);
        ret = -ESHUTDOWN;
free_priv:
        xhci_urb_free_priv(xhci, urb_priv);
        urb->hcpriv = NULL;
        spin_unlock_irqrestore(&xhci->lock, flags);
        return ret;
}
1441
1442 /* Get the right ring for the given URB.
1443  * If the endpoint supports streams, boundary check the URB's stream ID.
1444  * If the endpoint doesn't support streams, return the singular endpoint ring.
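 * For example, if the endpoint has stream_info->num_streams == 4, stream
 * IDs 1-3 map to stream_rings[1..3]; stream ID 0 is rejected below.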
1445  */
1446 static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
1447                 struct urb *urb)
1448 {
1449         unsigned int slot_id;
1450         unsigned int ep_index;
1451         unsigned int stream_id;
1452         struct xhci_virt_ep *ep;
1453
1454         slot_id = urb->dev->slot_id;
1455         ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1456         stream_id = urb->stream_id;
1457         ep = &xhci->devs[slot_id]->eps[ep_index];
1458         /* Common case: no streams */
1459         if (!(ep->ep_state & EP_HAS_STREAMS))
1460                 return ep->ring;
1461
1462         if (stream_id == 0) {
1463                 xhci_warn(xhci,
1464                                 "WARN: Slot ID %u, ep index %u has streams, "
1465                                 "but URB has no stream ID.\n",
1466                                 slot_id, ep_index);
1467                 return NULL;
1468         }
1469
1470         if (stream_id < ep->stream_info->num_streams)
1471                 return ep->stream_info->stream_rings[stream_id];
1472
1473         xhci_warn(xhci,
1474                         "WARN: Slot ID %u, ep index %u has "
1475                         "stream IDs 1 to %u allocated, "
1476                         "but stream ID %u is requested.\n",
1477                         slot_id, ep_index,
1478                         ep->stream_info->num_streams - 1,
1479                         stream_id);
1480         return NULL;
1481 }
1482
1483 /*
1484  * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
1485  * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
1486  * should pick up where it left off in the TD, unless a Set Transfer Ring
1487  * Dequeue Pointer is issued.
1488  *
1489  * The TRBs that make up the buffers for the canceled URB will be "removed" from
1490  * the ring.  Since the ring is a contiguous structure, they can't be physically
1491  * removed.  Instead, there are two options:
1492  *
1493  *  1) If the HC is in the middle of processing the URB to be canceled, we
1494  *     simply move the ring's dequeue pointer past those TRBs using the Set
1495  *     Transfer Ring Dequeue Pointer command.  This will be the common case,
1496  *     when drivers timeout on the last submitted URB and attempt to cancel.
1497  *
1498  *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
1499  *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
1500  *     HC will need to invalidate any TRBs it has cached after the stop
1501  *     endpoint command, as noted in the xHCI 0.95 errata.
1502  *
1503  *  3) The TD may have completed by the time the Stop Endpoint Command
1504  *     completes, so software needs to handle that case too.
1505  *
1506  * This function should protect against the TD enqueueing code ringing the
1507  * doorbell while this code is waiting for a Stop Endpoint command to complete.
1508  * It also needs to account for multiple cancellations happening at the same
1509  * time for the same endpoint.
1510  *
1511  * Note that this function can be called in any context, or so says
1512  * usb_hcd_unlink_urb()
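 *
 * Illustrative sketch (not from this file) of how this path is usually
 * reached, via the hc_driver urb_dequeue hook:
 *
 *     usb_unlink_urb(urb);    returns -EINPROGRESS; the URB later
 *                             completes with status -ECONNRESET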
1513  */
1514 int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1515 {
1516         unsigned long flags;
1517         int ret, i;
1518         u32 temp;
1519         struct xhci_hcd *xhci;
1520         struct urb_priv *urb_priv;
1521         struct xhci_td *td;
1522         unsigned int ep_index;
1523         struct xhci_ring *ep_ring;
1524         struct xhci_virt_ep *ep;
1525         struct xhci_virt_device *vdev;
1526
1527         xhci = hcd_to_xhci(hcd);
1528         spin_lock_irqsave(&xhci->lock, flags);
1529         /* Make sure the URB hasn't completed or been unlinked already */
1530         ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1531         if (ret)
1532                 goto done;
1533
1534         /* give back URB now if we can't queue it for cancel */
1535         vdev = xhci->devs[urb->dev->slot_id];
1536         urb_priv = urb->hcpriv;
1537         if (!vdev || !urb_priv)
1538                 goto err_giveback;
1539
1540         xhci_dbg(xhci, "Cancel URB %p\n", urb);
1541         xhci_dbg(xhci, "Event ring:\n");
1542         xhci_debug_ring(xhci, xhci->event_ring);
1543         ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1544         ep = &vdev->eps[ep_index];
1545         ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1546         if (!ep || !ep_ring)
1547                 goto err_giveback;
1548
1549         xhci_dbg(xhci, "Endpoint ring:\n");
1550         xhci_debug_ring(xhci, ep_ring);
1551
1552         temp = xhci_readl(xhci, &xhci->op_regs->status);
1553         if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
1554                 xhci_dbg(xhci, "HW died, freeing TD.\n");
1555                 for (i = urb_priv->td_cnt;
1556                      i < urb_priv->length;
1557                      i++) {
1558                         td = urb_priv->td[i];
1559                         if (!list_empty(&td->td_list))
1560                                 list_del_init(&td->td_list);
1561                         if (!list_empty(&td->cancelled_td_list))
1562                                 list_del_init(&td->cancelled_td_list);
1563                 }
1564                 goto err_giveback;
1565         }
1566
1567         for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
1568                 td = urb_priv->td[i];
1569                 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
1570         }
1571
1572         /* Queue a stop endpoint command, but only if this is
1573          * the first cancellation to be handled.
1574          */
1575         if (!(ep->ep_state & EP_HALT_PENDING)) {
1576                 ep->ep_state |= EP_HALT_PENDING;
1577                 ep->stop_cmds_pending++;
1578                 ep->stop_cmd_timer.expires = jiffies +
1579                         XHCI_STOP_EP_CMD_TIMEOUT * HZ;
1580                 add_timer(&ep->stop_cmd_timer);
1581                 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
1582                 xhci_ring_cmd_db(xhci);
1583         }
1584 done:
1585         spin_unlock_irqrestore(&xhci->lock, flags);
1586         return ret;
1587
1588 err_giveback:
1589         if (urb_priv)
1590                 xhci_urb_free_priv(xhci, urb_priv);
1591         usb_hcd_unlink_urb_from_ep(hcd, urb);
1592         spin_unlock_irqrestore(&xhci->lock, flags);
1593         usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1594         return ret;
1595 }
1596
1597 /* Drop an endpoint from a new bandwidth configuration for this device.
1598  * Only one call to this function is allowed per endpoint before
1599  * check_bandwidth() or reset_bandwidth() must be called.
1600  * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1601  * add the endpoint to the schedule with possibly new parameters denoted by a
1602  * different endpoint descriptor in usb_host_endpoint.
1603  * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1604  * not allowed.
1605  *
1606  * The USB core will not allow URBs to be queued to an endpoint that is being
1607  * disabled, so there's no need for mutual exclusion to protect
1608  * the xhci->devs[slot_id] structure.
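 *
 * Illustrative calling pattern (hypothetical endpoints old_ep/new_ep)
 * when an alternate setting changes:
 *
 *     xhci_drop_endpoint(hcd, udev, old_ep);
 *     xhci_add_endpoint(hcd, udev, new_ep);
 *     (repeat per changed endpoint, then call check_bandwidth())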
1609  */
1610 int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1611                 struct usb_host_endpoint *ep)
1612 {
1613         struct xhci_hcd *xhci;
1614         struct xhci_container_ctx *in_ctx, *out_ctx;
1615         struct xhci_input_control_ctx *ctrl_ctx;
1616         struct xhci_slot_ctx *slot_ctx;
1617         unsigned int last_ctx;
1618         unsigned int ep_index;
1619         struct xhci_ep_ctx *ep_ctx;
1620         u32 drop_flag;
1621         u32 new_add_flags, new_drop_flags, new_slot_info;
1622         int ret;
1623
1624         ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1625         if (ret <= 0)
1626                 return ret;
1627         xhci = hcd_to_xhci(hcd);
1628         if (xhci->xhc_state & XHCI_STATE_DYING)
1629                 return -ENODEV;
1630
1631         xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1632         drop_flag = xhci_get_endpoint_flag(&ep->desc);
1633         if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1634                 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1635                                 __func__, drop_flag);
1636                 return 0;
1637         }
1638
1639         in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1640         out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1641         ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1642         ep_index = xhci_get_endpoint_index(&ep->desc);
1643         ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1644         /* If the HC already knows the endpoint is disabled,
1645          * or the HCD has noted it is disabled, ignore this request
1646          */
1647         if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1648              cpu_to_le32(EP_STATE_DISABLED)) ||
1649             le32_to_cpu(ctrl_ctx->drop_flags) &
1650             xhci_get_endpoint_flag(&ep->desc)) {
1651                 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1652                                 __func__, ep);
1653                 return 0;
1654         }
1655
1656         ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1657         new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1658
1659         ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1660         new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1661
1662         last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
1663         slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1664         /* Update the last valid endpoint context, if we deleted the last one */
1665         if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
1666             LAST_CTX(last_ctx)) {
1667                 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1668                 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
1669         }
1670         new_slot_info = le32_to_cpu(slot_ctx->dev_info);
1671
1672         xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1673
1674         xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1675                         (unsigned int) ep->desc.bEndpointAddress,
1676                         udev->slot_id,
1677                         (unsigned int) new_drop_flags,
1678                         (unsigned int) new_add_flags,
1679                         (unsigned int) new_slot_info);
1680         return 0;
1681 }
1682
1683 /* Add an endpoint to a new possible bandwidth configuration for this device.
1684  * Only one call to this function is allowed per endpoint before
1685  * check_bandwidth() or reset_bandwidth() must be called.
1686  * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1687  * add the endpoint to the schedule with possibly new parameters denoted by a
1688  * different endpoint descriptor in usb_host_endpoint.
1689  * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1690  * not allowed.
1691  *
1692  * The USB core will not allow URBs to be queued to an endpoint until the
1693  * configuration or alt setting is installed in the device, so there's no need
1694  * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1695  */
1696 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1697                 struct usb_host_endpoint *ep)
1698 {
1699         struct xhci_hcd *xhci;
1700         struct xhci_container_ctx *in_ctx, *out_ctx;
1701         unsigned int ep_index;
1702         struct xhci_ep_ctx *ep_ctx;
1703         struct xhci_slot_ctx *slot_ctx;
1704         struct xhci_input_control_ctx *ctrl_ctx;
1705         u32 added_ctxs;
1706         unsigned int last_ctx;
1707         u32 new_add_flags, new_drop_flags, new_slot_info;
1708         struct xhci_virt_device *virt_dev;
1709         int ret = 0;
1710
1711         ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1712         if (ret <= 0) {
1713                 /* So we won't queue a reset ep command for a root hub */
1714                 ep->hcpriv = NULL;
1715                 return ret;
1716         }
1717         xhci = hcd_to_xhci(hcd);
1718         if (xhci->xhc_state & XHCI_STATE_DYING)
1719                 return -ENODEV;
1720
1721         added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1722         last_ctx = xhci_last_valid_endpoint(added_ctxs);
1723         if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1724                 /* FIXME when we have to issue an evaluate endpoint command to
1725                  * deal with ep0 max packet size changing once we get the
1726                  * descriptors
1727                  */
1728                 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1729                                 __func__, added_ctxs);
1730                 return 0;
1731         }
1732
1733         virt_dev = xhci->devs[udev->slot_id];
1734         in_ctx = virt_dev->in_ctx;
1735         out_ctx = virt_dev->out_ctx;
1736         ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1737         ep_index = xhci_get_endpoint_index(&ep->desc);
1738         ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1739
1740         /* If this endpoint is already in use, and the upper layers are trying
1741          * to add it again without dropping it, reject the addition.
1742          */
1743         if (virt_dev->eps[ep_index].ring &&
1744                         !(le32_to_cpu(ctrl_ctx->drop_flags) &
1745                                 xhci_get_endpoint_flag(&ep->desc))) {
1746                 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1747                                 "without dropping it.\n",
1748                                 (unsigned int) ep->desc.bEndpointAddress);
1749                 return -EINVAL;
1750         }
1751
1752         /* If the HCD has already noted the endpoint is enabled,
1753          * ignore this request.
1754          */
1755         if (le32_to_cpu(ctrl_ctx->add_flags) &
1756             xhci_get_endpoint_flag(&ep->desc)) {
1757                 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1758                                 __func__, ep);
1759                 return 0;
1760         }
1761
1762         /*
1763          * Configuration and alternate setting changes must be done in
1764          * process context, not interrupt context (or so documentation
1765          * for usb_set_interface() and usb_set_configuration() claim).
1766          */
1767         if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1768                 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1769                                 __func__, ep->desc.bEndpointAddress);
1770                 return -ENOMEM;
1771         }
1772
1773         ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1774         new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1775
1776         /* If xhci_endpoint_disable() was called for this endpoint, but the
1777          * xHC hasn't been notified yet through the check_bandwidth() call,
1778          * this re-adds a new state for the endpoint from the new endpoint
1779          * descriptors.  We must drop and re-add this endpoint, so we leave the
1780          * drop flags alone.
1781          */
1782         new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1783
1784         slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1785         /* Update the last valid endpoint context, if we just added one
              * past the previous last valid endpoint
              */
1786         if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
1787             LAST_CTX(last_ctx)) {
1788                 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1789                 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
1790         }
1791         new_slot_info = le32_to_cpu(slot_ctx->dev_info);
1792
1793         /* Store the usb_device pointer for later use */
1794         ep->hcpriv = udev;
1795
1796         xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1797                         (unsigned int) ep->desc.bEndpointAddress,
1798                         udev->slot_id,
1799                         (unsigned int) new_drop_flags,
1800                         (unsigned int) new_add_flags,
1801                         (unsigned int) new_slot_info);
1802         return 0;
1803 }
1804
1805 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1806 {
1807         struct xhci_input_control_ctx *ctrl_ctx;
1808         struct xhci_ep_ctx *ep_ctx;
1809         struct xhci_slot_ctx *slot_ctx;
1810         int i;
1811
1812         /* When a device's add flag and drop flag are zero, any subsequent
1813          * configure endpoint command will leave that endpoint's state
1814          * untouched.  Make sure we don't leave any old state in the input
1815          * endpoint contexts.
1816          */
1817         ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1818         ctrl_ctx->drop_flags = 0;
1819         ctrl_ctx->add_flags = 0;
1820         slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1821         slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1822         /* Endpoint 0 is always valid */
1823         slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
1824         for (i = 1; i < 31; ++i) {
1825                 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1826                 ep_ctx->ep_info = 0;
1827                 ep_ctx->ep_info2 = 0;
1828                 ep_ctx->deq = 0;
1829                 ep_ctx->tx_info = 0;
1830         }
1831 }
1832
1833 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1834                 struct usb_device *udev, u32 *cmd_status)
1835 {
1836         int ret;
1837
1838         switch (*cmd_status) {
1839         case COMP_ENOMEM:
1840                 dev_warn(&udev->dev, "Not enough host controller resources "
1841                                 "for new device state.\n");
1842                 ret = -ENOMEM;
1843                 /* FIXME: can we allocate more resources for the HC? */
1844                 break;
1845         case COMP_BW_ERR:
1846         case COMP_2ND_BW_ERR:
1847                 dev_warn(&udev->dev, "Not enough bandwidth "
1848                                 "for new device state.\n");
1849                 ret = -ENOSPC;
1850                 /* FIXME: can we go back to the old state? */
1851                 break;
1852         case COMP_TRB_ERR:
1853                 /* the HCD set up something wrong */
1854                 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1855                                 "add flag = 1, "
1856                                 "and endpoint is not disabled.\n");
1857                 ret = -EINVAL;
1858                 break;
1859         case COMP_DEV_ERR:
1860                 dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
1861                                 "configure command.\n");
1862                 ret = -ENODEV;
1863                 break;
1864         case COMP_SUCCESS:
1865                 dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
1866                 ret = 0;
1867                 break;
1868         default:
1869                 xhci_err(xhci, "ERROR: unexpected command completion "
1870                                 "code 0x%x.\n", *cmd_status);
1871                 ret = -EINVAL;
1872                 break;
1873         }
1874         return ret;
1875 }
1876
1877 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1878                 struct usb_device *udev, u32 *cmd_status)
1879 {
1880         int ret;
1881         struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
1882
1883         switch (*cmd_status) {
1884         case COMP_EINVAL:
1885                 dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
1886                                 "context command.\n");
1887                 ret = -EINVAL;
1888                 break;
1889         case COMP_EBADSLT:
1890                 dev_warn(&udev->dev, "WARN: slot not enabled for "
1891                                 "evaluate context command.\n");
                ret = -EINVAL;
                break;
1892         case COMP_CTX_STATE:
1893                 dev_warn(&udev->dev, "WARN: invalid context state for "
1894                                 "evaluate context command.\n");
1895                 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1896                 ret = -EINVAL;
1897                 break;
1898         case COMP_DEV_ERR:
1899                 dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
1900                                 "context command.\n");
1901                 ret = -ENODEV;
1902                 break;
1903         case COMP_MEL_ERR:
1904                 /* Max Exit Latency too large error */
1905                 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
1906                 ret = -EINVAL;
1907                 break;
1908         case COMP_SUCCESS:
1909                 dev_dbg(&udev->dev, "Successful evaluate context command\n");
1910                 ret = 0;
1911                 break;
1912         default:
1913                 xhci_err(xhci, "ERROR: unexpected command completion "
1914                                 "code 0x%x.\n", *cmd_status);
1915                 ret = -EINVAL;
1916                 break;
1917         }
1918         return ret;
1919 }
1920
1921 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
1922                 struct xhci_container_ctx *in_ctx)
1923 {
1924         struct xhci_input_control_ctx *ctrl_ctx;
1925         u32 valid_add_flags;
1926         u32 valid_drop_flags;
1927
1928         ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1929         /* Ignore the slot flag (bit 0), and the default control endpoint flag
1930          * (bit 1).  The default control endpoint is added during the Address
1931          * Device command and is never removed until the slot is disabled.
1932          */
1933         valid_add_flags = ctrl_ctx->add_flags >> 2;
1934         valid_drop_flags = ctrl_ctx->drop_flags >> 2;
1935
1936         /* Use hweight32 to count the number of ones in the add flags, or
1937          * number of endpoints added.  Don't count endpoints that are changed
1938          * (both added and dropped).
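         * For example, valid_add_flags = 0x6 and valid_drop_flags = 0x2
         * describe two added endpoints, one of which is also dropped
         * (changed): hweight32(0x6) - hweight32(0x6 & 0x2) = 2 - 1 = 1
         * new endpoint.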
1939          */
1940         return hweight32(valid_add_flags) -
1941                 hweight32(valid_add_flags & valid_drop_flags);
1942 }
1943
1944 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
1945                 struct xhci_container_ctx *in_ctx)
1946 {
1947         struct xhci_input_control_ctx *ctrl_ctx;
1948         u32 valid_add_flags;
1949         u32 valid_drop_flags;
1950
1951         ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1952         valid_add_flags = ctrl_ctx->add_flags >> 2;
1953         valid_drop_flags = ctrl_ctx->drop_flags >> 2;
1954
1955         return hweight32(valid_drop_flags) -
1956                 hweight32(valid_add_flags & valid_drop_flags);
1957 }
1958
1959 /*
1960  * We need to reserve the new number of endpoints before the configure endpoint
1961  * command completes.  We can't subtract the dropped endpoints from the number
1962  * of active endpoints until the command completes because we can oversubscribe
1963  * the host in this case:
1964  *
1965  *  - the first configure endpoint command drops more endpoints than it adds
1966  *  - a second configure endpoint command that adds more endpoints is queued
1967  *  - the first configure endpoint command fails, so the config is unchanged
1968  *  - the second command may succeed, even though there aren't enough resources
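 *
 * Illustrative numbers (made up): with limit_active_eps = 64 and
 * num_active_eps = 62, a command adding 4 endpoints must fail the
 * reservation up front, even if an earlier pending command would drop
 * 10 endpoints once it completes.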
1969  *
1970  * Must be called with xhci->lock held.
1971  */
1972 static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
1973                 struct xhci_container_ctx *in_ctx)
1974 {
1975         u32 added_eps;
1976
1977         added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
1978         if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
1979                 xhci_dbg(xhci, "Not enough ep ctxs: "
1980                                 "%u active, need to add %u, limit is %u.\n",
1981                                 xhci->num_active_eps, added_eps,
1982                                 xhci->limit_active_eps);
1983                 return -ENOMEM;
1984         }
1985         xhci->num_active_eps += added_eps;
1986         xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
1987                         xhci->num_active_eps);
1988         return 0;
1989 }
1990
1991 /*
1992  * The configure endpoint command failed in the xHC for some other reason, so we
1993  * need to revert the resources that the failed configuration would have used.
1994  *
1995  * Must be called with xhci->lock held.
1996  */
1997 static void xhci_free_host_resources(struct xhci_hcd *xhci,
1998                 struct xhci_container_ctx *in_ctx)
1999 {
2000         u32 num_failed_eps;
2001
2002         num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
2003         xhci->num_active_eps -= num_failed_eps;
2004         xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
2005                         num_failed_eps,
2006                         xhci->num_active_eps);
2007 }
2008
2009 /*
2010  * Now that the command has completed, clean up the active endpoint count by
2011  * subtracting out the endpoints that were dropped (but not changed).
2012  *
2013  * Must be called with xhci->lock held.
2014  */
2015 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2016                 struct xhci_container_ctx *in_ctx)
2017 {
2018         u32 num_dropped_eps;
2019
2020         num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
2021         xhci->num_active_eps -= num_dropped_eps;
2022         if (num_dropped_eps)
2023                 xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
2024                                 num_dropped_eps,
2025                                 xhci->num_active_eps);
2026 }
2027
2028 unsigned int xhci_get_block_size(struct usb_device *udev)
2029 {
2030         switch (udev->speed) {
2031         case USB_SPEED_LOW:
2032         case USB_SPEED_FULL:
2033                 return FS_BLOCK;
2034         case USB_SPEED_HIGH:
2035                 return HS_BLOCK;
2036         case USB_SPEED_SUPER:
2037                 return SS_BLOCK;
2038         case USB_SPEED_UNKNOWN:
2039         case USB_SPEED_WIRELESS:
2040         default:
2041                 /* Should never happen */
2042                 return 1;
2043         }
2044 }
2045
2046 unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2047 {
2048         if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2049                 return LS_OVERHEAD;
2050         if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2051                 return FS_OVERHEAD;
2052         return HS_OVERHEAD;
2053 }
2054
2055 /* If we are changing a LS/FS device under a HS hub,
2056  * make sure (if we are activating a new TT) that the HS bus has enough
2057  * bandwidth for this new TT.
2058  */
2059 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2060                 struct xhci_virt_device *virt_dev,
2061                 int old_active_eps)
2062 {
2063         struct xhci_interval_bw_table *bw_table;
2064         struct xhci_tt_bw_info *tt_info;
2065
2066         /* Find the bandwidth table for the root port this TT is attached to. */
2067         bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2068         tt_info = virt_dev->tt_info;
2069         /* If this TT already had active endpoints, the bandwidth for this TT
2070          * has already been added.  Removing all periodic endpoints (and thus
2071          * making the TT inactive) will only decrease the bandwidth used.
2072          */
2073         if (old_active_eps)
2074                 return 0;
2075         if (tt_info->active_eps != 0) {
2076                 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2077                         return -ENOMEM;
2078                 return 0;
2079         }
2080         /* Not sure why we would have no new active endpoints...
2081          *
2082          * Maybe because of an Evaluate Context change for a hub update or a
2083          * control endpoint 0 max packet size change?
2084          * FIXME: skip the bandwidth calculation in that case.
2085          */
2086         return 0;
2087 }
2088
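/*
 * SS_BW_RESERVED is a percentage, so each direction keeps that share of
 * its budget free.  Illustrative numbers (made up): a 10% reservation on
 * a 25000-block limit leaves 22500 blocks for periodic endpoints.
 */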
2089 static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2090                 struct xhci_virt_device *virt_dev)
2091 {
2092         unsigned int bw_reserved;
2093
2094         bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2095         if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2096                 return -ENOMEM;
2097
2098         bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2099         if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2100                 return -ENOMEM;
2101
2102         return 0;
2103 }
2104
2105 /*
2106  * This algorithm is a very conservative estimate of the worst-case scheduling
2107  * scenario for any one interval.  The hardware dynamically schedules the
2108  * packets, so we can't tell which microframe could be the limiting factor in
2109  * the bandwidth scheduling.  This only takes into account periodic endpoints.
2110  *
2111  * Obviously, we can't solve an NP-complete problem to find the minimum worst
2112  * case scenario.  Instead, we come up with an estimate that is no less than
2113  * the worst case bandwidth used for any one microframe, but may be an
2114  * over-estimate.
2115  *
2116  * We walk the requirements for each endpoint by interval, starting with the
2117  * smallest interval, and place packets in the schedule where there is only one
2118  * possible way to schedule packets for that interval.  In order to simplify
2119  * this algorithm, we record the largest max packet size for each interval, and
2120  * assume all packets will be that size.
2121  *
2122  * For interval 0, we obviously must schedule all packets in every scheduling
2123  * opportunity.
2123  * The bandwidth for interval 0 is just the amount of data to be transmitted
2124  * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2125  * the number of packets).
2126  *
2127  * For interval 1, we have two possible microframes to schedule those packets
2128  * in.  For this algorithm, if we can schedule the same number of packets for
2129  * each possible scheduling opportunity (each microframe), we will do so.  The
2130  * remaining number of packets will be saved to be transmitted in the gaps in
2131  * the next interval's scheduling sequence.
2132  *
2133  * As we move those remaining packets to be scheduled with interval 2 packets,
2134  * we have to double the number of remaining packets to transmit.  This is
2135  * because the intervals are actually powers of 2, and we would be transmitting
2136  * the previous interval's packets twice in this interval.  We also have to be
2137  * sure that when we look at the largest max packet size for this interval, we
2138  * also look at the largest max packet size for the remaining packets and take
2139  * the greater of the two.
2140  *
2141  * The algorithm continues to evenly distribute packets in each scheduling
2142  * opportunity, and push the remaining packets out, until we get to the last
2143  * interval.  Then those packets and their associated overhead are just added
2144  * to the bandwidth used.
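 *
 * Worked example (numbers made up): suppose interval 1 carries 5 packets.
 * At i = 1 there are (1 << 2) = 4 scheduling opportunities, so
 * 5 >> 2 = 1 packet fits in each of them and 5 % 4 = 1 packet is carried
 * forward.  At i = 2 the carried count doubles (interval-1 packets repeat
 * twice per interval-2 period) before being distributed across
 * (1 << 3) = 8 opportunities, and so on up to the last interval.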
2145  */
2146 static int xhci_check_bw_table(struct xhci_hcd *xhci,
2147                 struct xhci_virt_device *virt_dev,
2148                 int old_active_eps)
2149 {
2150         unsigned int bw_reserved;
2151         unsigned int max_bandwidth;
2152         unsigned int bw_used;
2153         unsigned int block_size;
2154         struct xhci_interval_bw_table *bw_table;
2155         unsigned int packet_size = 0;
2156         unsigned int overhead = 0;
2157         unsigned int packets_transmitted = 0;
2158         unsigned int packets_remaining = 0;
2159         unsigned int i;
2160
2161         if (virt_dev->udev->speed == USB_SPEED_SUPER)
2162                 return xhci_check_ss_bw(xhci, virt_dev);
2163
2164         if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2165                 max_bandwidth = HS_BW_LIMIT;
2166                 /* Convert percent of bus BW reserved to blocks reserved */
2167                 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2168         } else {
2169                 max_bandwidth = FS_BW_LIMIT;
2170                 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2171         }
2172
2173         bw_table = virt_dev->bw_table;
2174         /* We need to translate the max packet size and max ESIT payloads into
2175          * the units the hardware uses.
2176          */
2177         block_size = xhci_get_block_size(virt_dev->udev);
2178
2179         /* If we are manipulating a LS/FS device under a HS hub, double check
2180          * that the HS bus has enough bandwidth if we are activating a new TT.
2181          */
2182         if (virt_dev->tt_info) {
2183                 xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
2184                                 virt_dev->real_port);
2185                 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2186                         xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2187                                         "newly activated TT.\n");
2188                         return -ENOMEM;
2189                 }
2190                 xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
2191                                 virt_dev->tt_info->slot_id,
2192                                 virt_dev->tt_info->ttport);
2193         } else {
2194                 xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
2195                                 virt_dev->real_port);
2196         }
2197
2198         /* Add in how much bandwidth will be used for interval zero, or the
2199          * rounded max ESIT payload + number of packets * largest overhead.
2200          */
2201         bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2202                 bw_table->interval_bw[0].num_packets *
2203                 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2204
2205         for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2206                 unsigned int bw_added;
2207                 unsigned int largest_mps;
2208                 unsigned int interval_overhead;
2209
2210                 /*
2211                  * How many packets could we transmit in this interval?
2212                  * If packets didn't fit in the previous interval, we will need
2213                  * to transmit that many packets twice within this interval.
2214                  */
2215                 packets_remaining = 2 * packets_remaining +
2216                         bw_table->interval_bw[i].num_packets;
2217
2218                 /* Find the largest max packet size of this or the previous
2219                  * interval.
2220                  */
2221                 if (list_empty(&bw_table->interval_bw[i].endpoints))
2222                         largest_mps = 0;
2223                 else {
2224                         struct xhci_virt_ep *virt_ep;
2225                         struct list_head *ep_entry;
2226
2227                         ep_entry = bw_table->interval_bw[i].endpoints.next;
2228                         virt_ep = list_entry(ep_entry,
2229                                         struct xhci_virt_ep, bw_endpoint_list);
2230                         /* Convert to blocks, rounding up */
2231                         largest_mps = DIV_ROUND_UP(
2232                                         virt_ep->bw_info.max_packet_size,
2233                                         block_size);
2234                 }
2235                 if (largest_mps > packet_size)
2236                         packet_size = largest_mps;
2237
2238                 /* Use the larger overhead of this or the previous interval. */
2239                 interval_overhead = xhci_get_largest_overhead(
2240                                 &bw_table->interval_bw[i]);
2241                 if (interval_overhead > overhead)
2242                         overhead = interval_overhead;
2243
2244                 /* How many packets can we evenly distribute across
2245                  * (1 << (i + 1)) possible scheduling opportunities?
2246                  */
2247                 packets_transmitted = packets_remaining >> (i + 1);
2248
2249                 /* Add in the bandwidth used for those scheduled packets */
2250                 bw_added = packets_transmitted * (overhead + packet_size);
2251
2252                 /* How many packets do we have remaining to transmit? */
2253                 packets_remaining = packets_remaining % (1 << (i + 1));
2254
2255                 /* What largest max packet size should those packets have? */
2256                 /* If we've transmitted all packets, don't carry over the
2257                  * largest packet size.
2258                  */
2259                 if (packets_remaining == 0) {
2260                         packet_size = 0;
2261                         overhead = 0;
2262                 } else if (packets_transmitted > 0) {
2263                         /* Otherwise if we do have remaining packets, and we've
2264                          * scheduled some packets in this interval, take the
2265                          * largest max packet size from endpoints with this
2266                          * interval.
2267                          */
2268                         packet_size = largest_mps;
2269                         overhead = interval_overhead;
2270                 }
2271                 /* Otherwise carry over packet_size and overhead from the last
2272                  * time we had a remainder.
2273                  */
2274                 bw_used += bw_added;
2275                 if (bw_used > max_bandwidth) {
2276                         xhci_warn(xhci, "Not enough bandwidth. "
2277                                         "Proposed: %u, Max: %u\n",
2278                                 bw_used, max_bandwidth);
2279                         return -ENOMEM;
2280                 }
2281         }
2282         /*
2283          * Ok, we know we have some packets left over after even-handedly
2284          * scheduling interval 15.  We don't know which microframes they will
2285          * fit into, so we over-schedule and say they will be scheduled every
2286          * microframe.
2287          */
2288         if (packets_remaining > 0)
2289                 bw_used += overhead + packet_size;
2290
2291         if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2292                 unsigned int port_index = virt_dev->real_port - 1;
2293
2294                 /* OK, we're manipulating a HS device attached to a
2295                  * root port bandwidth domain.  Include the number of active TTs
2296                  * in the bandwidth used.
2297                  */
2298                 bw_used += TT_HS_OVERHEAD *
2299                         xhci->rh_bw[port_index].num_active_tts;
2300         }
2301
2302         xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2303                 "Available: %u percent\n",
2304                 bw_used, max_bandwidth, bw_reserved,
2305                 (max_bandwidth - bw_used - bw_reserved) * 100 /
2306                 max_bandwidth);
2307
2308         bw_used += bw_reserved;
2309         if (bw_used > max_bandwidth) {
2310                 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2311                                 bw_used, max_bandwidth);
2312                 return -ENOMEM;
2313         }
2314
2315         bw_table->bw_used = bw_used;
2316         return 0;
2317 }
2318
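/*
 * Control and bulk endpoints are asynchronous: they are scheduled
 * opportunistically and consume no reserved periodic bandwidth, so the
 * interval bookkeeping below only tracks isoc and interrupt endpoints.
 */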
2319 static bool xhci_is_async_ep(unsigned int ep_type)
2320 {
2321         return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2322                                         ep_type != ISOC_IN_EP &&
2323                                         ep_type != INT_IN_EP);
2324 }
2325
2326 static bool xhci_is_sync_in_ep(unsigned int ep_type)
2327 {
2328         return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2329 }
2330
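/*
 * Worked example (numbers made up) for the estimate below: an endpoint
 * with max_packet_size = 1024, mult = 1, num_packets = 2 and
 * ep_interval = 0 gives mps = DIV_ROUND_UP(1024, SS_BLOCK) blocks and
 * consumes SS_OVERHEAD_BURST + 1 * 2 * (SS_OVERHEAD + mps) blocks.
 */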
2331 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2332 {
2333         unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2334
2335         if (ep_bw->ep_interval == 0)
2336                 return SS_OVERHEAD_BURST +
2337                         (ep_bw->mult * ep_bw->num_packets *
2338                                         (SS_OVERHEAD + mps));
2339         return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2340                                 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2341                                 1 << ep_bw->ep_interval);
2342
2343 }
2344
2345 void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2346                 struct xhci_bw_info *ep_bw,
2347                 struct xhci_interval_bw_table *bw_table,
2348                 struct usb_device *udev,
2349                 struct xhci_virt_ep *virt_ep,
2350                 struct xhci_tt_bw_info *tt_info)
2351 {
2352         struct xhci_interval_bw *interval_bw;
2353         int normalized_interval;
2354
2355         if (xhci_is_async_ep(ep_bw->type))
2356                 return;
2357
2358         if (udev->speed == USB_SPEED_SUPER) {
2359                 if (xhci_is_sync_in_ep(ep_bw->type))
2360                         xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2361                                 xhci_get_ss_bw_consumed(ep_bw);
2362                 else
2363                         xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2364                                 xhci_get_ss_bw_consumed(ep_bw);
2365                 return;
2366         }
2367
2368         /* SuperSpeed endpoints never get added to intervals in the table, so
2369          * this check is only valid for HS/FS/LS devices.
2370          */
2371         if (list_empty(&virt_ep->bw_endpoint_list))
2372                 return;
2373         /* For LS/FS devices, we need to translate the interval expressed in
2374          * microframes to frames.
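         * Intervals are stored as powers-of-two exponents, so subtracting
         * 3 converts an interval of 2^n microframes to 2^(n-3) frames
         * (there are 8 microframes per frame).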
2375          */
2376         if (udev->speed == USB_SPEED_HIGH)
2377                 normalized_interval = ep_bw->ep_interval;
2378         else
2379                 normalized_interval = ep_bw->ep_interval - 3;
2380
2381         if (normalized_interval == 0)
2382                 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2383         interval_bw = &bw_table->interval_bw[normalized_interval];
2384         interval_bw->num_packets -= ep_bw->num_packets;
2385         switch (udev->speed) {
2386         case USB_SPEED_LOW:
2387                 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2388                 break;
2389         case USB_SPEED_FULL:
2390                 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2391                 break;
2392         case USB_SPEED_HIGH:
2393                 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2394                 break;
2395         case USB_SPEED_SUPER:
2396         case USB_SPEED_UNKNOWN:
2397         case USB_SPEED_WIRELESS:
2398                 /* Should never happen because only LS/FS/HS endpoints will get
2399                  * added to the endpoint list.
2400                  */
2401                 return;
2402         }
2403         if (tt_info)
2404                 tt_info->active_eps -= 1;
2405         list_del_init(&virt_ep->bw_endpoint_list);
2406 }
2407
2408 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2409                 struct xhci_bw_info *ep_bw,
2410                 struct xhci_interval_bw_table *bw_table,
2411                 struct usb_device *udev,
2412                 struct xhci_virt_ep *virt_ep,
2413                 struct xhci_tt_bw_info *tt_info)
2414 {
2415         struct xhci_interval_bw *interval_bw;
2416         struct xhci_virt_ep *smaller_ep;
2417         int normalized_interval;
2418
2419         if (xhci_is_async_ep(ep_bw->type))
2420                 return;
2421
2422         if (udev->speed == USB_SPEED_SUPER) {
2423                 if (xhci_is_sync_in_ep(ep_bw->type))
2424                         xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2425                                 xhci_get_ss_bw_consumed(ep_bw);
2426                 else
2427                         xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2428                                 xhci_get_ss_bw_consumed(ep_bw);
2429                 return;
2430         }
2431
2432         /* For LS/FS devices, we need to translate the interval expressed in
2433          * microframes to frames.
2434          */
2435         if (udev->speed == USB_SPEED_HIGH)
2436                 normalized_interval = ep_bw->ep_interval;
2437         else
2438                 normalized_interval = ep_bw->ep_interval - 3;
2439
2440         if (normalized_interval == 0)
2441                 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2442         interval_bw = &bw_table->interval_bw[normalized_interval];
2443         interval_bw->num_packets += ep_bw->num_packets;
2444         switch (udev->speed) {
2445         case USB_SPEED_LOW:
2446                 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2447                 break;
2448         case USB_SPEED_FULL:
2449                 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2450                 break;
2451         case USB_SPEED_HIGH:
2452                 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2453                 break;
2454         case USB_SPEED_SUPER:
2455         case USB_SPEED_UNKNOWN:
2456         case USB_SPEED_WIRELESS:
2457                 /* Should never happen because only LS/FS/HS endpoints will get
2458                  * added to the endpoint list.
2459                  */
2460                 return;
2461         }
2462
2463         if (tt_info)
2464                 tt_info->active_eps += 1;
2465         /* Insert the endpoint into the list, largest max packet size first. */
2466         list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2467                         bw_endpoint_list) {
2468                 if (ep_bw->max_packet_size >=
2469                                 smaller_ep->bw_info.max_packet_size) {
2470                         /* Add the new ep before the smaller endpoint */
2471                         list_add_tail(&virt_ep->bw_endpoint_list,
2472                                         &smaller_ep->bw_endpoint_list);
2473                         return;
2474                 }
2475         }
2476         /* Add the new endpoint at the end of the list. */
2477         list_add_tail(&virt_ep->bw_endpoint_list,
2478                         &interval_bw->endpoints);
2479 }
2480
2481 void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2482                 struct xhci_virt_device *virt_dev,
2483                 int old_active_eps)
2484 {
2485         struct xhci_root_port_bw_info *rh_bw_info;
2486         if (!virt_dev->tt_info)
2487                 return;
2488
2489         rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2490         if (old_active_eps == 0 &&
2491                                 virt_dev->tt_info->active_eps != 0) {
2492                 rh_bw_info->num_active_tts += 1;
2493                 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2494         } else if (old_active_eps != 0 &&
2495                                 virt_dev->tt_info->active_eps == 0) {
2496                 rh_bw_info->num_active_tts -= 1;
2497                 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2498         }
2499 }
2500
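/*
 * The pattern below is transactional: snapshot each changed endpoint's
 * bw_info, speculatively apply the drops and adds to the interval tables,
 * and if the result no longer fits, replay the inverse operations and
 * restore the snapshots so the tables end up unchanged.
 */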
2501 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2502                 struct xhci_virt_device *virt_dev,
2503                 struct xhci_container_ctx *in_ctx)
2504 {
2505         struct xhci_bw_info ep_bw_info[31];
2506         int i;
2507         struct xhci_input_control_ctx *ctrl_ctx;
2508         int old_active_eps = 0;
2509
2510         if (virt_dev->tt_info)
2511                 old_active_eps = virt_dev->tt_info->active_eps;
2512
2513         ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
2514
2515         for (i = 0; i < 31; i++) {
2516                 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2517                         continue;
2518
2519                 /* Make a copy of the BW info in case we need to revert this */
2520                 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2521                                 sizeof(ep_bw_info[i]));
2522                 /* Drop the endpoint from the interval table if the endpoint is
2523                  * being dropped or changed.
2524                  */
2525                 if (EP_IS_DROPPED(ctrl_ctx, i))
2526                         xhci_drop_ep_from_interval_table(xhci,
2527                                         &virt_dev->eps[i].bw_info,
2528                                         virt_dev->bw_table,
2529                                         virt_dev->udev,
2530                                         &virt_dev->eps[i],
2531                                         virt_dev->tt_info);
2532         }
2533         /* Overwrite the information stored in the endpoints' bw_info */
2534         xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2535         for (i = 0; i < 31; i++) {
2536                 /* Add any changed or added endpoints to the interval table */
2537                 if (EP_IS_ADDED(ctrl_ctx, i))
2538                         xhci_add_ep_to_interval_table(xhci,
2539                                         &virt_dev->eps[i].bw_info,
2540                                         virt_dev->bw_table,
2541                                         virt_dev->udev,
2542                                         &virt_dev->eps[i],
2543                                         virt_dev->tt_info);
2544         }
2545
2546         if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2547                 /* Ok, this fits in the bandwidth we have.
2548                  * Update the number of active TTs.
2549                  */
2550                 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2551                 return 0;
2552         }
2553
2554         /* We don't have enough bandwidth for this, revert the stored info. */
2555         for (i = 0; i < 31; i++) {
2556                 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2557                         continue;
2558
2559                 /* Drop the new copies of any added or changed endpoints from
2560                  * the interval table.
2561                  */
2562                 if (EP_IS_ADDED(ctrl_ctx, i)) {
2563                         xhci_drop_ep_from_interval_table(xhci,
2564                                         &virt_dev->eps[i].bw_info,
2565                                         virt_dev->bw_table,
2566                                         virt_dev->udev,
2567                                         &virt_dev->eps[i],
2568                                         virt_dev->tt_info);
2569                 }
2570                 /* Revert the endpoint back to its old information */
2571                 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2572                                 sizeof(ep_bw_info[i]));
2573                 /* Add any changed or dropped endpoints back into the table */
2574                 if (EP_IS_DROPPED(ctrl_ctx, i))
2575                         xhci_add_ep_to_interval_table(xhci,
2576                                         &virt_dev->eps[i].bw_info,
2577                                         virt_dev->bw_table,
2578                                         virt_dev->udev,
2579                                         &virt_dev->eps[i],
2580                                         virt_dev->tt_info);
2581         }
2582         return -ENOMEM;
2583 }
2584
2585
2586 /* Issue a configure endpoint command or evaluate context command
2587  * and wait for it to finish.
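 * (ctx_change == false issues a Configure Endpoint command; ctx_change ==
 * true issues an Evaluate Context command instead.)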
2588  */
2589 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2590                 struct usb_device *udev,
2591                 struct xhci_command *command,
2592                 bool ctx_change, bool must_succeed)
2593 {
2594         int ret;
2595         int timeleft;
2596         unsigned long flags;
2597         struct xhci_container_ctx *in_ctx;
2598         struct completion *cmd_completion;
2599         u32 *cmd_status;
2600         struct xhci_virt_device *virt_dev;
2601         union xhci_trb *cmd_trb;
2602
2603         spin_lock_irqsave(&xhci->lock, flags);
2604         virt_dev = xhci->devs[udev->slot_id];
2605
2606         if (command)
2607                 in_ctx = command->in_ctx;
2608         else
2609                 in_ctx = virt_dev->in_ctx;
2610
2611         if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2612                         xhci_reserve_host_resources(xhci, in_ctx)) {
2613                 spin_unlock_irqrestore(&xhci->lock, flags);
2614                 xhci_warn(xhci, "Not enough host resources, "
2615                                 "active endpoint contexts = %u\n",
2616                                 xhci->num_active_eps);
2617                 return -ENOMEM;
2618         }
2619         if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2620                         xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
2621                 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2622                         xhci_free_host_resources(xhci, in_ctx);
2623                 spin_unlock_irqrestore(&xhci->lock, flags);
2624                 xhci_warn(xhci, "Not enough bandwidth\n");
2625                 return -ENOMEM;
2626         }
2627
2628         if (command) {
2629                 cmd_completion = command->completion;
2630                 cmd_status = &command->status;
2631                 command->command_trb = xhci->cmd_ring->enqueue;
2632
2633                 /* Enqueue pointer can be left pointing to the link TRB,
2634                  * so we must handle that case.
2635                  */
2636                 if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
2637                         command->command_trb =
2638                                 xhci->cmd_ring->enq_seg->next->trbs;
2639
2640                 list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
2641         } else {
2642                 cmd_completion = &virt_dev->cmd_completion;
2643                 cmd_status = &virt_dev->cmd_status;
2644         }
2645         init_completion(cmd_completion);
2646
2647         cmd_trb = xhci->cmd_ring->dequeue;
2648         if (!ctx_change)
2649                 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
2650                                 udev->slot_id, must_succeed);
2651         else
2652                 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
2653                                 udev->slot_id);
2654         if (ret < 0) {
2655                 if (command)
2656                         list_del(&command->cmd_list);
2657                 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2658                         xhci_free_host_resources(xhci, in_ctx);
2659                 spin_unlock_irqrestore(&xhci->lock, flags);
2660                 xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
2661                 return -ENOMEM;
2662         }
2663         xhci_ring_cmd_db(xhci);
2664         spin_unlock_irqrestore(&xhci->lock, flags);
2665
2666         /* Wait for the configure endpoint command to complete */
2667         timeleft = wait_for_completion_interruptible_timeout(
2668                         cmd_completion,
2669                         XHCI_CMD_DEFAULT_TIMEOUT);
2670         if (timeleft <= 0) {
2671                 xhci_warn(xhci, "%s while waiting for %s command\n",
2672                                 timeleft == 0 ? "Timeout" : "Signal",
2673                                 ctx_change == 0 ?
2674                                         "configure endpoint" :
2675                                         "evaluate context");
2676                 /* cancel the configure endpoint command */
2677                 ret = xhci_cancel_cmd(xhci, command, cmd_trb);
2678                 if (ret < 0)
2679                         return ret;
2680                 return -ETIME;
2681         }
2682
2683         if (!ctx_change)
2684                 ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
2685         else
2686                 ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
2687
2688         if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2689                 spin_lock_irqsave(&xhci->lock, flags);
2690                 /* If the command failed, remove the reserved resources.
2691                  * Otherwise, clean up the estimate to include dropped eps.
2692                  */
2693                 if (ret)
2694                         xhci_free_host_resources(xhci, in_ctx);
2695                 else
2696                         xhci_finish_resource_reservation(xhci, in_ctx);
2697                 spin_unlock_irqrestore(&xhci->lock, flags);
2698         }
2699         return ret;
2700 }
2701
2702 /* Called after one or more calls to xhci_add_endpoint() or
2703  * xhci_drop_endpoint().  If this call fails, the USB core is expected
2704  * to call xhci_reset_bandwidth().
2705  *
2706  * Since we are in the middle of changing either configuration or
2707  * installing a new alt setting, the USB core won't allow URBs to be
2708  * enqueued for any endpoint on the old config or interface.  Nothing
2709  * else should be touching the xhci->devs[slot_id] structure, so we
2710  * don't need to take the xhci->lock for manipulating that.
2711  */
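/* For orientation, the calling sequence from the USB core looks roughly like
 * the sketch below (illustrative only, not a verbatim quote of
 * usb_hcd_alloc_bandwidth()):
 *
 *      hcd->driver->drop_endpoint(hcd, udev, ep);  // each endpoint going away
 *      hcd->driver->add_endpoint(hcd, udev, ep);   // each endpoint being added
 *      ret = hcd->driver->check_bandwidth(hcd, udev);
 *      if (ret)
 *              hcd->driver->reset_bandwidth(hcd, udev);
 */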
2712 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2713 {
2714         int i;
2715         int ret = 0;
2716         struct xhci_hcd *xhci;
2717         struct xhci_virt_device *virt_dev;
2718         struct xhci_input_control_ctx *ctrl_ctx;
2719         struct xhci_slot_ctx *slot_ctx;
2720
2721         ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2722         if (ret <= 0)
2723                 return ret;
2724         xhci = hcd_to_xhci(hcd);
2725         if (xhci->xhc_state & XHCI_STATE_DYING)
2726                 return -ENODEV;
2727
2728         xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2729         virt_dev = xhci->devs[udev->slot_id];
2730
2731         /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
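        /* In the input control context bitmaps, bit 0 (SLOT_FLAG) selects the
         * slot context, bit 1 (EP0_FLAG) selects endpoint 0, and bits 2..31
         * select endpoint contexts 1 through 30 (flag = 1 << (ep_index + 1)).
         */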
2732         ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
2733         ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2734         ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2735         ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2736
2737         /* Don't issue the command if there are no endpoints to update. */
2738         if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2739                         ctrl_ctx->drop_flags == 0)
2740                 return 0;
2741
2742         xhci_dbg(xhci, "New Input Control Context:\n");
2743         slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2744         xhci_dbg_ctx(xhci, virt_dev->in_ctx,
2745                      LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2746
2747         ret = xhci_configure_endpoint(xhci, udev, NULL,
2748                         false, false);
2749         if (ret) {
2750                 /* Caller should call reset_bandwidth() */
2751                 return ret;
2752         }
2753
2754         xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
2755         xhci_dbg_ctx(xhci, virt_dev->out_ctx,
2756                      LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2757
2758         /* Free any rings that were dropped, but not changed. */
2759         for (i = 1; i < 31; ++i) {
2760                 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2761                     !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
2762                         xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2763         }
2764         xhci_zero_in_ctx(xhci, virt_dev);
2765         /*
2766          * Install any rings for completely new endpoints or changed endpoints,
2767          * and free or cache any old rings from changed endpoints.
2768          */
2769         for (i = 1; i < 31; ++i) {
2770                 if (!virt_dev->eps[i].new_ring)
2771                         continue;
2772                 /* Only cache or free the old ring if it exists.
2773                  * It may not if this is the first add of an endpoint.
2774                  */
2775                 if (virt_dev->eps[i].ring) {
2776                         xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2777                 }
2778                 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2779                 virt_dev->eps[i].new_ring = NULL;
2780         }
2781
2782         return ret;
2783 }
2784
2785 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2786 {
2787         struct xhci_hcd *xhci;
2788         struct xhci_virt_device *virt_dev;
2789         int i, ret;
2790
2791         ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2792         if (ret <= 0)
2793                 return;
2794         xhci = hcd_to_xhci(hcd);
2795
2796         xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2797         virt_dev = xhci->devs[udev->slot_id];
2798         /* Free any rings allocated for added endpoints */
2799         for (i = 0; i < 31; ++i) {
2800                 if (virt_dev->eps[i].new_ring) {
2801                         xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2802                         virt_dev->eps[i].new_ring = NULL;
2803                 }
2804         }
2805         xhci_zero_in_ctx(xhci, virt_dev);
2806 }
2807
2808 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2809                 struct xhci_container_ctx *in_ctx,
2810                 struct xhci_container_ctx *out_ctx,
2811                 u32 add_flags, u32 drop_flags)
2812 {
2813         struct xhci_input_control_ctx *ctrl_ctx;
2814         ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
2815         ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2816         ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2817         xhci_slot_copy(xhci, in_ctx, out_ctx);
2818         ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2819
2820         xhci_dbg(xhci, "Input Context:\n");
2821         xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
2822 }
2823
2824 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2825                 unsigned int slot_id, unsigned int ep_index,
2826                 struct xhci_dequeue_state *deq_state)
2827 {
2828         struct xhci_container_ctx *in_ctx;
2829         struct xhci_ep_ctx *ep_ctx;
2830         u32 added_ctxs;
2831         dma_addr_t addr;
2832
2833         xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2834                         xhci->devs[slot_id]->out_ctx, ep_index);
2835         in_ctx = xhci->devs[slot_id]->in_ctx;
2836         ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2837         addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2838                         deq_state->new_deq_ptr);
2839         if (addr == 0) {
2840                 xhci_warn(xhci, "WARN Cannot submit config ep after "
2841                                 "reset ep command\n");
2842                 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2843                                 deq_state->new_deq_seg,
2844                                 deq_state->new_deq_ptr);
2845                 return;
2846         }
2847         ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2848
2849         added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2850         xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2851                         xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
2852 }
2853
2854 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2855                 struct usb_device *udev, unsigned int ep_index)
2856 {
2857         struct xhci_dequeue_state deq_state;
2858         struct xhci_virt_ep *ep;
2859
2860         xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
2861         ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2862         /* We need to move the HW's dequeue pointer past this TD,
2863          * or it will attempt to resend it on the next doorbell ring.
2864          */
2865         xhci_find_new_dequeue_state(xhci, udev->slot_id,
2866                         ep_index, ep->stopped_stream, ep->stopped_td,
2867                         &deq_state);
2868
2869         if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
2870                 return;
2871
2872         /* HW with the reset endpoint quirk will use the saved dequeue state to
2873          * issue a configure endpoint command later.
2874          */
2875         if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2876                 xhci_dbg(xhci, "Queueing new dequeue state\n");
2877                 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2878                                 ep_index, ep->stopped_stream, &deq_state);
2879         } else {
2880                 /* Better hope no one uses the input context between now and the
2881                  * reset endpoint completion!
2882                  * XXX: No idea how this hardware will react when stream rings
2883                  * are enabled.
2884                  */
2885                 xhci_dbg(xhci, "Setting up input context for "
2886                                 "configure endpoint command\n");
2887                 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2888                                 ep_index, &deq_state);
2889         }
2890 }
2891
2892 /* Called when clearing halted device. The core should have sent the control
2893  * message to clear the device halt condition. The host side of the halt should
2894  * already be cleared with a Reset Endpoint command issued when the STALL
2895  * transfer event was received.
2896  *
2897  * Context: in_interrupt
2898  */
2900 void xhci_endpoint_reset(struct usb_hcd *hcd,
2901                 struct usb_host_endpoint *ep)
2902 {
2903         struct xhci_hcd *xhci;
2904
2905         xhci = hcd_to_xhci(hcd);
2906
2907         /*
2908          * We might need to implement the config ep cmd from the xHCI spec,
2909          * section 4.8.1, note: The Reset Endpoint Command may only be issued
2910          * to endpoints in the Halted state. To reset the Data Toggle or
2911          * Sequence Number of an endpoint that isn't Halted, software may
2912          * instead issue a Configure Endpoint Command with the Drop and Add
2913          * bits set for the target endpoint while it is in the Stopped state.
2914          */
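        /*
         * A hypothetical implementation along those lines (an untested sketch,
         * not what this driver currently does) would copy the endpoint context
         * with xhci_endpoint_copy(), set matching Drop and Add flags via
         * xhci_setup_input_ctx_for_config_ep(), and issue the command with
         * xhci_configure_endpoint().
         */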
2915
2916         /* For now just print debug to follow the situation */
2917         xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
2918                  ep->desc.bEndpointAddress);
2919 }
2920
2921 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2922                 struct usb_device *udev, struct usb_host_endpoint *ep,
2923                 unsigned int slot_id)
2924 {
2925         int ret;
2926         unsigned int ep_index;
2927         unsigned int ep_state;
2928
2929         if (!ep)
2930                 return -EINVAL;
2931         ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
2932         if (ret <= 0)
2933                 return -EINVAL;
2934         if (ep->ss_ep_comp.bmAttributes == 0) {
2935                 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
2936                                 " descriptor for ep 0x%x does not support streams\n",
2937                                 ep->desc.bEndpointAddress);
2938                 return -EINVAL;
2939         }
2940
2941         ep_index = xhci_get_endpoint_index(&ep->desc);
2942         ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2943         if (ep_state & EP_HAS_STREAMS ||
2944                         ep_state & EP_GETTING_STREAMS) {
2945                 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
2946                                 "already has streams set up.\n",
2947                                 ep->desc.bEndpointAddress);
2948                 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
2949                                 "dynamic stream context array reallocation.\n");
2950                 return -EINVAL;
2951         }
2952         if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
2953                 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
2954                                 "endpoint 0x%x; URBs are pending.\n",
2955                                 ep->desc.bEndpointAddress);
2956                 return -EINVAL;
2957         }
2958         return 0;
2959 }
2960
2961 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
2962                 unsigned int *num_streams, unsigned int *num_stream_ctxs)
2963 {
2964         unsigned int max_streams;
2965
2966         /* The stream context array size must be a power of two */
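        /* e.g. a request for 13 stream IDs (stream 0 included) rounds up to a
         * 16-entry stream context array.
         */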
2967         *num_stream_ctxs = roundup_pow_of_two(*num_streams);
2968         /*
2969          * Find out how many primary stream array entries the host controller
2970          * supports.  Later we may use secondary stream arrays (similar to 2nd
2971          * level page entries), but that's an optional feature for xHCI host
2972          * controllers. xHCs must support at least 4 stream IDs.
2973          */
2974         max_streams = HCC_MAX_PSA(xhci->hcc_params);
2975         if (*num_stream_ctxs > max_streams) {
2976                 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
2977                                 max_streams);
2978                 *num_stream_ctxs = max_streams;
2979                 *num_streams = max_streams;
2980         }
2981 }
2982
2983 /* Returns an error code if one of the endpoints already has streams.
2984  * This does not change any data structures; it only checks and gathers
2985  * information.
2986  */
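/* In the returned bitmask, each endpoint contributes its input context flag,
 * i.e. bit (ep_index + 1); for example, endpoints with indices 2 and 3 set
 * bits 3 and 4.
 */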
2987 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
2988                 struct usb_device *udev,
2989                 struct usb_host_endpoint **eps, unsigned int num_eps,
2990                 unsigned int *num_streams, u32 *changed_ep_bitmask)
2991 {
2992         unsigned int max_streams;
2993         unsigned int endpoint_flag;
2994         int i;
2995         int ret;
2996
2997         for (i = 0; i < num_eps; i++) {
2998                 ret = xhci_check_streams_endpoint(xhci, udev,
2999                                 eps[i], udev->slot_id);
3000                 if (ret < 0)
3001                         return ret;
3002
3003                 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3004                 if (max_streams < (*num_streams - 1)) {
3005                         xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3006                                         eps[i]->desc.bEndpointAddress,
3007                                         max_streams);
3008                         *num_streams = max_streams + 1;
3009                 }
3010
3011                 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3012                 if (*changed_ep_bitmask & endpoint_flag)
3013                         return -EINVAL;
3014                 *changed_ep_bitmask |= endpoint_flag;
3015         }
3016         return 0;
3017 }
3018
3019 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3020                 struct usb_device *udev,
3021                 struct usb_host_endpoint **eps, unsigned int num_eps)
3022 {
3023         u32 changed_ep_bitmask = 0;
3024         unsigned int slot_id;
3025         unsigned int ep_index;
3026         unsigned int ep_state;
3027         int i;
3028
3029         slot_id = udev->slot_id;
3030         if (!xhci->devs[slot_id])
3031                 return 0;
3032
3033         for (i = 0; i < num_eps; i++) {
3034                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3035                 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3036                 /* Are streams already being freed for the endpoint? */
3037                 if (ep_state & EP_GETTING_NO_STREAMS) {
3038                         xhci_warn(xhci, "WARN Can't disable streams for "
3039                                         "endpoint 0x%x, "
3040                                         "streams are being disabled already\n",
3041                                         eps[i]->desc.bEndpointAddress);
3042                         return 0;
3043                 }
3044                 /* Are there actually any streams to free? */
3045                 if (!(ep_state & EP_HAS_STREAMS) &&
3046                                 !(ep_state & EP_GETTING_STREAMS)) {
3047                         xhci_warn(xhci, "WARN Can't disable streams for "
3048                                         "endpoint 0x%x, "
3049                                         "streams are already disabled!\n",
3050                                         eps[i]->desc.bEndpointAddress);
3051                         xhci_warn(xhci, "WARN xhci_free_streams() called "
3052                                         "with non-streams endpoint\n");
3053                         return 0;
3054                 }
3055                 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3056         }
3057         return changed_ep_bitmask;
3058 }
3059
3060 /*
3061  * USB device drivers use this function (through the HCD interface in the USB
3062  * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
3063  * coordinate mass storage command queueing across multiple endpoints (basically
3064  * a stream ID == a task ID).
3065  *
3066  * Setting up streams involves allocating the same size stream context array
3067  * for each endpoint and issuing a configure endpoint command for all endpoints.
3068  *
3069  * Don't allow the call to succeed if one endpoint only supports one stream
3070  * (which means it doesn't support streams at all).
3071  *
3072  * Drivers may get less stream IDs than they asked for, if the host controller
3073  * hardware or endpoints claim they can't support the number of requested
3074  * stream IDs.
3075  */
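/* A mass storage class driver would normally get here through the USB core
 * rather than by calling this directly.  A rough usage sketch (illustrative
 * values, error handling elided):
 *
 *      int num = usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);
 *
 *      if (num < 0)
 *              return num;     // fall back to non-stream operation
 *      // Stream IDs 1..num are now usable on each endpoint in eps[].
 */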
3076 int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3077                 struct usb_host_endpoint **eps, unsigned int num_eps,
3078                 unsigned int num_streams, gfp_t mem_flags)
3079 {
3080         int i, ret;
3081         struct xhci_hcd *xhci;
3082         struct xhci_virt_device *vdev;
3083         struct xhci_command *config_cmd;
3084         unsigned int ep_index;
3085         unsigned int num_stream_ctxs;
3086         unsigned long flags;
3087         u32 changed_ep_bitmask = 0;
3088
3089         if (!eps)
3090                 return -EINVAL;
3091
3092         /* Add one to the number of streams requested to account for
3093          * stream 0, which is reserved for xHCI usage.
3094          */
3095         num_streams += 1;
3096         xhci = hcd_to_xhci(hcd);
3097         xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3098                         num_streams);
3099
3100         config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
3101         if (!config_cmd) {
3102                 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
3103                 return -ENOMEM;
3104         }
3105
3106         /* Check that none of the endpoints is already configured for
3107          * streams.  While we're at it, find the maximum number of streams that
3108          * all the endpoints will support and check for duplicate endpoints.
3109          */
3110         spin_lock_irqsave(&xhci->lock, flags);
3111         ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3112                         num_eps, &num_streams, &changed_ep_bitmask);
3113         if (ret < 0) {
3114                 xhci_free_command(xhci, config_cmd);
3115                 spin_unlock_irqrestore(&xhci->lock, flags);
3116                 return ret;
3117         }
3118         if (num_streams <= 1) {
3119                 xhci_warn(xhci, "WARN: endpoints can't handle "
3120                                 "more than one stream.\n");
3121                 xhci_free_command(xhci, config_cmd);
3122                 spin_unlock_irqrestore(&xhci->lock, flags);
3123                 return -EINVAL;
3124         }
3125         vdev = xhci->devs[udev->slot_id];
3126         /* Mark each endpoint as being in transition, so
3127          * xhci_urb_enqueue() will reject all URBs.
3128          */
3129         for (i = 0; i < num_eps; i++) {
3130                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3131                 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3132         }
3133         spin_unlock_irqrestore(&xhci->lock, flags);
3134
3135         /* Setup internal data structures and allocate HW data structures for
3136          * streams (but don't install the HW structures in the input context
3137          * until we're sure all memory allocation succeeded).
3138          */
3139         xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3140         xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3141                         num_stream_ctxs, num_streams);
3142
3143         for (i = 0; i < num_eps; i++) {
3144                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3145                 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3146                                 num_stream_ctxs,
3147                                 num_streams, mem_flags);
3148                 if (!vdev->eps[ep_index].stream_info)
3149                         goto cleanup;
3150                 /* FIXME: Set maxPstreams in endpoint context and update
3151                  * deq ptr to point to the stream context array.
3152                  */
3153         }
3154
3155         /* Set up the input context for a configure endpoint command. */
3156         for (i = 0; i < num_eps; i++) {
3157                 struct xhci_ep_ctx *ep_ctx;
3158
3159                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3160                 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3161
3162                 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3163                                 vdev->out_ctx, ep_index);
3164                 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3165                                 vdev->eps[ep_index].stream_info);
3166         }
3167         /* Tell the HW to drop its old copy of the endpoint context info
3168          * and add the updated copy from the input context.
3169          */
3170         xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3171                         vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
3172
3173         /* Issue and wait for the configure endpoint command */
3174         ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3175                         false, false);
3176
3177         /* xHC rejected the configure endpoint command for some reason, so we
3178          * leave the old ring intact and free our internal streams data
3179          * structure.
3180          */
3181         if (ret < 0)
3182                 goto cleanup;
3183
3184         spin_lock_irqsave(&xhci->lock, flags);
3185         for (i = 0; i < num_eps; i++) {
3186                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3187                 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3188                 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3189                          udev->slot_id, ep_index);
3190                 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3191         }
3192         xhci_free_command(xhci, config_cmd);
3193         spin_unlock_irqrestore(&xhci->lock, flags);
3194
3195         /* Subtract 1 for stream 0, which drivers can't use */
3196         return num_streams - 1;
3197
3198 cleanup:
3199         /* If it didn't work, free the streams! */
3200         for (i = 0; i < num_eps; i++) {
3201                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3202                 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3203                 vdev->eps[ep_index].stream_info = NULL;
3204                 /* FIXME Unset maxPstreams in endpoint context and
3205                  * update deq ptr to point to the normal endpoint ring.
3206                  */
3207                 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3208                 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3209                 xhci_endpoint_zero(xhci, vdev, eps[i]);
3210         }
3211         xhci_free_command(xhci, config_cmd);
3212         return -ENOMEM;
3213 }
3214
3215 /* Transition the endpoint from using streams to being a "normal" endpoint
3216  * without streams.
3217  *
3218  * Modify the endpoint context state, submit a configure endpoint command,
3219  * and free all endpoint rings for streams if that completes successfully.
3220  */
3221 int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3222                 struct usb_host_endpoint **eps, unsigned int num_eps,
3223                 gfp_t mem_flags)
3224 {
3225         int i, ret;
3226         struct xhci_hcd *xhci;
3227         struct xhci_virt_device *vdev;
3228         struct xhci_command *command;
3229         unsigned int ep_index;
3230         unsigned long flags;
3231         u32 changed_ep_bitmask;
3232
3233         xhci = hcd_to_xhci(hcd);
3234         vdev = xhci->devs[udev->slot_id];
3235
3236         /* Set up a configure endpoint command to remove the streams rings */
3237         spin_lock_irqsave(&xhci->lock, flags);
3238         changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3239                         udev, eps, num_eps);
3240         if (changed_ep_bitmask == 0) {
3241                 spin_unlock_irqrestore(&xhci->lock, flags);
3242                 return -EINVAL;
3243         }
3244
3245         /* Use the xhci_command structure from the first endpoint.  We may have
3246          * allocated too many, but the driver may call xhci_free_streams() for
3247          * each endpoint it grouped into one call to xhci_alloc_streams().
3248          */
3249         ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3250         command = vdev->eps[ep_index].stream_info->free_streams_command;
3251         for (i = 0; i < num_eps; i++) {
3252                 struct xhci_ep_ctx *ep_ctx;
3253
3254                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3255                 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3256                 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3257                         EP_GETTING_NO_STREAMS;
3258
3259                 xhci_endpoint_copy(xhci, command->in_ctx,
3260                                 vdev->out_ctx, ep_index);
3261                 xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
3262                                 &vdev->eps[ep_index]);
3263         }
3264         xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3265                         vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
3266         spin_unlock_irqrestore(&xhci->lock, flags);
3267
3268         /* Issue and wait for the configure endpoint command,
3269          * which must succeed.
3270          */
3271         ret = xhci_configure_endpoint(xhci, udev, command,
3272                         false, true);
3273
3274         /* xHC rejected the configure endpoint command for some reason, so we
3275          * leave the streams rings intact.
3276          */
3277         if (ret < 0)
3278                 return ret;
3279
3280         spin_lock_irqsave(&xhci->lock, flags);
3281         for (i = 0; i < num_eps; i++) {
3282                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3283                 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3284                 vdev->eps[ep_index].stream_info = NULL;
3285                 /* FIXME Unset maxPstreams in endpoint context and
3286                  * update deq ptr to point to the normal endpoint ring.
3287                  */
3288                 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3289                 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3290         }
3291         spin_unlock_irqrestore(&xhci->lock, flags);
3292
3293         return 0;
3294 }
3295
3296 /*
3297  * Deletes endpoint resources for endpoints that were active before a Reset
3298  * Device command, or a Disable Slot command.  The Reset Device command leaves
3299  * the control endpoint intact, whereas the Disable Slot command deletes it.
3300  *
3301  * Must be called with xhci->lock held.
3302  */
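/* For example, a Reset Device command issued for a device with two configured
 * bulk endpoints releases two endpoint contexts back to the host's pool,
 * while the default control endpoint stays counted in num_active_eps.
 */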
3303 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3304         struct xhci_virt_device *virt_dev, bool drop_control_ep)
3305 {
3306         int i;
3307         unsigned int num_dropped_eps = 0;
3308         unsigned int drop_flags = 0;
3309
3310         for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3311                 if (virt_dev->eps[i].ring) {
3312                         drop_flags |= 1 << i;
3313                         num_dropped_eps++;
3314                 }
3315         }
3316         xhci->num_active_eps -= num_dropped_eps;
3317         if (num_dropped_eps)
3318                 xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
3319                                 "%u now active.\n",
3320                                 num_dropped_eps, drop_flags,
3321                                 xhci->num_active_eps);
3322 }
3323
3324 /*
3325  * This submits a Reset Device Command, which will set the device state to 0,
3326  * set the device address to 0, and disable all the endpoints except the default
3327  * control endpoint.  The USB core should come back and call
3328  * xhci_address_device(), and then re-set up the configuration.  If this is
3329  * called because of a usb_reset_and_verify_device(), then the old alternate
3330  * settings will be re-installed through the normal bandwidth allocation
3331  * functions.
3332  *
3333  * Wait for the Reset Device command to finish.  Remove all structures
3334  * associated with the endpoints that were disabled.  Clear the input device
3335  * structure?  Cache the rings?  Reset the control endpoint 0 max packet size?
3336  *
3337  * If the virt_dev to be reset does not exist or does not match the udev,
3338  * it means the device is lost, possibly due to an xHC restore error and
3339  * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3340  * re-allocate the device.
3341  */
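/* The command itself is a single Reset Device TRB; the sequence below mirrors
 * xhci_configure_endpoint(): enqueue the TRB, ring the command doorbell, then
 * sleep on the command's completion.
 */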
3342 int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3343 {
3344         int ret, i;
3345         unsigned long flags;
3346         struct xhci_hcd *xhci;
3347         unsigned int slot_id;
3348         struct xhci_virt_device *virt_dev;
3349         struct xhci_command *reset_device_cmd;
3350         int timeleft;
3351         int last_freed_endpoint;
3352         struct xhci_slot_ctx *slot_ctx;
3353         int old_active_eps = 0;
3354
3355         ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3356         if (ret <= 0)
3357                 return ret;
3358         xhci = hcd_to_xhci(hcd);
3359         slot_id = udev->slot_id;
3360         virt_dev = xhci->devs[slot_id];
3361         if (!virt_dev) {
3362                 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3363                                 "not exist. Re-allocate the device\n", slot_id);
3364                 ret = xhci_alloc_dev(hcd, udev);
3365                 if (ret == 1)
3366                         return 0;
3367                 else
3368                         return -EINVAL;
3369         }
3370
3371         if (virt_dev->tt_info)
3372                 old_active_eps = virt_dev->tt_info->active_eps;
3373
3374         if (virt_dev->udev != udev) {
3375                 /* If the virt_dev and the udev do not match, this virt_dev
3376                  * may belong to another udev.
3377                  * Re-allocate the device.
3378                  */
3379                 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3380                                 "not match the udev. Re-allocate the device\n",
3381                                 slot_id);
3382                 ret = xhci_alloc_dev(hcd, udev);
3383                 if (ret == 1)
3384                         return 0;
3385                 else
3386                         return -EINVAL;
3387         }
3388
3389         /* If device is not setup, there is no point in resetting it */
3390         slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3391         if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3392                                                 SLOT_STATE_DISABLED)
3393                 return 0;
3394
3395         xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3396         /* Allocate the command structure that holds the struct completion.
3397          * Assume we're in process context, since the normal device reset
3398          * process has to wait for the device anyway.  Storage devices are
3399          * reset as part of error handling, so use GFP_NOIO instead of
3400          * GFP_KERNEL.
3401          */
3402         reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
3403         if (!reset_device_cmd) {
3404                 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3405                 return -ENOMEM;
3406         }
3407
3408         /* Attempt to submit the Reset Device command to the command ring */
3409         spin_lock_irqsave(&xhci->lock, flags);
3410         reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
3411
3412         /* Enqueue pointer can be left pointing to the link TRB,
3413                  * so we must handle that case.
3414          */
3415         if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
3416                 reset_device_cmd->command_trb =
3417                         xhci->cmd_ring->enq_seg->next->trbs;
3418
3419         list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
3420         ret = xhci_queue_reset_device(xhci, slot_id);
3421         if (ret) {
3422                 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3423                 list_del(&reset_device_cmd->cmd_list);
3424                 spin_unlock_irqrestore(&xhci->lock, flags);
3425                 goto command_cleanup;
3426         }
3427         xhci_ring_cmd_db(xhci);
3428         spin_unlock_irqrestore(&xhci->lock, flags);
3429
3430         /* Wait for the Reset Device command to finish */
3431         timeleft = wait_for_completion_interruptible_timeout(
3432                         reset_device_cmd->completion,
3433                         USB_CTRL_SET_TIMEOUT);
3434         if (timeleft <= 0) {
3435                 xhci_warn(xhci, "%s while waiting for reset device command\n",
3436                                 timeleft == 0 ? "Timeout" : "Signal");
3437                 spin_lock_irqsave(&xhci->lock, flags);
3438                 /* The timeout might have raced with the event ring handler, so
3439                  * only delete from the list if the item isn't poisoned.
3440                  */
3441                 if (reset_device_cmd->cmd_list.next != LIST_POISON1)
3442                         list_del(&reset_device_cmd->cmd_list);
3443                 spin_unlock_irqrestore(&xhci->lock, flags);
3444                 ret = -ETIME;
3445                 goto command_cleanup;
3446         }
3447
3448         /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3449          * unless we tried to reset a slot ID that wasn't enabled,
3450          * or the device wasn't in the addressed or configured state.
3451          */
3452         ret = reset_device_cmd->status;
3453         switch (ret) {
3454         case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
3455         case COMP_CTX_STATE: /* 0.96 completion code for same thing */
3456                 xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
3457                                 slot_id,
3458                                 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3459                 xhci_info(xhci, "Not freeing device rings.\n");
3460                 /* Don't treat this as an error.  May change my mind later. */
3461                 ret = 0;
3462                 goto command_cleanup;
3463         case COMP_SUCCESS:
3464                 xhci_dbg(xhci, "Successful reset device command.\n");
3465                 break;
3466         default:
3467                 if (xhci_is_vendor_info_code(xhci, ret))
3468                         break;
3469                 xhci_warn(xhci, "Unknown completion code %u for "
3470                                 "reset device command.\n", ret);
3471                 ret = -EINVAL;
3472                 goto command_cleanup;
3473         }
3474
3475         /* Free up host controller endpoint resources */
3476         if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3477                 spin_lock_irqsave(&xhci->lock, flags);
3478                 /* Don't delete the default control endpoint resources */
3479                 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3480                 spin_unlock_irqrestore(&xhci->lock, flags);
3481         }
3482
3483         /* Everything but endpoint 0 is disabled, so free or cache the rings. */
3484         last_freed_endpoint = 1;
3485         for (i = 1; i < 31; ++i) {
3486                 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3487
3488                 if (ep->ep_state & EP_HAS_STREAMS) {
3489                         xhci_free_stream_info(xhci, ep->stream_info);
3490                         ep->stream_info = NULL;
3491                         ep->ep_state &= ~EP_HAS_STREAMS;
3492                 }
3493
3494                 if (ep->ring) {
3495                         xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
3496                         last_freed_endpoint = i;
3497                 }
3498                 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3499                         xhci_drop_ep_from_interval_table(xhci,
3500                                         &virt_dev->eps[i].bw_info,
3501                                         virt_dev->bw_table,
3502                                         udev,
3503                                         &virt_dev->eps[i],
3504                                         virt_dev->tt_info);
3505                 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3506         }
3507         /* If necessary, update the number of active TTs on this root port */
3508         xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3509
3510         xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
3511         xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
3512         ret = 0;
3513
3514 command_cleanup:
3515         xhci_free_command(xhci, reset_device_cmd);
3516         return ret;
3517 }
3518
3519 /*
3520  * At this point, the struct usb_device is about to go away, the device has
3521  * disconnected, and all traffic has been stopped and the endpoints have been
3522  * disabled.  Free any HC data structures associated with that device.
3523  */
3524 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3525 {
3526         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3527         struct xhci_virt_device *virt_dev;
3528         struct device *dev = hcd->self.controller;
3529         unsigned long flags;
3530         u32 state;
3531         int i, ret;
3532
3533 #ifndef CONFIG_USB_DEFAULT_PERSIST
3534         /*
3535          * We called pm_runtime_get_noresume when the device was attached.
3536          * Decrement the counter here to allow controller to runtime suspend
3537          * if no devices remain.
3538          */
3539         if (xhci->quirks & XHCI_RESET_ON_RESUME)
3540                 pm_runtime_put_noidle(dev);
3541 #endif
3542
3543