usb: host: xhci: Enable XHCI_SPURIOUS_SUCCESS for all controllers with xhci 1.0
[pandora-kernel.git] / drivers/usb/host/xhci.c
1 /*
2  * xHCI host controller driver
3  *
4  * Copyright (C) 2008 Intel Corp.
5  *
6  * Author: Sarah Sharp
7  * Some code borrowed from the Linux EHCI driver.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15  * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16  * for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software Foundation,
20  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22
23 #include <linux/pci.h>
24 #include <linux/irq.h>
25 #include <linux/log2.h>
26 #include <linux/module.h>
27 #include <linux/moduleparam.h>
28 #include <linux/slab.h>
29 #include <linux/dmi.h>
30
31 #include "xhci.h"
32
33 #define DRIVER_AUTHOR "Sarah Sharp"
34 #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
35
36 /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
37 static int link_quirk;
38 module_param(link_quirk, int, S_IRUGO | S_IWUSR);
39 MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
40
41 /* TODO: copied from ehci-hcd.c - can this be refactored? */
42 /*
43  * handshake - spin reading hc until handshake completes or fails
44  * @ptr: address of hc register to be read
45  * @mask: bits to look at in result of read
46  * @done: value of those bits when handshake succeeds
47  * @usec: timeout in microseconds
48  *
49  * Returns negative errno, or zero on success
50  *
51  * Success happens when the "mask" bits have the specified value (hardware
52  * handshake done).  There are two failure modes:  "usec" microseconds have passed (major
53  * hardware flakeout), or the register reads as all-ones (hardware removed).
54  */
55 int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
56                       u32 mask, u32 done, int usec)
57 {
58         u32     result;
59
60         do {
61                 result = xhci_readl(xhci, ptr);
62                 if (result == ~(u32)0)          /* card removed */
63                         return -ENODEV;
64                 result &= mask;
65                 if (result == done)
66                         return 0;
67                 udelay(1);
68                 usec--;
69         } while (usec > 0);
70         return -ETIMEDOUT;
71 }
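
/*
 * Illustrative usage, as in xhci_halt() below: wait up to XHCI_MAX_HALT_USEC
 * microseconds for the HCHalted status bit to assert:
 *
 *	handshake(xhci, &xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 */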
72
73 /*
74  * Disable interrupts and begin the xHCI halting process.
75  */
76 void xhci_quiesce(struct xhci_hcd *xhci)
77 {
78         u32 halted;
79         u32 cmd;
80         u32 mask;
81
82         mask = ~(XHCI_IRQS);
83         halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
84         if (!halted)
85                 mask &= ~CMD_RUN;
86
87         cmd = xhci_readl(xhci, &xhci->op_regs->command);
88         cmd &= mask;
89         xhci_writel(xhci, cmd, &xhci->op_regs->command);
90 }
91
92 /*
93  * Force HC into halt state.
94  *
95  * Disable any IRQs and clear the run/stop bit.
96  * HC will complete any current and actively pipelined transactions, and
97  * should halt within 16 ms of the run/stop bit being cleared.
98  * Read HC Halted bit in the status register to see when the HC is finished.
99  */
100 int xhci_halt(struct xhci_hcd *xhci)
101 {
102         int ret;
103         xhci_dbg(xhci, "// Halt the HC\n");
104         xhci_quiesce(xhci);
105
106         ret = handshake(xhci, &xhci->op_regs->status,
107                         STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
108         if (!ret) {
109                 xhci->xhc_state |= XHCI_STATE_HALTED;
110                 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
111         } else
112                 xhci_warn(xhci, "Host not halted after %u microseconds.\n",
113                                 XHCI_MAX_HALT_USEC);
114         return ret;
115 }
116
117 /*
118  * Set the run bit and wait for the host to be running.
119  */
120 static int xhci_start(struct xhci_hcd *xhci)
121 {
122         u32 temp;
123         int ret;
124
125         temp = xhci_readl(xhci, &xhci->op_regs->command);
126         temp |= (CMD_RUN);
127         xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
128                         temp);
129         xhci_writel(xhci, temp, &xhci->op_regs->command);
130
131         /*
132          * Wait for the HCHalted Status bit to be 0 to indicate the host is
133          * running.
134          */
135         ret = handshake(xhci, &xhci->op_regs->status,
136                         STS_HALT, 0, XHCI_MAX_HALT_USEC);
137         if (ret == -ETIMEDOUT)
138                 xhci_err(xhci, "Host took too long to start, "
139                                 "waited %u microseconds.\n",
140                                 XHCI_MAX_HALT_USEC);
141         if (!ret)
142                 xhci->xhc_state &= ~XHCI_STATE_HALTED;
143         return ret;
144 }
145
146 /*
147  * Reset a halted HC.
148  *
149  * This resets pipelines, timers, counters, state machines, etc.
150  * Transactions will be terminated immediately, and operational registers
151  * will be set to their defaults.
152  */
153 int xhci_reset(struct xhci_hcd *xhci)
154 {
155         u32 command;
156         u32 state;
157         int ret;
158
159         state = xhci_readl(xhci, &xhci->op_regs->status);
160         if ((state & STS_HALT) == 0) {
161                 xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
162                 return 0;
163         }
164
165         xhci_dbg(xhci, "// Reset the HC\n");
166         command = xhci_readl(xhci, &xhci->op_regs->command);
167         command |= CMD_RESET;
168         xhci_writel(xhci, command, &xhci->op_regs->command);
169
170         ret = handshake(xhci, &xhci->op_regs->command,
171                         CMD_RESET, 0, 10 * 1000 * 1000);
172         if (ret)
173                 return ret;
174
175         xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
176         /*
177          * xHCI cannot write to any doorbells or operational registers other
178          * than status until the "Controller Not Ready" flag is cleared.
179          */
180         return handshake(xhci, &xhci->op_regs->status,
181                          STS_CNR, 0, 10 * 1000 * 1000);
182 }
183
184 #ifdef CONFIG_PCI
185 static int xhci_free_msi(struct xhci_hcd *xhci)
186 {
187         int i;
188
189         if (!xhci->msix_entries)
190                 return -EINVAL;
191
192         for (i = 0; i < xhci->msix_count; i++)
193                 if (xhci->msix_entries[i].vector)
194                         free_irq(xhci->msix_entries[i].vector,
195                                         xhci_to_hcd(xhci));
196         return 0;
197 }
198
199 /*
200  * Set up MSI
201  */
202 static int xhci_setup_msi(struct xhci_hcd *xhci)
203 {
204         int ret;
205         struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
206
207         ret = pci_enable_msi(pdev);
208         if (ret) {
209                 xhci_dbg(xhci, "failed to allocate MSI entry\n");
210                 return ret;
211         }
212
213         ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
214                                 0, "xhci_hcd", xhci_to_hcd(xhci));
215         if (ret) {
216                 xhci_dbg(xhci, "disable MSI interrupt\n");
217                 pci_disable_msi(pdev);
218         }
219
220         return ret;
221 }
222
223 /*
224  * Free IRQs
225  * free all IRQs that have been requested
226  */
227 static void xhci_free_irq(struct xhci_hcd *xhci)
228 {
229         struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
230         int ret;
231
232         /* return if using legacy interrupt */
233         if (xhci_to_hcd(xhci)->irq >= 0)
234                 return;
235
236         ret = xhci_free_msi(xhci);
237         if (!ret)
238                 return;
239         if (pdev->irq >= 0)
240                 free_irq(pdev->irq, xhci_to_hcd(xhci));
241
242         return;
243 }
244
245 /*
246  * Set up MSI-X
247  */
248 static int xhci_setup_msix(struct xhci_hcd *xhci)
249 {
250         int i, ret = 0;
251         struct usb_hcd *hcd = xhci_to_hcd(xhci);
252         struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
253
254         /*
255          * Calculate the number of MSI-X vectors supported:
256          * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
257          *   based on the number of interrupters reported in the xHCI HCSPARAMS1.
258          * - num_online_cpus: one MSI-X vector per online CPU, plus one extra
259          *   vector so an interrupt is always available.
260          */
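        /*
         * Worked example (illustrative): on a system with 4 online CPUs and
         * HCSPARAMS1 reporting 8 interrupters, msix_count = min(4 + 1, 8) = 5.
         */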
261         xhci->msix_count = min(num_online_cpus() + 1,
262                                 HCS_MAX_INTRS(xhci->hcs_params1));
263
264         xhci->msix_entries =
265                 kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
266                                 GFP_KERNEL);
267         if (!xhci->msix_entries) {
268                 xhci_err(xhci, "Failed to allocate MSI-X entries\n");
269                 return -ENOMEM;
270         }
271
272         for (i = 0; i < xhci->msix_count; i++) {
273                 xhci->msix_entries[i].entry = i;
274                 xhci->msix_entries[i].vector = 0;
275         }
276
277         ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
278         if (ret) {
279                 xhci_dbg(xhci, "Failed to enable MSI-X\n");
280                 goto free_entries;
281         }
282
283         for (i = 0; i < xhci->msix_count; i++) {
284                 ret = request_irq(xhci->msix_entries[i].vector,
285                                 (irq_handler_t)xhci_msi_irq,
286                                 0, "xhci_hcd", xhci_to_hcd(xhci));
287                 if (ret)
288                         goto disable_msix;
289         }
290
291         hcd->msix_enabled = 1;
292         return ret;
293
294 disable_msix:
295         xhci_dbg(xhci, "disable MSI-X interrupt\n");
296         xhci_free_irq(xhci);
297         pci_disable_msix(pdev);
298 free_entries:
299         kfree(xhci->msix_entries);
300         xhci->msix_entries = NULL;
301         return ret;
302 }
303
304 /* Free any IRQs and disable MSI-X */
305 static void xhci_cleanup_msix(struct xhci_hcd *xhci)
306 {
307         struct usb_hcd *hcd = xhci_to_hcd(xhci);
308         struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
309
310         xhci_free_irq(xhci);
311
312         if (xhci->msix_entries) {
313                 pci_disable_msix(pdev);
314                 kfree(xhci->msix_entries);
315                 xhci->msix_entries = NULL;
316         } else {
317                 pci_disable_msi(pdev);
318         }
319
320         hcd->msix_enabled = 0;
321         return;
322 }
323
324 static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
325 {
326         int i;
327
328         if (xhci->msix_entries) {
329                 for (i = 0; i < xhci->msix_count; i++)
330                         synchronize_irq(xhci->msix_entries[i].vector);
331         }
332 }
333
334 static int xhci_try_enable_msi(struct usb_hcd *hcd)
335 {
336         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
337         struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
338         int ret;
339
340         /*
341          * Some Fresco Logic host controllers advertise MSI, but fail to
342          * generate interrupts.  Don't even try to enable MSI.
343          */
344         if (xhci->quirks & XHCI_BROKEN_MSI)
345                 goto legacy_irq;
346
347         /* unregister the legacy interrupt */
348         if (hcd->irq)
349                 free_irq(hcd->irq, hcd);
350         hcd->irq = -1;
351
352         ret = xhci_setup_msix(xhci);
353         if (ret)
354                 /* fall back to MSI */
355                 ret = xhci_setup_msi(xhci);
356
357         if (!ret)
358                 /* hcd->irq is -1, we have MSI */
359                 return 0;
360
361         if (!pdev->irq) {
362                 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
363                 return -EINVAL;
364         }
365
366  legacy_irq:
367         /* fall back to legacy interrupt */
368         ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
369                         hcd->irq_descr, hcd);
370         if (ret) {
371                 xhci_err(xhci, "request interrupt %d failed\n",
372                                 pdev->irq);
373                 return ret;
374         }
375         hcd->irq = pdev->irq;
376         return 0;
377 }
378
379 #else
380
381 static int xhci_try_enable_msi(struct usb_hcd *hcd)
382 {
383         return 0;
384 }
385
386 static void xhci_cleanup_msix(struct xhci_hcd *xhci)
387 {
388 }
389
390 static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
391 {
392 }
393
394 #endif
395
396 static void compliance_mode_recovery(unsigned long arg)
397 {
398         struct xhci_hcd *xhci;
399         struct usb_hcd *hcd;
400         u32 temp;
401         int i;
402
403         xhci = (struct xhci_hcd *)arg;
404
405         for (i = 0; i < xhci->num_usb3_ports; i++) {
406                 temp = xhci_readl(xhci, xhci->usb3_ports[i]);
407                 if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
408                         /*
409                          * Compliance Mode Detected. Letting USB Core
410                          * handle the Warm Reset
411                          */
412                         xhci_dbg(xhci, "Compliance Mode Detected->Port %d!\n",
413                                         i + 1);
414                         xhci_dbg(xhci, "Attempting Recovery routine!\n");
415                         hcd = xhci->shared_hcd;
416
417                         if (hcd->state == HC_STATE_SUSPENDED)
418                                 usb_hcd_resume_root_hub(hcd);
419
420                         usb_hcd_poll_rh_status(hcd);
421                 }
422         }
423
424         if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
425                 mod_timer(&xhci->comp_mode_recovery_timer,
426                         jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
427 }
428
429 /*
430  * Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver
431  * that causes ports behind that hardware to enter compliance mode sometimes.
432  * The quirk creates a timer that polls the link state of each host
433  * controller's port every 2 seconds and recovers the port by issuing a Warm
434  * Reset if Compliance Mode is detected; otherwise the port becomes "dead" (no
435  * device connections or disconnections will be detected anymore). Because no
436  * status event is generated when entering compliance mode (per xhci spec),
437  * this quirk is needed on systems that have the failing hardware installed.
438  */
439 static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
440 {
441         xhci->port_status_u0 = 0;
442         init_timer(&xhci->comp_mode_recovery_timer);
443
444         xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
445         xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
446         xhci->comp_mode_recovery_timer.expires = jiffies +
447                         msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
448
449         set_timer_slack(&xhci->comp_mode_recovery_timer,
450                         msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
451         add_timer(&xhci->comp_mode_recovery_timer);
452         xhci_dbg(xhci, "Compliance Mode Recovery Timer Initialized.\n");
453 }
454
455 /*
456  * This function identifies the systems that have installed the SN65LVPE502CP
457  * USB3.0 re-driver and that need the Compliance Mode Quirk.
458  * Systems:
459  * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
460  */
461 static bool compliance_mode_recovery_timer_quirk_check(void)
462 {
463         const char *dmi_product_name, *dmi_sys_vendor;
464
465         dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
466         dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
467         if (!dmi_product_name || !dmi_sys_vendor)
468                 return false;
469
470         if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
471                 return false;
472
473         if (strstr(dmi_product_name, "Z420") ||
474                         strstr(dmi_product_name, "Z620") ||
475                         strstr(dmi_product_name, "Z820") ||
476                         strstr(dmi_product_name, "Z1 Workstation"))
477                 return true;
478
479         return false;
480 }
481
482 static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
483 {
484         return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
485 }
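
/*
 * Illustrative note: with num_usb3_ports == 4, the check above requires
 * port_status_u0 == (1 << 4) - 1 == 0xf, i.e. every USB3 port has been
 * observed in U0 at least once.
 */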
486
487
488 /*
489  * Initialize memory for HCD and xHC (one-time init).
490  *
491  * Program the PAGESIZE register, initialize the device context array, create
492  * device contexts (?), set up a command ring segment (or two?), create event
493  * ring (one for now).
494  */
495 int xhci_init(struct usb_hcd *hcd)
496 {
497         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
498         int retval = 0;
499
500         xhci_dbg(xhci, "xhci_init\n");
501         spin_lock_init(&xhci->lock);
502         if (xhci->hci_version == 0x95 && link_quirk) {
503                 xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
504                 xhci->quirks |= XHCI_LINK_TRB_QUIRK;
505         } else {
506                 xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
507         }
508         retval = xhci_mem_init(xhci, GFP_KERNEL);
509         xhci_dbg(xhci, "Finished xhci_init\n");
510
511         /* Initializing Compliance Mode Recovery Data If Needed */
512         if (compliance_mode_recovery_timer_quirk_check()) {
513                 xhci->quirks |= XHCI_COMP_MODE_QUIRK;
514                 compliance_mode_recovery_timer_init(xhci);
515         }
516
517         return retval;
518 }
519
520 /*-------------------------------------------------------------------------*/
521
522
523 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
524 static void xhci_event_ring_work(unsigned long arg)
525 {
526         unsigned long flags;
527         int temp;
528         u64 temp_64;
529         struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
530         int i, j;
531
532         xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
533
534         spin_lock_irqsave(&xhci->lock, flags);
535         temp = xhci_readl(xhci, &xhci->op_regs->status);
536         xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
537         if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
538                         (xhci->xhc_state & XHCI_STATE_HALTED)) {
539                 xhci_dbg(xhci, "HW died, polling stopped.\n");
540                 spin_unlock_irqrestore(&xhci->lock, flags);
541                 return;
542         }
543
544         temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
545         xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
546         xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
547         xhci->error_bitmask = 0;
548         xhci_dbg(xhci, "Event ring:\n");
549         xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
550         xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
551         temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
552         temp_64 &= ~ERST_PTR_MASK;
553         xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
554         xhci_dbg(xhci, "Command ring:\n");
555         xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
556         xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
557         xhci_dbg_cmd_ptrs(xhci);
558         for (i = 0; i < MAX_HC_SLOTS; ++i) {
559                 if (!xhci->devs[i])
560                         continue;
561                 for (j = 0; j < 31; ++j) {
562                         xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
563                 }
564         }
565         spin_unlock_irqrestore(&xhci->lock, flags);
566
567         if (!xhci->zombie)
568                 mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
569         else
570                 xhci_dbg(xhci, "Quit polling the event ring.\n");
571 }
572 #endif
573
574 static int xhci_run_finished(struct xhci_hcd *xhci)
575 {
576         if (xhci_start(xhci)) {
577                 xhci_halt(xhci);
578                 return -ENODEV;
579         }
580         xhci->shared_hcd->state = HC_STATE_RUNNING;
581         xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
582
583         if (xhci->quirks & XHCI_NEC_HOST)
584                 xhci_ring_cmd_db(xhci);
585
586         xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
587         return 0;
588 }
589
590 /*
591  * Start the HC after it was halted.
592  *
593  * This function is called by the USB core when the HC driver is added.
594  * Its opposite is xhci_stop().
595  *
596  * xhci_init() must be called once before this function can be called.
597  * Reset the HC, enable device slot contexts, program DCBAAP, and
598  * set command ring pointer and event ring pointer.
599  *
600  * Setup MSI-X vectors and enable interrupts.
601  */
602 int xhci_run(struct usb_hcd *hcd)
603 {
604         u32 temp;
605         u64 temp_64;
606         int ret;
607         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
608
609         /* Start the xHCI host controller running only after the USB 2.0 roothub
610          * is set up.
611          */
612
613         hcd->uses_new_polling = 1;
614         if (!usb_hcd_is_primary_hcd(hcd))
615                 return xhci_run_finished(xhci);
616
617         xhci_dbg(xhci, "xhci_run\n");
618
619         ret = xhci_try_enable_msi(hcd);
620         if (ret)
621                 return ret;
622
623 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
624         init_timer(&xhci->event_ring_timer);
625         xhci->event_ring_timer.data = (unsigned long) xhci;
626         xhci->event_ring_timer.function = xhci_event_ring_work;
627         /* Poll the event ring */
628         xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
629         xhci->zombie = 0;
630         xhci_dbg(xhci, "Setting event ring polling timer\n");
631         add_timer(&xhci->event_ring_timer);
632 #endif
633
634         xhci_dbg(xhci, "Command ring memory map follows:\n");
635         xhci_debug_ring(xhci, xhci->cmd_ring);
636         xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
637         xhci_dbg_cmd_ptrs(xhci);
638
639         xhci_dbg(xhci, "ERST memory map follows:\n");
640         xhci_dbg_erst(xhci, &xhci->erst);
641         xhci_dbg(xhci, "Event ring:\n");
642         xhci_debug_ring(xhci, xhci->event_ring);
643         xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
644         temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
645         temp_64 &= ~ERST_PTR_MASK;
646         xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
647
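        /*
         * Note: per the xHCI specification the interrupt moderation interval
         * below is programmed in 250 ns increments, so the value 160 gives a
         * minimum interval of roughly 40 us between interrupts.
         */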
648         xhci_dbg(xhci, "// Set the interrupt modulation register\n");
649         temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
650         temp &= ~ER_IRQ_INTERVAL_MASK;
651         temp |= (u32) 160;
652         xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
653
654         /* Set the HCD state before we enable the irqs */
655         temp = xhci_readl(xhci, &xhci->op_regs->command);
656         temp |= (CMD_EIE);
657         xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
658                         temp);
659         xhci_writel(xhci, temp, &xhci->op_regs->command);
660
661         temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
662         xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
663                         xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
664         xhci_writel(xhci, ER_IRQ_ENABLE(temp),
665                         &xhci->ir_set->irq_pending);
666         xhci_print_ir_set(xhci, 0);
667
668         if (xhci->quirks & XHCI_NEC_HOST)
669                 xhci_queue_vendor_command(xhci, 0, 0, 0,
670                                 TRB_TYPE(TRB_NEC_GET_FW));
671
672         xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
673         return 0;
674 }
675
676 static void xhci_only_stop_hcd(struct usb_hcd *hcd)
677 {
678         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
679
680         spin_lock_irq(&xhci->lock);
681         xhci_halt(xhci);
682
683         /* The shared_hcd is going to be deallocated shortly (the USB core only
684          * calls this function when allocation fails in usb_add_hcd(), or
685          * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
686          */
687         xhci->shared_hcd = NULL;
688         spin_unlock_irq(&xhci->lock);
689 }
690
691 /*
692  * Stop xHCI driver.
693  *
694  * This function is called by the USB core when the HC driver is removed.
695  * Its opposite is xhci_run().
696  *
697  * Disable device contexts, disable IRQs, and quiesce the HC.
698  * Reset the HC, finish any completed transactions, and cleanup memory.
699  */
700 void xhci_stop(struct usb_hcd *hcd)
701 {
702         u32 temp;
703         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
704
705         if (!usb_hcd_is_primary_hcd(hcd)) {
706                 xhci_only_stop_hcd(xhci->shared_hcd);
707                 return;
708         }
709
710         spin_lock_irq(&xhci->lock);
711         /* Make sure the xHC is halted for a USB3 roothub
712          * (xhci_stop() could be called as part of failed init).
713          */
714         xhci_halt(xhci);
715         xhci_reset(xhci);
716         spin_unlock_irq(&xhci->lock);
717
718         xhci_cleanup_msix(xhci);
719
720 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
721         /* Tell the event ring poll function not to reschedule */
722         xhci->zombie = 1;
723         del_timer_sync(&xhci->event_ring_timer);
724 #endif
725
726         /* Deleting Compliance Mode Recovery Timer */
727         if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
728                         (!(xhci_all_ports_seen_u0(xhci))))
729                 del_timer_sync(&xhci->comp_mode_recovery_timer);
730
731         if (xhci->quirks & XHCI_AMD_PLL_FIX)
732                 usb_amd_dev_put();
733
734         xhci_dbg(xhci, "// Disabling event ring interrupts\n");
735         temp = xhci_readl(xhci, &xhci->op_regs->status);
736         xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
737         temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
738         xhci_writel(xhci, ER_IRQ_DISABLE(temp),
739                         &xhci->ir_set->irq_pending);
740         xhci_print_ir_set(xhci, 0);
741
742         xhci_dbg(xhci, "cleaning up memory\n");
743         xhci_mem_cleanup(xhci);
744         xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
745                     xhci_readl(xhci, &xhci->op_regs->status));
746 }
747
748 /*
749  * Shutdown HC (not bus-specific)
750  *
751  * This is called when the machine is rebooting or halting.  We assume that the
752  * machine will be powered off, and the HC's internal state will be reset.
753  * Don't bother to free memory.
754  *
755  * This will only ever be called with the main usb_hcd (the USB3 roothub).
756  */
757 void xhci_shutdown(struct usb_hcd *hcd)
758 {
759         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
760
761         if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
762                 usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
763
764         spin_lock_irq(&xhci->lock);
765         xhci_halt(xhci);
766         spin_unlock_irq(&xhci->lock);
767
768         xhci_cleanup_msix(xhci);
769
770         xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
771                     xhci_readl(xhci, &xhci->op_regs->status));
772 }
773
774 #ifdef CONFIG_PM
775 static void xhci_save_registers(struct xhci_hcd *xhci)
776 {
777         xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
778         xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
779         xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
780         xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
781         xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
782         xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
783         xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
784         xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
785         xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
786 }
787
788 static void xhci_restore_registers(struct xhci_hcd *xhci)
789 {
790         xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
791         xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
792         xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
793         xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
794         xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
795         xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
796         xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
797         xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
798         xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
799 }
800
801 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
802 {
803         u64     val_64;
804
805         /* step 2: initialize command ring buffer */
806         val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
807         val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
808                 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
809                                       xhci->cmd_ring->dequeue) &
810                  (u64) ~CMD_RING_RSVD_BITS) |
811                 xhci->cmd_ring->cycle_state;
812         xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
813                         (long unsigned long) val_64);
814         xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
815 }
816
817 /*
818  * The whole command ring must be cleared to zero when we suspend the host.
819  *
820  * The host doesn't save the command ring pointer in the suspend well, so we
821  * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
822  * aligned, because of the reserved bits in the command ring dequeue pointer
823  * register.  Therefore, we can't just set the dequeue pointer back in the
824  * middle of the ring (TRBs are 16-byte aligned).
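 * (Illustrative arithmetic: a TRB is 16 bytes, so only every fourth TRB slot
 * in a segment is 64-byte aligned.)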
825  */
826 static void xhci_clear_command_ring(struct xhci_hcd *xhci)
827 {
828         struct xhci_ring *ring;
829         struct xhci_segment *seg;
830
831         ring = xhci->cmd_ring;
832         seg = ring->deq_seg;
833         do {
834                 memset(seg->trbs, 0,
835                         sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
836                 seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
837                         cpu_to_le32(~TRB_CYCLE);
838                 seg = seg->next;
839         } while (seg != ring->deq_seg);
840
841         /* Reset the software enqueue and dequeue pointers */
842         ring->deq_seg = ring->first_seg;
843         ring->dequeue = ring->first_seg->trbs;
844         ring->enq_seg = ring->deq_seg;
845         ring->enqueue = ring->dequeue;
846
847         /*
848          * Ring is now zeroed, so the HW should look for change of ownership
849          * when the cycle bit is set to 1.
850          */
851         ring->cycle_state = 1;
852
853         /*
854          * Reset the hardware dequeue pointer.
855          * Yes, this will need to be re-written after resume, but we're paranoid
856          * and want to make sure the hardware doesn't access bogus memory
857          * because, say, the BIOS or an SMI started the host without changing
858          * the command ring pointers.
859          */
860         xhci_set_cmd_ring_deq(xhci);
861 }
862
863 /*
864  * Stop HC (not bus-specific)
865  *
866  * This is called when the machine transitions into S3/S4 mode.
867  *
868  */
869 int xhci_suspend(struct xhci_hcd *xhci)
870 {
871         int                     rc = 0;
872         struct usb_hcd          *hcd = xhci_to_hcd(xhci);
873         u32                     command;
874
875         /* Don't poll the roothubs on bus suspend. */
876         xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
877         clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
878         del_timer_sync(&hcd->rh_timer);
879
880         spin_lock_irq(&xhci->lock);
881         clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
882         clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
883         /* step 1: stop endpoint */
884         /* skipped, assuming that port suspend has already been done */
885
886         /* step 2: clear Run/Stop bit */
887         command = xhci_readl(xhci, &xhci->op_regs->command);
888         command &= ~CMD_RUN;
889         xhci_writel(xhci, command, &xhci->op_regs->command);
890         if (handshake(xhci, &xhci->op_regs->status,
891                       STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
892                 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
893                 spin_unlock_irq(&xhci->lock);
894                 return -ETIMEDOUT;
895         }
896         xhci_clear_command_ring(xhci);
897
898         /* step 3: save registers */
899         xhci_save_registers(xhci);
900
901         /* step 4: set CSS flag */
902         command = xhci_readl(xhci, &xhci->op_regs->command);
903         command |= CMD_CSS;
904         xhci_writel(xhci, command, &xhci->op_regs->command);
905         if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
906                 xhci_warn(xhci, "WARN: xHC save state timeout\n");
907                 spin_unlock_irq(&xhci->lock);
908                 return -ETIMEDOUT;
909         }
910         spin_unlock_irq(&xhci->lock);
911
912         /*
913          * Deleting Compliance Mode Recovery Timer because the xHCI Host
914          * is about to be suspended.
915          */
916         if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
917                         (!(xhci_all_ports_seen_u0(xhci)))) {
918                 del_timer_sync(&xhci->comp_mode_recovery_timer);
919                 xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted!\n");
920         }
921
922         /* step 5: remove core well power */
923         /* synchronize irq when using MSI-X */
924         xhci_msix_sync_irqs(xhci);
925
926         return rc;
927 }
928
929 /*
930  * start xHC (not bus-specific)
931  *
932  * This is called when the machine transitions out of S3/S4 mode.
933  *
934  */
935 int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
936 {
937         u32                     command, temp = 0;
938         struct usb_hcd          *hcd = xhci_to_hcd(xhci);
939         struct usb_hcd          *secondary_hcd;
940         int                     retval = 0;
941         bool                    comp_timer_running = false;
942
943         /* Wait a bit if either of the roothubs need to settle from the
944          * transition into bus suspend.
945          */
946         if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
947                         time_before(jiffies,
948                                 xhci->bus_state[1].next_statechange))
949                 msleep(100);
950
951         set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
952         set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
953
954         spin_lock_irq(&xhci->lock);
955         if (xhci->quirks & XHCI_RESET_ON_RESUME)
956                 hibernated = true;
957
958         if (!hibernated) {
959                 /* step 1: restore register */
960                 xhci_restore_registers(xhci);
961                 /* step 2: initialize command ring buffer */
962                 xhci_set_cmd_ring_deq(xhci);
963                 /* step 3: restore state and start state*/
964                 /* step 3: set CRS flag */
965                 command = xhci_readl(xhci, &xhci->op_regs->command);
966                 command |= CMD_CRS;
967                 xhci_writel(xhci, command, &xhci->op_regs->command);
968                 if (handshake(xhci, &xhci->op_regs->status,
969                               STS_RESTORE, 0, 10 * 1000)) {
970                         xhci_warn(xhci, "WARN: xHC restore state timeout\n");
971                         spin_unlock_irq(&xhci->lock);
972                         return -ETIMEDOUT;
973                 }
974                 temp = xhci_readl(xhci, &xhci->op_regs->status);
975         }
976
977         /* If restore operation fails, re-initialize the HC during resume */
978         if ((temp & STS_SRE) || hibernated) {
979
980                 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
981                                 !(xhci_all_ports_seen_u0(xhci))) {
982                         del_timer_sync(&xhci->comp_mode_recovery_timer);
983                         xhci_dbg(xhci, "Compliance Mode Recovery Timer deleted!\n");
984                 }
985
986                 /* Let the USB core know _both_ roothubs lost power. */
987                 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
988                 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
989
990                 xhci_dbg(xhci, "Stop HCD\n");
991                 xhci_halt(xhci);
992                 xhci_reset(xhci);
993                 spin_unlock_irq(&xhci->lock);
994                 xhci_cleanup_msix(xhci);
995
996 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
997                 /* Tell the event ring poll function not to reschedule */
998                 xhci->zombie = 1;
999                 del_timer_sync(&xhci->event_ring_timer);
1000 #endif
1001
1002                 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
1003                 temp = xhci_readl(xhci, &xhci->op_regs->status);
1004                 xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
1005                 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
1006                 xhci_writel(xhci, ER_IRQ_DISABLE(temp),
1007                                 &xhci->ir_set->irq_pending);
1008                 xhci_print_ir_set(xhci, 0);
1009
1010                 xhci_dbg(xhci, "cleaning up memory\n");
1011                 xhci_mem_cleanup(xhci);
1012                 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
1013                             xhci_readl(xhci, &xhci->op_regs->status));
1014
1015                 /* USB core calls the PCI reinit and start functions twice:
1016                  * first with the primary HCD, and then with the secondary HCD.
1017                  * If we don't do the same, the host will never be started.
1018                  */
1019                 if (!usb_hcd_is_primary_hcd(hcd))
1020                         secondary_hcd = hcd;
1021                 else
1022                         secondary_hcd = xhci->shared_hcd;
1023
1024                 xhci_dbg(xhci, "Initialize the xhci_hcd\n");
1025                 retval = xhci_init(hcd->primary_hcd);
1026                 if (retval)
1027                         return retval;
1028                 comp_timer_running = true;
1029
1030                 xhci_dbg(xhci, "Start the primary HCD\n");
1031                 retval = xhci_run(hcd->primary_hcd);
1032                 if (!retval) {
1033                         xhci_dbg(xhci, "Start the secondary HCD\n");
1034                         retval = xhci_run(secondary_hcd);
1035                 }
1036                 hcd->state = HC_STATE_SUSPENDED;
1037                 xhci->shared_hcd->state = HC_STATE_SUSPENDED;
1038                 goto done;
1039         }
1040
1041         /* step 4: set Run/Stop bit */
1042         command = xhci_readl(xhci, &xhci->op_regs->command);
1043         command |= CMD_RUN;
1044         xhci_writel(xhci, command, &xhci->op_regs->command);
1045         handshake(xhci, &xhci->op_regs->status, STS_HALT,
1046                   0, 250 * 1000);
1047
1048         /* step 5: walk topology and initialize portsc,
1049          * portpmsc and portli
1050          */
1051         /* this is done in bus_resume */
1052
1053         /* step 6: restart each of the previously
1054          * Running endpoints by ringing their doorbells
1055          */
1056
1057         spin_unlock_irq(&xhci->lock);
1058
1059  done:
1060         if (retval == 0) {
1061                 usb_hcd_resume_root_hub(hcd);
1062                 usb_hcd_resume_root_hub(xhci->shared_hcd);
1063         }
1064
1065         /*
1066          * If the system is subject to the quirk, the Compliance Mode Timer must
1067          * always be re-initialized after a system resume: the ports may suffer
1068          * the Compliance Mode issue again, regardless of whether they had
1069          * entered U0 before the system was suspended.
1070          */
1071         if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
1072                 compliance_mode_recovery_timer_init(xhci);
1073
1074         /* Re-enable port polling. */
1075         xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1076         set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1077         usb_hcd_poll_rh_status(hcd);
1078
1079         return retval;
1080 }
1081 #endif  /* CONFIG_PM */
1082
1083 /*-------------------------------------------------------------------------*/
1084
1085 /**
1086  * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
1087  * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
1088  * value to right shift 1 for the bitmask.
1089  *
1090  * Index  = (epnum * 2) + direction - 1,
1091  * where direction = 0 for OUT, 1 for IN.
1092  * For control endpoints, the IN index is used (OUT index is unused), so
1093  * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
1094  */
1095 unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
1096 {
1097         unsigned int index;
1098         if (usb_endpoint_xfer_control(desc))
1099                 index = (unsigned int) (usb_endpoint_num(desc)*2);
1100         else
1101                 index = (unsigned int) (usb_endpoint_num(desc)*2) +
1102                         (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
1103         return index;
1104 }
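
/*
 * Worked examples (illustrative) of the index formula above:
 * ep 1 IN  (bEndpointAddress 0x81): index = (1 * 2) + 1 - 1 = 2
 * ep 2 OUT (bEndpointAddress 0x02): index = (2 * 2) + 0 - 1 = 3
 * The default control endpoint 0 always maps to index 0.
 */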
1105
1106 /* Find the flag for this endpoint (for use in the control context).  Use the
1107  * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
1108  * bit 1, etc.
1109  */
1110 unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
1111 {
1112         return 1 << (xhci_get_endpoint_index(desc) + 1);
1113 }
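
/*
 * Illustrative example: ep 1 IN has endpoint index 2 (see above), so its
 * flag is 1 << (2 + 1) = 0x8, which matches the added_ctxs example shown
 * for xhci_last_valid_endpoint() below.
 */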
1114
1115 /* Find the flag for this endpoint (for use in the control context).  Use the
1116  * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
1117  * bit 1, etc.
1118  */
1119 unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
1120 {
1121         return 1 << (ep_index + 1);
1122 }
1123
1124 /* Compute the last valid endpoint context index.  Basically, this is the
1125  * endpoint index plus one.  For slot contexts with more than one valid endpoint,
1126  * we find the most significant bit set in the added contexts flags.
1127  * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
1128  * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
1129  */
1130 unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
1131 {
1132         return fls(added_ctxs) - 1;
1133 }
1134
1135 /* Returns 1 if the arguments are OK;
1136  * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
1137  */
1138 static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
1139                 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
1140                 const char *func) {
1141         struct xhci_hcd *xhci;
1142         struct xhci_virt_device *virt_dev;
1143
1144         if (!hcd || (check_ep && !ep) || !udev) {
1145                 printk(KERN_DEBUG "xHCI %s called with invalid args\n",
1146                                 func);
1147                 return -EINVAL;
1148         }
1149         if (!udev->parent) {
1150                 printk(KERN_DEBUG "xHCI %s called for root hub\n",
1151                                 func);
1152                 return 0;
1153         }
1154
1155         xhci = hcd_to_xhci(hcd);
1156         if (check_virt_dev) {
1157                 if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
1158                         printk(KERN_DEBUG "xHCI %s called with unaddressed "
1159                                                 "device\n", func);
1160                         return -EINVAL;
1161                 }
1162
1163                 virt_dev = xhci->devs[udev->slot_id];
1164                 if (virt_dev->udev != udev) {
1165                         printk(KERN_DEBUG "xHCI %s called with udev and "
1166                                           "virt_dev that do not match\n", func);
1167                         return -EINVAL;
1168                 }
1169         }
1170
1171         if (xhci->xhc_state & XHCI_STATE_HALTED)
1172                 return -ENODEV;
1173
1174         return 1;
1175 }
1176
1177 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1178                 struct usb_device *udev, struct xhci_command *command,
1179                 bool ctx_change, bool must_succeed);
1180
1181 /*
1182  * Full speed devices may have a max packet size greater than 8 bytes, but the
1183  * USB core doesn't know that until it reads the first 8 bytes of the
1184  * descriptor.  If the usb_device's max packet size changes after that point,
1185  * we need to issue an evaluate context command and wait on it.
1186  */
1187 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
1188                 unsigned int ep_index, struct urb *urb)
1189 {
1190         struct xhci_container_ctx *in_ctx;
1191         struct xhci_container_ctx *out_ctx;
1192         struct xhci_input_control_ctx *ctrl_ctx;
1193         struct xhci_ep_ctx *ep_ctx;
1194         int max_packet_size;
1195         int hw_max_packet_size;
1196         int ret = 0;
1197
1198         out_ctx = xhci->devs[slot_id]->out_ctx;
1199         ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1200         hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
1201         max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
1202         if (hw_max_packet_size != max_packet_size) {
1203                 xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
1204                 xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
1205                                 max_packet_size);
1206                 xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
1207                                 hw_max_packet_size);
1208                 xhci_dbg(xhci, "Issuing evaluate context command.\n");
1209
1210                 /* Set up the modified control endpoint 0 */
1211                 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1212                                 xhci->devs[slot_id]->out_ctx, ep_index);
1213                 in_ctx = xhci->devs[slot_id]->in_ctx;
1214                 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1215                 ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
1216                 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
1217
1218                 /* Set up the input context flags for the command */
1219                 /* FIXME: This won't work if a non-default control endpoint
1220                  * changes max packet sizes.
1221                  */
1222                 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1223                 ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
1224                 ctrl_ctx->drop_flags = 0;
1225
1226                 xhci_dbg(xhci, "Slot %d input context\n", slot_id);
1227                 xhci_dbg_ctx(xhci, in_ctx, ep_index);
1228                 xhci_dbg(xhci, "Slot %d output context\n", slot_id);
1229                 xhci_dbg_ctx(xhci, out_ctx, ep_index);
1230
1231                 ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
1232                                 true, false);
1233
1234                 /* Clean up the input context for later use by bandwidth
1235                  * functions.
1236                  */
1237                 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
1238         }
1239         return ret;
1240 }
1241
1242 /*
1243  * non-error returns are a promise to giveback() the urb later
1244  * we drop ownership so next owner (or urb unlink) can get it
1245  */
1246 int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1247 {
1248         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1249         struct xhci_td *buffer;
1250         unsigned long flags;
1251         int ret = 0;
1252         unsigned int slot_id, ep_index;
1253         struct urb_priv *urb_priv;
1254         int size, i;
1255
1256         if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
1257                                         true, true, __func__) <= 0)
1258                 return -EINVAL;
1259
1260         slot_id = urb->dev->slot_id;
1261         ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1262
1263         if (!HCD_HW_ACCESSIBLE(hcd)) {
1264                 if (!in_interrupt())
1265                         xhci_dbg(xhci, "urb submitted during PCI suspend\n");
1266                 ret = -ESHUTDOWN;
1267                 goto exit;
1268         }
1269
1270         if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1271                 size = urb->number_of_packets;
1272         else
1273                 size = 1;
1274
1275         urb_priv = kzalloc(sizeof(struct urb_priv) +
1276                                   size * sizeof(struct xhci_td *), mem_flags);
1277         if (!urb_priv)
1278                 return -ENOMEM;
1279
1280         buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
1281         if (!buffer) {
1282                 kfree(urb_priv);
1283                 return -ENOMEM;
1284         }
1285
1286         for (i = 0; i < size; i++) {
1287                 urb_priv->td[i] = buffer;
1288                 buffer++;
1289         }
1290
1291         urb_priv->length = size;
1292         urb_priv->td_cnt = 0;
1293         urb->hcpriv = urb_priv;
1294
1295         if (usb_endpoint_xfer_control(&urb->ep->desc)) {
1296                 /* Check to see if the max packet size for the default control
1297                  * endpoint changed during FS device enumeration
1298                  */
1299                 if (urb->dev->speed == USB_SPEED_FULL) {
1300                         ret = xhci_check_maxpacket(xhci, slot_id,
1301                                         ep_index, urb);
1302                         if (ret < 0) {
1303                                 xhci_urb_free_priv(xhci, urb_priv);
1304                                 urb->hcpriv = NULL;
1305                                 return ret;
1306                         }
1307                 }
1308
1309                 /* We have a spinlock and interrupts disabled, so we must pass
1310                  * atomic context to this function, which may allocate memory.
1311                  */
1312                 spin_lock_irqsave(&xhci->lock, flags);
1313                 if (xhci->xhc_state & XHCI_STATE_DYING)
1314                         goto dying;
1315                 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1316                                 slot_id, ep_index);
1317                 if (ret)
1318                         goto free_priv;
1319                 spin_unlock_irqrestore(&xhci->lock, flags);
1320         } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
1321                 spin_lock_irqsave(&xhci->lock, flags);
1322                 if (xhci->xhc_state & XHCI_STATE_DYING)
1323                         goto dying;
1324                 if (xhci->devs[slot_id]->eps[ep_index].ep_state &
1325                                 EP_GETTING_STREAMS) {
1326                         xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
1327                                         "is transitioning to using streams.\n");
1328                         ret = -EINVAL;
1329                 } else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
1330                                 EP_GETTING_NO_STREAMS) {
1331                         xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
1332                                         "is transitioning to "
1333                                         "not having streams.\n");
1334                         ret = -EINVAL;
1335                 } else {
1336                         ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1337                                         slot_id, ep_index);
1338                 }
1339                 if (ret)
1340                         goto free_priv;
1341                 spin_unlock_irqrestore(&xhci->lock, flags);
1342         } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
1343                 spin_lock_irqsave(&xhci->lock, flags);
1344                 if (xhci->xhc_state & XHCI_STATE_DYING)
1345                         goto dying;
1346                 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1347                                 slot_id, ep_index);
1348                 if (ret)
1349                         goto free_priv;
1350                 spin_unlock_irqrestore(&xhci->lock, flags);
1351         } else {
1352                 spin_lock_irqsave(&xhci->lock, flags);
1353                 if (xhci->xhc_state & XHCI_STATE_DYING)
1354                         goto dying;
1355                 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1356                                 slot_id, ep_index);
1357                 if (ret)
1358                         goto free_priv;
1359                 spin_unlock_irqrestore(&xhci->lock, flags);
1360         }
1361 exit:
1362         return ret;
1363 dying:
1364         xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
1365                         "non-responsive xHCI host.\n",
1366                         urb->ep->desc.bEndpointAddress, urb);
1367         ret = -ESHUTDOWN;
1368 free_priv:
1369         xhci_urb_free_priv(xhci, urb_priv);
1370         urb->hcpriv = NULL;
1371         spin_unlock_irqrestore(&xhci->lock, flags);
1372         return ret;
1373 }
1374
1375 /* Get the right ring for the given URB.
1376  * If the endpoint supports streams, boundary check the URB's stream ID.
1377  * If the endpoint doesn't support streams, return the singular endpoint ring.
1378  */
1379 static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
1380                 struct urb *urb)
1381 {
1382         unsigned int slot_id;
1383         unsigned int ep_index;
1384         unsigned int stream_id;
1385         struct xhci_virt_ep *ep;
1386
1387         slot_id = urb->dev->slot_id;
1388         ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1389         stream_id = urb->stream_id;
1390         ep = &xhci->devs[slot_id]->eps[ep_index];
1391         /* Common case: no streams */
1392         if (!(ep->ep_state & EP_HAS_STREAMS))
1393                 return ep->ring;
1394
1395         if (stream_id == 0) {
1396                 xhci_warn(xhci,
1397                                 "WARN: Slot ID %u, ep index %u has streams, "
1398                                 "but URB has no stream ID.\n",
1399                                 slot_id, ep_index);
1400                 return NULL;
1401         }
1402
1403         if (stream_id < ep->stream_info->num_streams)
1404                 return ep->stream_info->stream_rings[stream_id];
1405
1406         xhci_warn(xhci,
1407                         "WARN: Slot ID %u, ep index %u has "
1408                         "stream IDs 1 to %u allocated, "
1409                         "but stream ID %u is requested.\n",
1410                         slot_id, ep_index,
1411                         ep->stream_info->num_streams - 1,
1412                         stream_id);
1413         return NULL;
1414 }
1415
1416 /*
1417  * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
1418  * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
1419  * should pick up where it left off in the TD, unless a Set Transfer Ring
1420  * Dequeue Pointer is issued.
1421  *
1422  * The TRBs that make up the buffers for the canceled URB will be "removed" from
1423  * the ring.  Since the ring is a contiguous structure, they can't be physically
1424  * removed.  Instead, there are three cases to handle:
1425  *
1426  *  1) If the HC is in the middle of processing the URB to be canceled, we
1427  *     simply move the ring's dequeue pointer past those TRBs using the Set
1428  *     Transfer Ring Dequeue Pointer command.  This will be the common case,
1429  *     when drivers timeout on the last submitted URB and attempt to cancel.
1430  *
1431  *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
1432  *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
1433  *     HC will need to invalidate any TRBs it has cached after the stop
1434  *     endpoint command, as noted in the xHCI 0.95 errata.
1435  *
1436  *  3) The TD may have completed by the time the Stop Endpoint Command
1437  *     completes, so software needs to handle that case too.
1438  *
1439  * This function should protect against the TD enqueueing code ringing the
1440  * doorbell while this code is waiting for a Stop Endpoint command to complete.
1441  * It also needs to account for multiple cancellations happening at the same
1442  * time for the same endpoint.
1443  *
1444  * Note that this function can be called in any context, or so says
1445  * usb_hcd_unlink_urb()
1446  */
1447 int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1448 {
1449         unsigned long flags;
1450         int ret, i;
1451         u32 temp;
1452         struct xhci_hcd *xhci;
1453         struct urb_priv *urb_priv;
1454         struct xhci_td *td;
1455         unsigned int ep_index;
1456         struct xhci_ring *ep_ring;
1457         struct xhci_virt_ep *ep;
1458
1459         xhci = hcd_to_xhci(hcd);
1460         spin_lock_irqsave(&xhci->lock, flags);
1461         /* Make sure the URB hasn't completed or been unlinked already */
1462         ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1463         if (ret || !urb->hcpriv)
1464                 goto done;
1465         temp = xhci_readl(xhci, &xhci->op_regs->status);
1466         if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
1467                 xhci_dbg(xhci, "HW died, freeing TD.\n");
1468                 urb_priv = urb->hcpriv;
1469                 for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
1470                         td = urb_priv->td[i];
1471                         if (!list_empty(&td->td_list))
1472                                 list_del_init(&td->td_list);
1473                         if (!list_empty(&td->cancelled_td_list))
1474                                 list_del_init(&td->cancelled_td_list);
1475                 }
1476
1477                 usb_hcd_unlink_urb_from_ep(hcd, urb);
1478                 spin_unlock_irqrestore(&xhci->lock, flags);
1479                 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1480                 xhci_urb_free_priv(xhci, urb_priv);
1481                 return ret;
1482         }
1483         if ((xhci->xhc_state & XHCI_STATE_DYING) ||
1484                         (xhci->xhc_state & XHCI_STATE_HALTED)) {
1485                 xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
1486                                 "non-responsive xHCI host.\n",
1487                                 urb->ep->desc.bEndpointAddress, urb);
1488                 /* Let the stop endpoint command watchdog timer (which set this
1489                  * state) finish cleaning up the endpoint TD lists.  We must
1490                  * have caught it in the middle of dropping a lock and giving
1491                  * back an URB.
1492                  */
1493                 goto done;
1494         }
1495
1496         xhci_dbg(xhci, "Cancel URB %p\n", urb);
1497         xhci_dbg(xhci, "Event ring:\n");
1498         xhci_debug_ring(xhci, xhci->event_ring);
1499         ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1500         ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
1501         ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1502         if (!ep_ring) {
1503                 ret = -EINVAL;
1504                 goto done;
1505         }
1506
1507         xhci_dbg(xhci, "Endpoint ring:\n");
1508         xhci_debug_ring(xhci, ep_ring);
1509
1510         urb_priv = urb->hcpriv;
1511
1512         for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
1513                 td = urb_priv->td[i];
1514                 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
1515         }
1516
1517         /* Queue a stop endpoint command, but only if this is
1518          * the first cancellation to be handled.
1519          */
1520         if (!(ep->ep_state & EP_HALT_PENDING)) {
1521                 ep->ep_state |= EP_HALT_PENDING;
1522                 ep->stop_cmds_pending++;
1523                 ep->stop_cmd_timer.expires = jiffies +
1524                         XHCI_STOP_EP_CMD_TIMEOUT * HZ;
1525                 add_timer(&ep->stop_cmd_timer);
1526                 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
1527                 xhci_ring_cmd_db(xhci);
1528         }
1529 done:
1530         spin_unlock_irqrestore(&xhci->lock, flags);
1531         return ret;
1532 }
1533
1534 /* Drop an endpoint from a new bandwidth configuration for this device.
1535  * Only one call to this function is allowed per endpoint before
1536  * check_bandwidth() or reset_bandwidth() must be called.
1537  * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1538  * add the endpoint to the schedule with possibly new parameters denoted by a
1539  * different endpoint descriptor in usb_host_endpoint.
1540  * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1541  * not allowed.
1542  *
1543  * The USB core will not allow URBs to be queued to an endpoint that is being
1544  * disabled, so there's no need for mutual exclusion to protect
1545  * the xhci->devs[slot_id] structure.
1546  */
1547 int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1548                 struct usb_host_endpoint *ep)
1549 {
1550         struct xhci_hcd *xhci;
1551         struct xhci_container_ctx *in_ctx, *out_ctx;
1552         struct xhci_input_control_ctx *ctrl_ctx;
1553         struct xhci_slot_ctx *slot_ctx;
1554         unsigned int last_ctx;
1555         unsigned int ep_index;
1556         struct xhci_ep_ctx *ep_ctx;
1557         u32 drop_flag;
1558         u32 new_add_flags, new_drop_flags, new_slot_info;
1559         int ret;
1560
1561         ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1562         if (ret <= 0)
1563                 return ret;
1564         xhci = hcd_to_xhci(hcd);
1565         if (xhci->xhc_state & XHCI_STATE_DYING)
1566                 return -ENODEV;
1567
1568         xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1569         drop_flag = xhci_get_endpoint_flag(&ep->desc);
1570         if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1571                 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1572                                 __func__, drop_flag);
1573                 return 0;
1574         }
1575
1576         in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1577         out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1578         ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1579         ep_index = xhci_get_endpoint_index(&ep->desc);
1580         ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1581         /* If the HC already knows the endpoint is disabled,
1582          * or the HCD has noted it is disabled, ignore this request
1583          */
1584         if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1585              cpu_to_le32(EP_STATE_DISABLED)) ||
1586             le32_to_cpu(ctrl_ctx->drop_flags) &
1587             xhci_get_endpoint_flag(&ep->desc)) {
1588                 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1589                                 __func__, ep);
1590                 return 0;
1591         }
1592
1593         ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1594         new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1595
1596         ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1597         new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1598
1599         last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
1600         slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1601         /* Update the last valid endpoint context, if we deleted the last one */
1602         if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
1603             LAST_CTX(last_ctx)) {
1604                 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1605                 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
1606         }
1607         new_slot_info = le32_to_cpu(slot_ctx->dev_info);
1608
1609         xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1610
1611         xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1612                         (unsigned int) ep->desc.bEndpointAddress,
1613                         udev->slot_id,
1614                         (unsigned int) new_drop_flags,
1615                         (unsigned int) new_add_flags,
1616                         (unsigned int) new_slot_info);
1617         return 0;
1618 }
1619
1620 /* Add an endpoint to a new possible bandwidth configuration for this device.
1621  * Only one call to this function is allowed per endpoint before
1622  * check_bandwidth() or reset_bandwidth() must be called.
1623  * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1624  * add the endpoint to the schedule with possibly new parameters denoted by a
1625  * different endpoint descriptor in usb_host_endpoint.
1626  * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1627  * not allowed.
1628  *
1629  * The USB core will not allow URBs to be queued to an endpoint until the
1630  * configuration or alt setting is installed in the device, so there's no need
1631  * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1632  */
1633 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1634                 struct usb_host_endpoint *ep)
1635 {
1636         struct xhci_hcd *xhci;
1637         struct xhci_container_ctx *in_ctx, *out_ctx;
1638         unsigned int ep_index;
1639         struct xhci_ep_ctx *ep_ctx;
1640         struct xhci_slot_ctx *slot_ctx;
1641         struct xhci_input_control_ctx *ctrl_ctx;
1642         u32 added_ctxs;
1643         unsigned int last_ctx;
1644         u32 new_add_flags, new_drop_flags, new_slot_info;
1645         struct xhci_virt_device *virt_dev;
1646         int ret = 0;
1647
1648         ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1649         if (ret <= 0) {
1650                 /* So we won't queue a reset ep command for a root hub */
1651                 ep->hcpriv = NULL;
1652                 return ret;
1653         }
1654         xhci = hcd_to_xhci(hcd);
1655         if (xhci->xhc_state & XHCI_STATE_DYING)
1656                 return -ENODEV;
1657
1658         added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1659         last_ctx = xhci_last_valid_endpoint(added_ctxs);
1660         if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1661                 /* FIXME when we have to issue an evaluate endpoint command to
1662                  * deal with ep0 max packet size changing once we get the
1663                  * descriptors
1664                  */
1665                 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1666                                 __func__, added_ctxs);
1667                 return 0;
1668         }
1669
1670         virt_dev = xhci->devs[udev->slot_id];
1671         in_ctx = virt_dev->in_ctx;
1672         out_ctx = virt_dev->out_ctx;
1673         ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1674         ep_index = xhci_get_endpoint_index(&ep->desc);
1675         ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1676
1677         /* If this endpoint is already in use, and the upper layers are trying
1678          * to add it again without dropping it, reject the addition.
1679          */
1680         if (virt_dev->eps[ep_index].ring &&
1681                         !(le32_to_cpu(ctrl_ctx->drop_flags) &
1682                                 xhci_get_endpoint_flag(&ep->desc))) {
1683                 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1684                                 "without dropping it.\n",
1685                                 (unsigned int) ep->desc.bEndpointAddress);
1686                 return -EINVAL;
1687         }
1688
1689         /* If the HCD has already noted the endpoint is enabled,
1690          * ignore this request.
1691          */
1692         if (le32_to_cpu(ctrl_ctx->add_flags) &
1693             xhci_get_endpoint_flag(&ep->desc)) {
1694                 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1695                                 __func__, ep);
1696                 return 0;
1697         }
1698
1699         /*
1700          * Configuration and alternate setting changes must be done in
1701          * process context, not interrupt context (or so the documentation
1702          * for usb_set_interface() and usb_set_configuration() claims).
1703          */
1704         if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1705                 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1706                                 __func__, ep->desc.bEndpointAddress);
1707                 return -ENOMEM;
1708         }
1709
1710         ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1711         new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1712
1713         /* If xhci_endpoint_disable() was called for this endpoint, but the
1714          * xHC hasn't been notified yet through the check_bandwidth() call,
1715          * this re-adds a new state for the endpoint from the new endpoint
1716          * descriptors.  We must drop and re-add this endpoint, so we leave the
1717          * drop flags alone.
1718          */
1719         new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1720
1721         slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1722         /* Update the last valid endpoint context, if we just added one past it */
1723         if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
1724             LAST_CTX(last_ctx)) {
1725                 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1726                 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
1727         }
1728         new_slot_info = le32_to_cpu(slot_ctx->dev_info);
1729
1730         /* Store the usb_device pointer for later use */
1731         ep->hcpriv = udev;
1732
1733         xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1734                         (unsigned int) ep->desc.bEndpointAddress,
1735                         udev->slot_id,
1736                         (unsigned int) new_drop_flags,
1737                         (unsigned int) new_add_flags,
1738                         (unsigned int) new_slot_info);
1739         return 0;
1740 }
1741
1742 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1743 {
1744         struct xhci_input_control_ctx *ctrl_ctx;
1745         struct xhci_ep_ctx *ep_ctx;
1746         struct xhci_slot_ctx *slot_ctx;
1747         int i;
1748
1749         /* When a device's add flag and drop flag are zero, any subsequent
1750          * configure endpoint command will leave that endpoint's state
1751          * untouched.  Make sure we don't leave any old state in the input
1752          * endpoint contexts.
1753          */
1754         ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1755         ctrl_ctx->drop_flags = 0;
1756         ctrl_ctx->add_flags = 0;
1757         slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1758         slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1759         /* Endpoint 0 is always valid */
1760         slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
1761         for (i = 1; i < 31; ++i) {
1762                 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1763                 ep_ctx->ep_info = 0;
1764                 ep_ctx->ep_info2 = 0;
1765                 ep_ctx->deq = 0;
1766                 ep_ctx->tx_info = 0;
1767         }
1768 }
1769
1770 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1771                 struct usb_device *udev, u32 *cmd_status)
1772 {
1773         int ret;
1774
1775         switch (*cmd_status) {
1776         case COMP_ENOMEM:
1777                 dev_warn(&udev->dev, "Not enough host controller resources "
1778                                 "for new device state.\n");
1779                 ret = -ENOMEM;
1780                 /* FIXME: can we allocate more resources for the HC? */
1781                 break;
1782         case COMP_BW_ERR:
1783         case COMP_2ND_BW_ERR:
1784                 dev_warn(&udev->dev, "Not enough bandwidth "
1785                                 "for new device state.\n");
1786                 ret = -ENOSPC;
1787                 /* FIXME: can we go back to the old state? */
1788                 break;
1789         case COMP_TRB_ERR:
1790                 /* the HCD set up something wrong */
1791                 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1792                                 "add flag = 1, "
1793                                 "and endpoint is not disabled.\n");
1794                 ret = -EINVAL;
1795                 break;
1796         case COMP_DEV_ERR:
1797                 dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
1798                                 "configure command.\n");
1799                 ret = -ENODEV;
1800                 break;
1801         case COMP_SUCCESS:
1802                 dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
1803                 ret = 0;
1804                 break;
1805         default:
1806                 xhci_err(xhci, "ERROR: unexpected command completion "
1807                                 "code 0x%x.\n", *cmd_status);
1808                 ret = -EINVAL;
1809                 break;
1810         }
1811         return ret;
1812 }
1813
1814 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1815                 struct usb_device *udev, u32 *cmd_status)
1816 {
1817         int ret;
1818         struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
1819
1820         switch (*cmd_status) {
1821         case COMP_EINVAL:
1822                 dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
1823                                 "context command.\n");
1824                 ret = -EINVAL;
1825                 break;
1826         case COMP_EBADSLT:
1827                 dev_warn(&udev->dev, "WARN: slot not enabled for "
1828                                 "evaluate context command.\n");
                     ret = -EINVAL;
                     break;
1829         case COMP_CTX_STATE:
1830                 dev_warn(&udev->dev, "WARN: invalid context state for "
1831                                 "evaluate context command.\n");
1832                 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1833                 ret = -EINVAL;
1834                 break;
1835         case COMP_DEV_ERR:
1836                 dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
1837                                 "context command.\n");
1838                 ret = -ENODEV;
1839                 break;
1840         case COMP_MEL_ERR:
1841                 /* Max Exit Latency too large error */
1842                 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
1843                 ret = -EINVAL;
1844                 break;
1845         case COMP_SUCCESS:
1846                 dev_dbg(&udev->dev, "Successful evaluate context command\n");
1847                 ret = 0;
1848                 break;
1849         default:
1850                 xhci_err(xhci, "ERROR: unexpected command completion "
1851                                 "code 0x%x.\n", *cmd_status);
1852                 ret = -EINVAL;
1853                 break;
1854         }
1855         return ret;
1856 }
1857
1858 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
1859                 struct xhci_container_ctx *in_ctx)
1860 {
1861         struct xhci_input_control_ctx *ctrl_ctx;
1862         u32 valid_add_flags;
1863         u32 valid_drop_flags;
1864
1865         ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1866         /* Ignore the slot flag (bit 0), and the default control endpoint flag
1867          * (bit 1).  The default control endpoint is added during the Address
1868          * Device command and is never removed until the slot is disabled.
1869          */
1870         valid_add_flags = ctrl_ctx->add_flags >> 2;
1871         valid_drop_flags = ctrl_ctx->drop_flags >> 2;
1872
1873         /* Use hweight32 to count the number of ones in the add flags, or
1874          * number of endpoints added.  Don't count endpoints that are changed
1875          * (both added and dropped).
1876          */
1877         return hweight32(valid_add_flags) -
1878                 hweight32(valid_add_flags & valid_drop_flags);
1879 }
1880
1881 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
1882                 struct xhci_container_ctx *in_ctx)
1883 {
1884         struct xhci_input_control_ctx *ctrl_ctx;
1885         u32 valid_add_flags;
1886         u32 valid_drop_flags;
1887
1888         ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1889         valid_add_flags = ctrl_ctx->add_flags >> 2;
1890         valid_drop_flags = ctrl_ctx->drop_flags >> 2;
1891
1892         return hweight32(valid_drop_flags) -
1893                 hweight32(valid_add_flags & valid_drop_flags);
1894 }
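
     /* Hypothetical example: a configuration that adds endpoint contexts 3 and
      * 4 and also drops context 4 (i.e. changes it) has add flag bits 3 and 4
      * and drop flag bit 4 set.  The helpers above then count one truly new
      * endpoint (2 added minus 1 changed) and zero net drops.
      */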
1895
1896 /*
1897  * We need to reserve the new number of endpoints before the configure endpoint
1898  * command completes.  We can't subtract the dropped endpoints from the number
1899  * of active endpoints until the command completes because we can oversubscribe
1900  * the host in this case:
1901  *
1902  *  - the first configure endpoint command drops more endpoints than it adds
1903  *  - a second configure endpoint command that adds more endpoints is queued
1904  *  - the first configure endpoint command fails, so the config is unchanged
1905  *  - the second command may succeed, even though there aren't enough resources
1906  *
1907  * Must be called with xhci->lock held.
1908  */
1909 static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
1910                 struct xhci_container_ctx *in_ctx)
1911 {
1912         u32 added_eps;
1913
1914         added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
1915         if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
1916                 xhci_dbg(xhci, "Not enough ep ctxs: "
1917                                 "%u active, need to add %u, limit is %u.\n",
1918                                 xhci->num_active_eps, added_eps,
1919                                 xhci->limit_active_eps);
1920                 return -ENOMEM;
1921         }
1922         xhci->num_active_eps += added_eps;
1923         xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
1924                         xhci->num_active_eps);
1925         return 0;
1926 }
1927
1928 /*
1929  * The configure endpoint command was failed by the xHC for some other reason, so
1930  * we need to revert the resources that the failed configuration would have used.
1931  *
1932  * Must be called with xhci->lock held.
1933  */
1934 static void xhci_free_host_resources(struct xhci_hcd *xhci,
1935                 struct xhci_container_ctx *in_ctx)
1936 {
1937         u32 num_failed_eps;
1938
1939         num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
1940         xhci->num_active_eps -= num_failed_eps;
1941         xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
1942                         num_failed_eps,
1943                         xhci->num_active_eps);
1944 }
1945
1946 /*
1947  * Now that the command has completed, clean up the active endpoint count by
1948  * subtracting out the endpoints that were dropped (but not changed).
1949  *
1950  * Must be called with xhci->lock held.
1951  */
1952 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
1953                 struct xhci_container_ctx *in_ctx)
1954 {
1955         u32 num_dropped_eps;
1956
1957         num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
1958         xhci->num_active_eps -= num_dropped_eps;
1959         if (num_dropped_eps)
1960                 xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
1961                                 num_dropped_eps,
1962                                 xhci->num_active_eps);
1963 }
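
     /* Hypothetical accounting example for the three helpers above: a
      * configure endpoint command that adds endpoints A and B and drops
      * endpoint C first reserves two contexts in xhci_reserve_host_resources().
      * If the command succeeds, xhci_finish_resource_reservation() subtracts
      * the one dropped endpoint (net change of +1 active context); if it
      * fails, xhci_free_host_resources() returns the two reserved contexts.
      */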
1964
1965 unsigned int xhci_get_block_size(struct usb_device *udev)
1966 {
1967         switch (udev->speed) {
1968         case USB_SPEED_LOW:
1969         case USB_SPEED_FULL:
1970                 return FS_BLOCK;
1971         case USB_SPEED_HIGH:
1972                 return HS_BLOCK;
1973         case USB_SPEED_SUPER:
1974                 return SS_BLOCK;
1975         case USB_SPEED_UNKNOWN:
1976         case USB_SPEED_WIRELESS:
1977         default:
1978                 /* Should never happen */
1979                 return 1;
1980         }
1981 }
1982
1983 unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
1984 {
1985         if (interval_bw->overhead[LS_OVERHEAD_TYPE])
1986                 return LS_OVERHEAD;
1987         if (interval_bw->overhead[FS_OVERHEAD_TYPE])
1988                 return FS_OVERHEAD;
1989         return HS_OVERHEAD;
1990 }
1991
1992 /* If we are changing a LS/FS device under a HS hub,
1993  * make sure (if we are activating a new TT) that the HS bus has enough
1994  * bandwidth for this new TT.
1995  */
1996 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
1997                 struct xhci_virt_device *virt_dev,
1998                 int old_active_eps)
1999 {
2000         struct xhci_interval_bw_table *bw_table;
2001         struct xhci_tt_bw_info *tt_info;
2002
2003         /* Find the bandwidth table for the root port this TT is attached to. */
2004         bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2005         tt_info = virt_dev->tt_info;
2006         /* If this TT already had active endpoints, the bandwidth for this TT
2007          * has already been added.  Removing all periodic endpoints (and thus
2008          * making the TT inactive) will only decrease the bandwidth used.
2009          */
2010         if (old_active_eps)
2011                 return 0;
2012         if (old_active_eps == 0 && tt_info->active_eps != 0) {
2013                 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2014                         return -ENOMEM;
2015                 return 0;
2016         }
2017         /* Not sure why we would have no new active endpoints...
2018          *
2019          * Maybe because of an Evaluate Context change for a hub update or a
2020          * control endpoint 0 max packet size change?
2021          * FIXME: skip the bandwidth calculation in that case.
2022          */
2023         return 0;
2024 }
2025
2026 static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2027                 struct xhci_virt_device *virt_dev)
2028 {
2029         unsigned int bw_reserved;
2030
2031         bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2032         if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2033                 return -ENOMEM;
2034
2035         bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2036         if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2037                 return -ENOMEM;
2038
2039         return 0;
2040 }
2041
2042 /*
2043  * This algorithm is a very conservative estimate of the worst-case scheduling
2044  * scenario for any one interval.  The hardware dynamically schedules the
2045  * packets, so we can't tell which microframe could be the limiting factor in
2046  * the bandwidth scheduling.  This only takes into account periodic endpoints.
2047  *
2048  * Obviously, we can't solve an NP-complete problem to find the minimum worst
2049  * case scenario.  Instead, we come up with an estimate that is no less than
2050  * the worst case bandwidth used for any one microframe, but may be an
2051  * over-estimate.
2052  *
2053  * We walk the requirements for each endpoint by interval, starting with the
2054  * smallest interval, and place packets in the schedule where there is only one
2055  * possible way to schedule packets for that interval.  In order to simplify
2056  * this algorithm, we record the largest max packet size for each interval, and
2057  * assume all packets will be that size.
2058  *
2059  * For interval 0, we obviously must schedule all packets in every microframe.
2060  * The bandwidth for interval 0 is just the amount of data to be transmitted
2061  * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2062  * the number of packets).
2063  *
2064  * For interval 1, we have two possible microframes to schedule those packets
2065  * in.  For this algorithm, if we can schedule the same number of packets for
2066  * each possible scheduling opportunity (each microframe), we will do so.  The
2067  * remaining number of packets will be saved to be transmitted in the gaps in
2068  * the next interval's scheduling sequence.
2069  *
2070  * As we move those remaining packets to be scheduled with interval 2 packets,
2071  * we have to double the number of remaining packets to transmit.  This is
2072  * because the intervals are actually powers of 2, and we would be transmitting
2073  * the previous interval's packets twice in this interval.  We also have to be
2074  * sure that when we look at the largest max packet size for this interval, we
2075  * also look at the largest max packet size for the remaining packets and take
2076  * the greater of the two.
2077  *
2078  * The algorithm continues to evenly distribute packets in each scheduling
2079  * opportunity, and push the remaining packets out, until we get to the last
2080  * interval.  Then those packets and their associated overhead are just added
2081  * to the bandwidth used.
2082  */
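
     /* Hypothetical walk-through of the loop below (values chosen purely for
      * illustration): one interval-1 endpoint with 5 packets of 2 blocks each
      * and one interval-2 endpoint with 1 packet of 1 block, equal overhead.
      *
      *  - i = 1: packets_remaining = 5; with 1 << 2 = 4 scheduling slots we
      *    place 5 >> 2 = 1 packet per microframe, charge overhead + 2 blocks,
      *    and carry 5 % 4 = 1 packet forward.
      *  - i = 2: the carried packet doubles and the new one is added, so
      *    packets_remaining = 2 * 1 + 1 = 3; 3 >> 3 = 0 packets can be spread
      *    evenly, nothing is charged yet, and all 3 carry forward.
      *  - Later passes double the remainder, but the slot count doubles too,
      *    so nothing more is placed; after interval 15 the leftovers are
      *    over-scheduled as one worst-case packet in every microframe.
      */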
2083 static int xhci_check_bw_table(struct xhci_hcd *xhci,
2084                 struct xhci_virt_device *virt_dev,
2085                 int old_active_eps)
2086 {
2087         unsigned int bw_reserved;
2088         unsigned int max_bandwidth;
2089         unsigned int bw_used;
2090         unsigned int block_size;
2091         struct xhci_interval_bw_table *bw_table;
2092         unsigned int packet_size = 0;
2093         unsigned int overhead = 0;
2094         unsigned int packets_transmitted = 0;
2095         unsigned int packets_remaining = 0;
2096         unsigned int i;
2097
2098         if (virt_dev->udev->speed == USB_SPEED_SUPER)
2099                 return xhci_check_ss_bw(xhci, virt_dev);
2100
2101         if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2102                 max_bandwidth = HS_BW_LIMIT;
2103                 /* Convert percent of bus BW reserved to blocks reserved */
2104                 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2105         } else {
2106                 max_bandwidth = FS_BW_LIMIT;
2107                 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2108         }
2109
2110         bw_table = virt_dev->bw_table;
2111         /* We need to translate the max packet size and max ESIT payloads into
2112          * the units the hardware uses.
2113          */
2114         block_size = xhci_get_block_size(virt_dev->udev);
2115
2116         /* If we are manipulating a LS/FS device under a HS hub, double check
2117  * that the HS bus has enough bandwidth if we are activating a new TT.
2118          */
2119         if (virt_dev->tt_info) {
2120                 xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
2121                                 virt_dev->real_port);
2122                 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2123                         xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2124                                         "newly activated TT.\n");
2125                         return -ENOMEM;
2126                 }
2127                 xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
2128                                 virt_dev->tt_info->slot_id,
2129                                 virt_dev->tt_info->ttport);
2130         } else {
2131                 xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
2132                                 virt_dev->real_port);
2133         }
2134
2135         /* Add in how much bandwidth will be used for interval zero, or the
2136          * rounded max ESIT payload + number of packets * largest overhead.
2137          */
2138         bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2139                 bw_table->interval_bw[0].num_packets *
2140                 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2141
2142         for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2143                 unsigned int bw_added;
2144                 unsigned int largest_mps;
2145                 unsigned int interval_overhead;
2146
2147                 /*
2148                  * How many packets could we transmit in this interval?
2149                  * If packets didn't fit in the previous interval, we will need
2150                  * to transmit that many packets twice within this interval.
2151                  */
2152                 packets_remaining = 2 * packets_remaining +
2153                         bw_table->interval_bw[i].num_packets;
2154
2155                 /* Find the largest max packet size of this or the previous
2156                  * interval.
2157                  */
2158                 if (list_empty(&bw_table->interval_bw[i].endpoints))
2159                         largest_mps = 0;
2160                 else {
2161                         struct xhci_virt_ep *virt_ep;
2162                         struct list_head *ep_entry;
2163
2164                         ep_entry = bw_table->interval_bw[i].endpoints.next;
2165                         virt_ep = list_entry(ep_entry,
2166                                         struct xhci_virt_ep, bw_endpoint_list);
2167                         /* Convert to blocks, rounding up */
2168                         largest_mps = DIV_ROUND_UP(
2169                                         virt_ep->bw_info.max_packet_size,
2170                                         block_size);
2171                 }
2172                 if (largest_mps > packet_size)
2173                         packet_size = largest_mps;
2174
2175                 /* Use the larger overhead of this or the previous interval. */
2176                 interval_overhead = xhci_get_largest_overhead(
2177                                 &bw_table->interval_bw[i]);
2178                 if (interval_overhead > overhead)
2179                         overhead = interval_overhead;
2180
2181                 /* How many packets can we evenly distribute across
2182                  * (1 << (i + 1)) possible scheduling opportunities?
2183                  */
2184                 packets_transmitted = packets_remaining >> (i + 1);
2185
2186                 /* Add in the bandwidth used for those scheduled packets */
2187                 bw_added = packets_transmitted * (overhead + packet_size);
2188
2189                 /* How many packets do we have remaining to transmit? */
2190                 packets_remaining = packets_remaining % (1 << (i + 1));
2191
2192                 /* What largest max packet size should those packets have? */
2193                 /* If we've transmitted all packets, don't carry over the
2194                  * largest packet size.
2195                  */
2196                 if (packets_remaining == 0) {
2197                         packet_size = 0;
2198                         overhead = 0;
2199                 } else if (packets_transmitted > 0) {
2200                         /* Otherwise if we do have remaining packets, and we've
2201                          * scheduled some packets in this interval, take the
2202                          * largest max packet size from endpoints with this
2203                          * interval.
2204                          */
2205                         packet_size = largest_mps;
2206                         overhead = interval_overhead;
2207                 }
2208                 /* Otherwise carry over packet_size and overhead from the last
2209                  * time we had a remainder.
2210                  */
2211                 bw_used += bw_added;
2212                 if (bw_used > max_bandwidth) {
2213                         xhci_warn(xhci, "Not enough bandwidth. "
2214                                         "Proposed: %u, Max: %u\n",
2215                                 bw_used, max_bandwidth);
2216                         return -ENOMEM;
2217                 }
2218         }
2219         /*
2220          * Ok, we know we have some packets left over after even-handedly
2221          * scheduling interval 15.  We don't know which microframes they will
2222          * fit into, so we over-schedule and say they will be scheduled every
2223          * microframe.
2224          */
2225         if (packets_remaining > 0)
2226                 bw_used += overhead + packet_size;
2227
2228         if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2229                 unsigned int port_index = virt_dev->real_port - 1;
2230
2231                 /* OK, we're manipulating a HS device attached to a
2232                  * root port bandwidth domain.  Include the number of active TTs
2233                  * in the bandwidth used.
2234                  */
2235                 bw_used += TT_HS_OVERHEAD *
2236                         xhci->rh_bw[port_index].num_active_tts;
2237         }
2238
2239         xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2240                 "Available: %u " "percent\n",
2241                 bw_used, max_bandwidth, bw_reserved,
2242                 (max_bandwidth - bw_used - bw_reserved) * 100 /
2243                 max_bandwidth);
2244
2245         bw_used += bw_reserved;
2246         if (bw_used > max_bandwidth) {
2247                 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2248                                 bw_used, max_bandwidth);
2249                 return -ENOMEM;
2250         }
2251
2252         bw_table->bw_used = bw_used;
2253         return 0;
2254 }
2255
2256 static bool xhci_is_async_ep(unsigned int ep_type)
2257 {
2258         return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2259                                         ep_type != ISOC_IN_EP &&
2260                                         ep_type != INT_IN_EP);
2261 }
2262
2263 static bool xhci_is_sync_in_ep(unsigned int ep_type)
2264 {
2265         return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2266 }
2267
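     /* Estimate of the SuperSpeed bandwidth an endpoint consumes, in blocks.
      * An ep_interval of 0 means the endpoint is serviced every microframe,
      * so its full per-service cost is charged to each microframe; for longer
      * intervals that cost is spread (rounded up) over the 2^ep_interval
      * microframes the interval spans.
      */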
2268 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2269 {
2270         unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2271
2272         if (ep_bw->ep_interval == 0)
2273                 return SS_OVERHEAD_BURST +
2274                         (ep_bw->mult * ep_bw->num_packets *
2275                                         (SS_OVERHEAD + mps));
2276         return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2277                                 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2278                                 1 << ep_bw->ep_interval);
2279
2280 }
2281
2282 void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2283                 struct xhci_bw_info *ep_bw,
2284                 struct xhci_interval_bw_table *bw_table,
2285                 struct usb_device *udev,
2286                 struct xhci_virt_ep *virt_ep,
2287                 struct xhci_tt_bw_info *tt_info)
2288 {
2289         struct xhci_interval_bw *interval_bw;
2290         int normalized_interval;
2291
2292         if (xhci_is_async_ep(ep_bw->type))
2293                 return;
2294
2295         if (udev->speed == USB_SPEED_SUPER) {
2296                 if (xhci_is_sync_in_ep(ep_bw->type))
2297                         xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2298                                 xhci_get_ss_bw_consumed(ep_bw);
2299                 else
2300                         xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2301                                 xhci_get_ss_bw_consumed(ep_bw);
2302                 return;
2303         }
2304
2305         /* SuperSpeed endpoints never get added to intervals in the table, so
2306          * this check is only valid for HS/FS/LS devices.
2307          */
2308         if (list_empty(&virt_ep->bw_endpoint_list))
2309                 return;
2310         /* For LS/FS devices, we need to translate the interval expressed in
2311          * microframes to frames (an interval exponent of 3 = 8 microframes = 1 frame).
2312          */
2313         if (udev->speed == USB_SPEED_HIGH)
2314                 normalized_interval = ep_bw->ep_interval;
2315         else
2316                 normalized_interval = ep_bw->ep_interval - 3;
2317
2318         if (normalized_interval == 0)
2319                 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2320         interval_bw = &bw_table->interval_bw[normalized_interval];
2321         interval_bw->num_packets -= ep_bw->num_packets;
2322         switch (udev->speed) {
2323         case USB_SPEED_LOW:
2324                 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2325                 break;
2326         case USB_SPEED_FULL:
2327                 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2328                 break;
2329         case USB_SPEED_HIGH:
2330                 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2331                 break;
2332         case USB_SPEED_SUPER:
2333         case USB_SPEED_UNKNOWN:
2334         case USB_SPEED_WIRELESS:
2335                 /* Should never happen because only LS/FS/HS endpoints will get
2336                  * added to the endpoint list.
2337                  */
2338                 return;
2339         }
2340         if (tt_info)
2341                 tt_info->active_eps -= 1;
2342         list_del_init(&virt_ep->bw_endpoint_list);
2343 }
2344
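     /* Add an endpoint's bandwidth into the given interval table.  The
      * per-interval endpoint list is kept sorted largest max packet size
      * first, so that xhci_check_bw_table() only has to look at the first
      * entry to find the worst-case packet size for that interval.
      */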
2345 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2346                 struct xhci_bw_info *ep_bw,
2347                 struct xhci_interval_bw_table *bw_table,
2348                 struct usb_device *udev,
2349                 struct xhci_virt_ep *virt_ep,
2350                 struct xhci_tt_bw_info *tt_info)
2351 {
2352         struct xhci_interval_bw *interval_bw;
2353         struct xhci_virt_ep *smaller_ep;
2354         int normalized_interval;
2355
2356         if (xhci_is_async_ep(ep_bw->type))
2357                 return;
2358
2359         if (udev->speed == USB_SPEED_SUPER) {
2360                 if (xhci_is_sync_in_ep(ep_bw->type))
2361                         xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2362                                 xhci_get_ss_bw_consumed(ep_bw);
2363                 else
2364                         xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2365                                 xhci_get_ss_bw_consumed(ep_bw);
2366                 return;
2367         }
2368
2369         /* For LS/FS devices, we need to translate the interval expressed in
2370          * microframes to frames.
2371          */
2372         if (udev->speed == USB_SPEED_HIGH)
2373                 normalized_interval = ep_bw->ep_interval;
2374         else
2375                 normalized_interval = ep_bw->ep_interval - 3;
2376
2377         if (normalized_interval == 0)
2378                 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2379         interval_bw = &bw_table->interval_bw[normalized_interval];
2380         interval_bw->num_packets += ep_bw->num_packets;
2381         switch (udev->speed) {
2382         case USB_SPEED_LOW:
2383                 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2384                 break;
2385         case USB_SPEED_FULL:
2386                 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2387                 break;
2388         case USB_SPEED_HIGH:
2389                 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2390                 break;
2391         case USB_SPEED_SUPER:
2392         case USB_SPEED_UNKNOWN:
2393         case USB_SPEED_WIRELESS:
2394                 /* Should never happen because only LS/FS/HS endpoints will get
2395                  * added to the endpoint list.
2396                  */
2397                 return;
2398         }
2399
2400         if (tt_info)
2401                 tt_info->active_eps += 1;
2402         /* Insert the endpoint into the list, largest max packet size first. */
2403         list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2404                         bw_endpoint_list) {
2405                 if (ep_bw->max_packet_size >=
2406                                 smaller_ep->bw_info.max_packet_size) {
2407                         /* Add the new ep before the smaller endpoint */
2408                         list_add_tail(&virt_ep->bw_endpoint_list,
2409                                         &smaller_ep->bw_endpoint_list);
2410                         return;
2411                 }
2412         }
2413         /* Add the new endpoint at the end of the list. */
2414         list_add_tail(&virt_ep->bw_endpoint_list,
2415                         &interval_bw->endpoints);
2416 }
2417
2418 void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2419                 struct xhci_virt_device *virt_dev,
2420                 int old_active_eps)
2421 {
2422         struct xhci_root_port_bw_info *rh_bw_info;
2423         if (!virt_dev->tt_info)
2424                 return;
2425
2426         rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2427         if (old_active_eps == 0 &&
2428                                 virt_dev->tt_info->active_eps != 0) {
2429                 rh_bw_info->num_active_tts += 1;
2430                 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2431         } else if (old_active_eps != 0 &&
2432                                 virt_dev->tt_info->active_eps == 0) {
2433                 rh_bw_info->num_active_tts -= 1;
2434                 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2435         }
2436 }
2437
2438 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2439                 struct xhci_virt_device *virt_dev,
2440                 struct xhci_container_ctx *in_ctx)
2441 {
2442         struct xhci_bw_info ep_bw_info[31];
2443         int i;
2444         struct xhci_input_control_ctx *ctrl_ctx;
2445         int old_active_eps = 0;
2446
2447         if (virt_dev->tt_info)
2448                 old_active_eps = virt_dev->tt_info->active_eps;
2449
2450         ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
2451
2452         for (i = 0; i < 31; i++) {
2453                 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2454                         continue;
2455
2456                 /* Make a copy of the BW info in case we need to revert this */
2457                 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2458                                 sizeof(ep_bw_info[i]));
2459                 /* Drop the endpoint from the interval table if the endpoint is
2460                  * being dropped or changed.
2461                  */
2462                 if (EP_IS_DROPPED(ctrl_ctx, i))
2463                         xhci_drop_ep_from_interval_table(xhci,
2464                                         &virt_dev->eps[i].bw_info,
2465                                         virt_dev->bw_table,
2466                                         virt_dev->udev,
2467                                         &virt_dev->eps[i],
2468                                         virt_dev->tt_info);
2469         }
2470         /* Overwrite the information stored in the endpoints' bw_info */
2471         xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2472         for (i = 0; i < 31; i++) {
2473                 /* Add any changed or added endpoints to the interval table */
2474                 if (EP_IS_ADDED(ctrl_ctx, i))
2475                         xhci_add_ep_to_interval_table(xhci,
2476                                         &virt_dev->eps[i].bw_info,
2477                                         virt_dev->bw_table,
2478                                         virt_dev->udev,
2479                                         &virt_dev->eps[i],
2480                                         virt_dev->tt_info);
2481         }
2482
2483         if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2484                 /* Ok, this fits in the bandwidth we have.
2485                  * Update the number of active TTs.
2486                  */
2487                 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2488                 return 0;
2489         }
2490
2491         /* We don't have enough bandwidth for this, revert the stored info. */
2492         for (i = 0; i < 31; i++) {
2493                 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2494                         continue;
2495
2496                 /* Drop the new copies of any added or changed endpoints from
2497                  * the interval table.
2498                  */
2499                 if (EP_IS_ADDED(ctrl_ctx, i)) {
2500                         xhci_drop_ep_from_interval_table(xhci,
2501                                         &virt_dev->eps[i].bw_info,
2502                                         virt_dev->bw_table,
2503                                         virt_dev->udev,
2504                                         &virt_dev->eps[i],
2505                                         virt_dev->tt_info);
2506                 }
2507                 /* Revert the endpoint back to its old information */
2508                 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2509                                 sizeof(ep_bw_info[i]));
2510                 /* Add any changed or dropped endpoints back into the table */
2511                 if (EP_IS_DROPPED(ctrl_ctx, i))
2512                         xhci_add_ep_to_interval_table(xhci,
2513                                         &virt_dev->eps[i].bw_info,
2514                                         virt_dev->bw_table,
2515                                         virt_dev->udev,
2516                                         &virt_dev->eps[i],
2517                                         virt_dev->tt_info);
2518         }
2519         return -ENOMEM;
2520 }
2521
2522
2523 /* Issue a configure endpoint command or evaluate context command
2524  * and wait for it to finish.
2525  */
2526 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2527                 struct usb_device *udev,
2528                 struct xhci_command *command,
2529                 bool ctx_change, bool must_succeed)
2530 {
2531         int ret;
2532         int timeleft;
2533         unsigned long flags;
2534         struct xhci_container_ctx *in_ctx;
2535         struct completion *cmd_completion;
2536         u32 *cmd_status;
2537         struct xhci_virt_device *virt_dev;
2538         union xhci_trb *cmd_trb;
2539
2540         spin_lock_irqsave(&xhci->lock, flags);
2541         virt_dev = xhci->devs[udev->slot_id];
2542
2543         if (command)
2544                 in_ctx = command->in_ctx;
2545         else
2546                 in_ctx = virt_dev->in_ctx;
2547
2548         if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2549                         xhci_reserve_host_resources(xhci, in_ctx)) {
2550                 spin_unlock_irqrestore(&xhci->lock, flags);
2551                 xhci_warn(xhci, "Not enough host resources, "
2552                                 "active endpoint contexts = %u\n",
2553                                 xhci->num_active_eps);
2554                 return -ENOMEM;
2555         }
2556         if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2557                         xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
2558                 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2559                         xhci_free_host_resources(xhci, in_ctx);
2560                 spin_unlock_irqrestore(&xhci->lock, flags);
2561                 xhci_warn(xhci, "Not enough bandwidth\n");
2562                 return -ENOMEM;
2563         }
2564
2565         if (command) {
2566                 cmd_completion = command->completion;
2567                 cmd_status = &command->status;
2568                 command->command_trb = xhci->cmd_ring->enqueue;
2569
2570                 /* Enqueue pointer can be left pointing to the link TRB,
2571                  * we must handle that
2572                  */
2573                 if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
2574                         command->command_trb =
2575                                 xhci->cmd_ring->enq_seg->next->trbs;
2576
2577                 list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
2578         } else {
2579                 cmd_completion = &virt_dev->cmd_completion;
2580                 cmd_status = &virt_dev->cmd_status;
2581         }
2582         init_completion(cmd_completion);
2583
2584         cmd_trb = xhci->cmd_ring->dequeue;
2585         if (!ctx_change)
2586                 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
2587                                 udev->slot_id, must_succeed);
2588         else
2589                 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
2590                                 udev->slot_id);
2591         if (ret < 0) {
2592                 if (command)
2593                         list_del(&command->cmd_list);
2594                 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2595                         xhci_free_host_resources(xhci, in_ctx);
2596                 spin_unlock_irqrestore(&xhci->lock, flags);
2597                 xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
2598                 return -ENOMEM;
2599         }
2600         xhci_ring_cmd_db(xhci);
2601         spin_unlock_irqrestore(&xhci->lock, flags);
2602
2603         /* Wait for the configure endpoint command to complete */
2604         timeleft = wait_for_completion_interruptible_timeout(
2605                         cmd_completion,
2606                         XHCI_CMD_DEFAULT_TIMEOUT);
2607         if (timeleft <= 0) {
2608                 xhci_warn(xhci, "%s while waiting for %s command\n",
2609                                 timeleft == 0 ? "Timeout" : "Signal",
2610                                 ctx_change == 0 ?
2611                                         "configure endpoint" :
2612                                         "evaluate context");
2613                 /* cancel the configure endpoint command */
2614                 ret = xhci_cancel_cmd(xhci, command, cmd_trb);
2615                 if (ret < 0)
2616                         return ret;
2617                 return -ETIME;
2618         }
2619
2620         if (!ctx_change)
2621                 ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
2622         else
2623                 ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
2624
2625         if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2626                 spin_lock_irqsave(&xhci->lock, flags);
2627                 /* If the command failed, remove the reserved resources.
2628                  * Otherwise, clean up the estimate to include dropped eps.
2629                  */
2630                 if (ret)
2631                         xhci_free_host_resources(xhci, in_ctx);
2632                 else
2633                         xhci_finish_resource_reservation(xhci, in_ctx);
2634                 spin_unlock_irqrestore(&xhci->lock, flags);
2635         }
2636         return ret;
2637 }
2638
2639 /* Called after one or more calls to xhci_add_endpoint() or
2640  * xhci_drop_endpoint().  If this call fails, the USB core is expected
2641  * to call xhci_reset_bandwidth().
2642  *
2643  * Since we are in the middle of changing either configuration or
2644  * installing a new alt setting, the USB core won't allow URBs to be
2645  * enqueued for any endpoint on the old config or interface.  Nothing
2646  * else should be touching the xhci->devs[slot_id] structure, so we
2647  * don't need to take the xhci->lock for manipulating that.
2648  */
2649 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2650 {
2651         int i;
2652         int ret = 0;
2653         struct xhci_hcd *xhci;
2654         struct xhci_virt_device *virt_dev;
2655         struct xhci_input_control_ctx *ctrl_ctx;
2656         struct xhci_slot_ctx *slot_ctx;
2657
2658         ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2659         if (ret <= 0)
2660                 return ret;
2661         xhci = hcd_to_xhci(hcd);
2662         if (xhci->xhc_state & XHCI_STATE_DYING)
2663                 return -ENODEV;
2664
2665         xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2666         virt_dev = xhci->devs[udev->slot_id];
2667
2668         /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
2669         ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
2670         ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2671         ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2672         ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2673
2674         /* Don't issue the command if there are no endpoints to update. */
2675         if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2676                         ctrl_ctx->drop_flags == 0)
2677                 return 0;
2678
2679         xhci_dbg(xhci, "New Input Control Context:\n");
2680         slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2681         xhci_dbg_ctx(xhci, virt_dev->in_ctx,
2682                      LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2683
2684         ret = xhci_configure_endpoint(xhci, udev, NULL,
2685                         false, false);
2686         if (ret) {
2687                 /* The caller (USB core) is expected to call xhci_reset_bandwidth() */
2688                 return ret;
2689         }
2690
2691         xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
2692         xhci_dbg_ctx(xhci, virt_dev->out_ctx,
2693                      LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2694
2695         /* Free any rings that were dropped, but not changed. */
2696         for (i = 1; i < 31; ++i) {
2697                 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2698                     !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
2699                         xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2700         }
2701         xhci_zero_in_ctx(xhci, virt_dev);
2702         /*
2703          * Install any rings for completely new endpoints or changed endpoints,
2704          * and free or cache any old rings from changed endpoints.
2705          */
2706         for (i = 1; i < 31; ++i) {
2707                 if (!virt_dev->eps[i].new_ring)
2708                         continue;
2709                 /* Only cache or free the old ring if it exists.
2710                  * It may not exist if this is the first add of an endpoint.
2711                  */
2712                 if (virt_dev->eps[i].ring) {
2713                         xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2714                 }
2715                 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2716                 virt_dev->eps[i].new_ring = NULL;
2717         }
2718
2719         return ret;
2720 }
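/*
 * Illustrative sketch, not part of the driver: the drop/add tests in the loop
 * above rely on the input control context flag layout, where bit 0 covers the
 * slot context, bit 1 covers EP0, and endpoint index i maps to flag bit i + 1
 * (which should match what xhci_get_endpoint_flag_from_index() computes).
 * The hypothetical helper below only makes that arithmetic explicit.
 */
#if 0	/* example only, never built */
static inline u32 xhci_example_ep_index_to_ctx_flag(unsigned int ep_index)
{
	/* endpoint index 0 (EP0) -> bit 1, ..., endpoint index 30 -> bit 31 */
	return 1U << (ep_index + 1);
}
#endif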
2721
2722 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2723 {
2724         struct xhci_hcd *xhci;
2725         struct xhci_virt_device *virt_dev;
2726         int i, ret;
2727
2728         ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2729         if (ret <= 0)
2730                 return;
2731         xhci = hcd_to_xhci(hcd);
2732
2733         xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2734         virt_dev = xhci->devs[udev->slot_id];
2735         /* Free any rings allocated for added endpoints */
2736         for (i = 0; i < 31; ++i) {
2737                 if (virt_dev->eps[i].new_ring) {
2738                         xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2739                         virt_dev->eps[i].new_ring = NULL;
2740                 }
2741         }
2742         xhci_zero_in_ctx(xhci, virt_dev);
2743 }
2744
2745 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2746                 struct xhci_container_ctx *in_ctx,
2747                 struct xhci_container_ctx *out_ctx,
2748                 u32 add_flags, u32 drop_flags)
2749 {
2750         struct xhci_input_control_ctx *ctrl_ctx;
2751         ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
2752         ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2753         ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2754         xhci_slot_copy(xhci, in_ctx, out_ctx);
2755         ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2756
2757         xhci_dbg(xhci, "Input Context:\n");
2758         xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
2759 }
2760
2761 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2762                 unsigned int slot_id, unsigned int ep_index,
2763                 struct xhci_dequeue_state *deq_state)
2764 {
2765         struct xhci_container_ctx *in_ctx;
2766         struct xhci_ep_ctx *ep_ctx;
2767         u32 added_ctxs;
2768         dma_addr_t addr;
2769
2770         xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2771                         xhci->devs[slot_id]->out_ctx, ep_index);
2772         in_ctx = xhci->devs[slot_id]->in_ctx;
2773         ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2774         addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2775                         deq_state->new_deq_ptr);
2776         if (addr == 0) {
2777                 xhci_warn(xhci, "WARN Cannot submit config ep after "
2778                                 "reset ep command\n");
2779                 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2780                                 deq_state->new_deq_seg,
2781                                 deq_state->new_deq_ptr);
2782                 return;
2783         }
2784         ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2785
2786         added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2787         xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2788                         xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
2789 }
2790
2791 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2792                 struct usb_device *udev, unsigned int ep_index)
2793 {
2794         struct xhci_dequeue_state deq_state;
2795         struct xhci_virt_ep *ep;
2796
2797         xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
2798         ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2799         /* We need to move the HW's dequeue pointer past this TD,
2800          * or it will attempt to resend it on the next doorbell ring.
2801          */
2802         xhci_find_new_dequeue_state(xhci, udev->slot_id,
2803                         ep_index, ep->stopped_stream, ep->stopped_td,
2804                         &deq_state);
2805
2806         /* HW with the reset endpoint quirk will use the saved dequeue state to
2807          * issue a configure endpoint command later.
2808          */
2809         if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2810                 xhci_dbg(xhci, "Queueing new dequeue state\n");
2811                 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2812                                 ep_index, ep->stopped_stream, &deq_state);
2813         } else {
2814                 /* Better hope no one uses the input context between now and the
2815                  * reset endpoint completion!
2816                  * XXX: No idea how this hardware will react when stream rings
2817                  * are enabled.
2818                  */
2819                 xhci_dbg(xhci, "Setting up input context for "
2820                                 "configure endpoint command\n");
2821                 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2822                                 ep_index, &deq_state);
2823         }
2824 }
2825
2826 /* Deal with stalled endpoints.  The core should have sent the control message
2827  * to clear the halt condition.  However, we need to make the xHCI hardware
2828  * reset its sequence number, since a device will expect a sequence number of
2829  * zero after the halt condition is cleared.
2830  * Context: in_interrupt
2831  */
2832 void xhci_endpoint_reset(struct usb_hcd *hcd,
2833                 struct usb_host_endpoint *ep)
2834 {
2835         struct xhci_hcd *xhci;
2836         struct usb_device *udev;
2837         unsigned int ep_index;
2838         unsigned long flags;
2839         int ret;
2840         struct xhci_virt_ep *virt_ep;
2841
2842         xhci = hcd_to_xhci(hcd);
2843         udev = (struct usb_device *) ep->hcpriv;
2844         /* Called with a root hub endpoint (or an endpoint that wasn't added
2845          * with xhci_add_endpoint()).
2846          */
2847         if (!ep->hcpriv)
2848                 return;
2849         ep_index = xhci_get_endpoint_index(&ep->desc);
2850         virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2851         if (!virt_ep->stopped_td) {
2852                 xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
2853                                 ep->desc.bEndpointAddress);
2854                 return;
2855         }
2856         if (usb_endpoint_xfer_control(&ep->desc)) {
2857                 xhci_dbg(xhci, "Control endpoint stall already handled.\n");
2858                 return;
2859         }
2860
2861         xhci_dbg(xhci, "Queueing reset endpoint command\n");
2862         spin_lock_irqsave(&xhci->lock, flags);
2863         ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
2864         /*
2865          * Can't change the ring dequeue pointer until it's transitioned to the
2866          * stopped state, which is only upon a successful reset endpoint
2867          * command.  Better hope that last command worked!
2868          */
2869         if (!ret) {
2870                 xhci_cleanup_stalled_ring(xhci, udev, ep_index);
2871                 kfree(virt_ep->stopped_td);
2872                 xhci_ring_cmd_db(xhci);
2873         }
2874         virt_ep->stopped_td = NULL;
2875         virt_ep->stopped_trb = NULL;
2876         virt_ep->stopped_stream = 0;
2877         spin_unlock_irqrestore(&xhci->lock, flags);
2878
2879         if (ret)
2880                 xhci_warn(xhci, "FIXME allocate a new ring segment\n");
2881 }
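/*
 * Usage sketch (illustrative only): xhci_endpoint_reset() is reached through
 * the hc_driver endpoint_reset hook when a class driver clears a stalled
 * endpoint via the USB core.  "udev", "pipe" and "status" below are assumed
 * names for the example, not part of this driver.
 */
#if 0	/* example only, never built */
	if (status == -EPIPE)
		status = usb_clear_halt(udev, pipe);
	/* The core sends CLEAR_FEATURE(ENDPOINT_HALT) to the device and then
	 * calls back into the HCD so the host side (here, the xHC's sequence
	 * number and ring state) is reset as well.
	 */
#endif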
2882
2883 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2884                 struct usb_device *udev, struct usb_host_endpoint *ep,
2885                 unsigned int slot_id)
2886 {
2887         int ret;
2888         unsigned int ep_index;
2889         unsigned int ep_state;
2890
2891         if (!ep)
2892                 return -EINVAL;
2893         ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
2894         if (ret <= 0)
2895                 return -EINVAL;
2896         if (ep->ss_ep_comp.bmAttributes == 0) {
2897                 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
2898                                 " descriptor for ep 0x%x does not support streams\n",
2899                                 ep->desc.bEndpointAddress);
2900                 return -EINVAL;
2901         }
2902
2903         ep_index = xhci_get_endpoint_index(&ep->desc);
2904         ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2905         if (ep_state & EP_HAS_STREAMS ||
2906                         ep_state & EP_GETTING_STREAMS) {
2907                 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
2908                                 "already has streams set up.\n",
2909                                 ep->desc.bEndpointAddress);
2910                 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
2911                                 "dynamic stream context array reallocation.\n");
2912                 return -EINVAL;
2913         }
2914         if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
2915                 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
2916                                 "endpoint 0x%x; URBs are pending.\n",
2917                                 ep->desc.bEndpointAddress);
2918                 return -EINVAL;
2919         }
2920         return 0;
2921 }
2922
2923 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
2924                 unsigned int *num_streams, unsigned int *num_stream_ctxs)
2925 {
2926         unsigned int max_streams;
2927
2928         /* The stream context array size must be a power of two */
2929         *num_stream_ctxs = roundup_pow_of_two(*num_streams);
2930         /*
2931          * Find out how many primary stream array entries the host controller
2932          * supports.  Later we may use secondary stream arrays (similar to 2nd
2933          * level page entries), but that's an optional feature for xHCI host
2934          * controllers. xHCs must support at least 4 stream IDs.
2935          */
2936         max_streams = HCC_MAX_PSA(xhci->hcc_params);
2937         if (*num_stream_ctxs > max_streams) {
2938                 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
2939                                 max_streams);
2940                 *num_stream_ctxs = max_streams;
2941                 *num_streams = max_streams;
2942         }
2943 }
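/*
 * Worked example (illustrative only): a driver asking for 12 usable stream IDs
 * arrives here with *num_streams == 13 (stream 0 included), which is rounded
 * up to a 16-entry stream context array.  If HCC_MAX_PSA() reports that the
 * host only supports 8 primary stream array entries, both values are clamped
 * to 8 and the driver is eventually granted 7 usable streams.
 */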
2944
2945 /* Returns an error code if one of the endpoints already has streams.
2946  * This does not change any data structures; it only checks and gathers
2947  * information.
2948  */
2949 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
2950                 struct usb_device *udev,
2951                 struct usb_host_endpoint **eps, unsigned int num_eps,
2952                 unsigned int *num_streams, u32 *changed_ep_bitmask)
2953 {
2954         unsigned int max_streams;
2955         unsigned int endpoint_flag;
2956         int i;
2957         int ret;
2958
2959         for (i = 0; i < num_eps; i++) {
2960                 ret = xhci_check_streams_endpoint(xhci, udev,
2961                                 eps[i], udev->slot_id);
2962                 if (ret < 0)
2963                         return ret;
2964
2965                 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
2966                 if (max_streams < (*num_streams - 1)) {
2967                         xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
2968                                         eps[i]->desc.bEndpointAddress,
2969                                         max_streams);
2970                         *num_streams = max_streams+1;
2971                 }
2972
2973                 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
2974                 if (*changed_ep_bitmask & endpoint_flag)
2975                         return -EINVAL;
2976                 *changed_ep_bitmask |= endpoint_flag;
2977         }
2978         return 0;
2979 }
2980
2981 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
2982                 struct usb_device *udev,
2983                 struct usb_host_endpoint **eps, unsigned int num_eps)
2984 {
2985         u32 changed_ep_bitmask = 0;
2986         unsigned int slot_id;
2987         unsigned int ep_index;
2988         unsigned int ep_state;
2989         int i;
2990
2991         slot_id = udev->slot_id;
2992         if (!xhci->devs[slot_id])
2993                 return 0;
2994
2995         for (i = 0; i < num_eps; i++) {
2996                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2997                 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2998                 /* Are streams already being freed for the endpoint? */
2999                 if (ep_state & EP_GETTING_NO_STREAMS) {
3000                         xhci_warn(xhci, "WARN Can't disable streams for "
3001                                         "endpoint 0x%x, "
3002                                         "streams are being disabled already.\n",
3003                                         eps[i]->desc.bEndpointAddress);
3004                         return 0;
3005                 }
3006                 /* Are there actually any streams to free? */
3007                 if (!(ep_state & EP_HAS_STREAMS) &&
3008                                 !(ep_state & EP_GETTING_STREAMS)) {
3009                         xhci_warn(xhci, "WARN Can't disable streams for "
3010                                         "endpoint 0x%x, "
3011                                         "streams are already disabled!\n",
3012                                         eps[i]->desc.bEndpointAddress);
3013                         xhci_warn(xhci, "WARN xhci_free_streams() called "
3014                                         "with non-streams endpoint\n");
3015                         return 0;
3016                 }
3017                 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3018         }
3019         return changed_ep_bitmask;
3020 }
3021
3022 /*
3023  * USB device drivers use this function (through the HCD interface in the USB
3024  * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
3025  * coordinate mass storage command queueing across multiple endpoints (basically
3026  * a stream ID == a task ID).
3027  *
3028  * Setting up streams involves allocating the same size stream context array
3029  * for each endpoint and issuing a configure endpoint command for all endpoints.
3030  *
3031  * Don't allow the call to succeed if any endpoint only supports one stream
3032  * (which means it doesn't support streams at all).
3033  *
3034  * Drivers may get fewer stream IDs than they asked for, if the host controller
3035  * hardware or endpoints claim they can't support the number of requested
3036  * stream IDs.
3037  */
3038 int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3039                 struct usb_host_endpoint **eps, unsigned int num_eps,
3040                 unsigned int num_streams, gfp_t mem_flags)
3041 {
3042         int i, ret;
3043         struct xhci_hcd *xhci;
3044         struct xhci_virt_device *vdev;
3045         struct xhci_command *config_cmd;
3046         unsigned int ep_index;
3047         unsigned int num_stream_ctxs;
3048         unsigned long flags;
3049         u32 changed_ep_bitmask = 0;
3050
3051         if (!eps)
3052                 return -EINVAL;
3053
3054         /* Add one to the number of streams requested to account for
3055          * stream 0, which is reserved for xHCI usage.
3056          */
3057         num_streams += 1;
3058         xhci = hcd_to_xhci(hcd);
3059         xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3060                         num_streams);
3061
3062         config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
3063         if (!config_cmd) {
3064                 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
3065                 return -ENOMEM;
3066         }
3067
3068         /* Check to make sure none of the endpoints are already configured for
3069          * streams.  While we're at it, find the maximum number of streams that
3070          * all the endpoints will support and check for duplicate endpoints.
3071          */
3072         spin_lock_irqsave(&xhci->lock, flags);
3073         ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3074                         num_eps, &num_streams, &changed_ep_bitmask);
3075         if (ret < 0) {
3076                 xhci_free_command(xhci, config_cmd);
3077                 spin_unlock_irqrestore(&xhci->lock, flags);
3078                 return ret;
3079         }
3080         if (num_streams <= 1) {
3081                 xhci_warn(xhci, "WARN: endpoints can't handle "
3082                                 "more than one stream.\n");
3083                 xhci_free_command(xhci, config_cmd);
3084                 spin_unlock_irqrestore(&xhci->lock, flags);
3085                 return -EINVAL;
3086         }
3087         vdev = xhci->devs[udev->slot_id];
3088         /* Mark each endpoint as being in transition, so
3089          * xhci_urb_enqueue() will reject all URBs.
3090          */
3091         for (i = 0; i < num_eps; i++) {
3092                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3093                 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3094         }
3095         spin_unlock_irqrestore(&xhci->lock, flags);
3096
3097         /* Setup internal data structures and allocate HW data structures for
3098          * streams (but don't install the HW structures in the input context
3099          * until we're sure all memory allocation succeeded).
3100          */
3101         xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3102         xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3103                         num_stream_ctxs, num_streams);
3104
3105         for (i = 0; i < num_eps; i++) {
3106                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3107                 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3108                                 num_stream_ctxs,
3109                                 num_streams, mem_flags);
3110                 if (!vdev->eps[ep_index].stream_info)
3111                         goto cleanup;
3112                 /* Set maxPstreams in endpoint context and update deq ptr to
3113                  * point to stream context array. FIXME
3114                  */
3115         }
3116
3117         /* Set up the input context for a configure endpoint command. */
3118         for (i = 0; i < num_eps; i++) {
3119                 struct xhci_ep_ctx *ep_ctx;
3120
3121                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3122                 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3123
3124                 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3125                                 vdev->out_ctx, ep_index);
3126                 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3127                                 vdev->eps[ep_index].stream_info);
3128         }
3129         /* Tell the HW to drop its old copy of the endpoint context info
3130          * and add the updated copy from the input context.
3131          */
3132         xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3133                         vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
3134
3135         /* Issue and wait for the configure endpoint command */
3136         ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3137                         false, false);
3138
3139         /* xHC rejected the configure endpoint command for some reason, so we
3140          * leave the old ring intact and free our internal streams data
3141          * structure.
3142          */
3143         if (ret < 0)
3144                 goto cleanup;
3145
3146         spin_lock_irqsave(&xhci->lock, flags);
3147         for (i = 0; i < num_eps; i++) {
3148                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3149                 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3150                 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3151                          udev->slot_id, ep_index);
3152                 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3153         }
3154         xhci_free_command(xhci, config_cmd);
3155         spin_unlock_irqrestore(&xhci->lock, flags);
3156
3157         /* Subtract 1 for stream 0, which drivers can't use */
3158         return num_streams - 1;
3159
3160 cleanup:
3161         /* If it didn't work, free the streams! */
3162         for (i = 0; i < num_eps; i++) {
3163                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3164                 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3165                 vdev->eps[ep_index].stream_info = NULL;
3166                 /* FIXME Unset maxPstreams in endpoint context and
3167                  * update deq ptr to point to the normal endpoint ring.
3168                  */
3169                 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3170                 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3171                 xhci_endpoint_zero(xhci, vdev, eps[i]);
3172         }
3173         xhci_free_command(xhci, config_cmd);
3174         return -ENOMEM;
3175 }
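/*
 * Usage sketch (illustrative only): class drivers do not call
 * xhci_alloc_streams()/xhci_free_streams() directly; they go through the USB
 * core wrappers, roughly as below.  "intf", "eps" and "num_eps" are assumed
 * names for the example.
 */
#if 0	/* example only, never built */
	int streams;

	/* Ask for 16 usable stream IDs; the HCD may grant fewer. */
	streams = usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);
	if (streams < 0)
		return streams;

	/* URBs may now be queued with urb->stream_id set to 1..streams. */

	usb_free_streams(intf, eps, num_eps, GFP_NOIO);
#endif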
3176
3177 /* Transition the endpoint from using streams to being a "normal" endpoint
3178  * without streams.
3179  *
3180  * Modify the endpoint context state, submit a configure endpoint command,
3181  * and free all endpoint rings for streams if that completes successfully.
3182  */
3183 int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3184                 struct usb_host_endpoint **eps, unsigned int num_eps,
3185                 gfp_t mem_flags)
3186 {
3187         int i, ret;
3188         struct xhci_hcd *xhci;
3189         struct xhci_virt_device *vdev;
3190         struct xhci_command *command;
3191         unsigned int ep_index;
3192         unsigned long flags;
3193         u32 changed_ep_bitmask;
3194
3195         xhci = hcd_to_xhci(hcd);
3196         vdev = xhci->devs[udev->slot_id];
3197
3198         /* Set up a configure endpoint command to remove the stream rings */
3199         spin_lock_irqsave(&xhci->lock, flags);
3200         changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3201                         udev, eps, num_eps);
3202         if (changed_ep_bitmask == 0) {
3203                 spin_unlock_irqrestore(&xhci->lock, flags);
3204                 return -EINVAL;
3205         }
3206
3207         /* Use the xhci_command structure from the first endpoint.  We may have
3208          * allocated too many, but the driver may call xhci_free_streams() for
3209          * each endpoint it grouped into one call to xhci_alloc_streams().
3210          */
3211         ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3212         command = vdev->eps[ep_index].stream_info->free_streams_command;
3213         for (i = 0; i < num_eps; i++) {
3214                 struct xhci_ep_ctx *ep_ctx;
3215
3216                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3217                 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3218                 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3219                         EP_GETTING_NO_STREAMS;
3220
3221                 xhci_endpoint_copy(xhci, command->in_ctx,
3222                                 vdev->out_ctx, ep_index);
3223                 xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
3224                                 &vdev->eps[ep_index]);
3225         }
3226         xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3227                         vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
3228         spin_unlock_irqrestore(&xhci->lock, flags);
3229
3230         /* Issue and wait for the configure endpoint command,
3231          * which must succeed.
3232          */
3233         ret = xhci_configure_endpoint(xhci, udev, command,
3234                         false, true);
3235
3236         /* xHC rejected the configure endpoint command for some reason, so we
3237          * leave the streams rings intact.
3238          */
3239         if (ret < 0)
3240                 return ret;
3241
3242         spin_lock_irqsave(&xhci->lock, flags);
3243         for (i = 0; i < num_eps; i++) {
3244                 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3245                 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3246                 vdev->eps[ep_index].stream_info = NULL;
3247                 /* FIXME Unset maxPstreams in endpoint context and
3248                  * update deq ptr to point to the normal endpoint ring.
3249                  */
3250                 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3251                 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3252         }
3253         spin_unlock_irqrestore(&xhci->lock, flags);
3254
3255         return 0;
3256 }
3257
3258 /*
3259  * Deletes endpoint resources for endpoints that were active before a Reset
3260  * Device command, or a Disable Slot command.  The Reset Device command leaves
3261  * the control endpoint intact, whereas the Disable Slot command deletes it.
3262  *
3263  * Must be called with xhci->lock held.
3264  */
3265 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3266         struct xhci_virt_device *virt_dev, bool drop_control_ep)
3267 {
3268         int i;
3269         unsigned int num_dropped_eps = 0;
3270         unsigned int drop_flags = 0;
3271
3272         for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3273                 if (virt_dev->eps[i].ring) {
3274                         drop_flags |= 1 << i;
3275                         num_dropped_eps++;
3276                 }
3277         }
3278         xhci->num_active_eps -= num_dropped_eps;
3279         if (num_dropped_eps)
3280                 xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
3281                                 "%u now active.\n",
3282                                 num_dropped_eps, drop_flags,
3283                                 xhci->num_active_eps);
3284 }
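/*
 * Worked example (illustrative only): if only endpoint indices 1 and 4 still
 * have rings when the loop above runs with drop_control_ep == false, then
 * drop_flags ends up as (1 << 1) | (1 << 4) == 0x12, num_dropped_eps == 2,
 * and num_active_eps is reduced by two.
 */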
3285
3286 /*
3287  * This submits a Reset Device Command, which will set the device state to 0,
3288  * set the device address to 0, and disable all the endpoints except the default
3289  * control endpoint.  The USB core should come back and call
3290  * xhci_address_device(), and then re-set up the configuration.  If this is
3291  * called because of a usb_reset_and_verify_device(), then the old alternate
3292  * settings will be re-installed through the normal bandwidth allocation
3293  * functions.
3294  *
3295  * Wait for the Reset Device command to finish.  Remove all structures
3296  * associated with the endpoints that were disabled.  Clear the input device
3297  * structure?  Cache the rings?  Reset the control endpoint 0 max packet size?
3298  *
3299  * If the virt_dev to be reset does not exist or does not match the udev,
3300  * it means the device is lost, possibly due to the xHC restore error and
3301  * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3302  * re-allocate the device.
3303  */
3304 int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3305 {
3306         int ret, i;
3307         unsigned long flags;
3308         struct xhci_hcd *xhci;
3309         unsigned int slot_id;
3310         struct xhci_virt_device *virt_dev;
3311         struct xhci_command *reset_device_cmd;
3312         int timeleft;
3313         int last_freed_endpoint;
3314         struct xhci_slot_ctx *slot_ctx;
3315         int old_active_eps = 0;
3316
3317         ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3318         if (ret <= 0)
3319                 return ret;
3320         xhci = hcd_to_xhci(hcd);
3321         slot_id = udev->slot_id;
3322         virt_dev = xhci->devs[slot_id];
3323         if (!virt_dev) {
3324                 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3325                                 "not exist. Re-allocate the device\n", slot_id);
3326                 ret = xhci_alloc_dev(hcd, udev);
3327                 if (ret == 1)
3328                         return 0;
3329                 else
3330                         return -EINVAL;
3331         }
3332
3333         if (virt_dev->udev != udev) {
3334                 /* If the virt_dev and the udev do not match, this virt_dev
3335                  * may belong to another udev.
3336                  * Re-allocate the device.
3337                  */
3338                 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3339                                 "not match the udev. Re-allocate the device\n",
3340                                 slot_id);
3341                 ret = xhci_alloc_dev(hcd, udev);
3342                 if (ret == 1)
3343                         return 0;
3344                 else
3345                         return -EINVAL;
3346         }
3347
3348         /* If the device is not set up, there is no point in resetting it */
3349         slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3350         if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3351                                                 SLOT_STATE_DISABLED)
3352                 return 0;
3353
3354         xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3355         /* Allocate the command structure that holds the struct completion.
3356          * Assume we're in process context, since the normal device reset
3357          * process has to wait for the device anyway.  Storage devices are
3358          * reset as part of error handling, so use GFP_NOIO instead of
3359          * GFP_KERNEL.
3360          */
3361         reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
3362         if (!reset_device_cmd) {
3363                 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3364                 return -ENOMEM;
3365         }
3366
3367         /* Attempt to submit the Reset Device command to the command ring */
3368         spin_lock_irqsave(&xhci->lock, flags);
3369         reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
3370
3371         /* The enqueue pointer can be left pointing to the link TRB,
3372          * so we must handle that case.
3373          */
3374         if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
3375                 reset_device_cmd->command_trb =
3376                         xhci->cmd_ring->enq_seg->next->trbs;
3377
3378         list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
3379         ret = xhci_queue_reset_device(xhci, slot_id);
3380         if (ret) {
3381                 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3382                 list_del(&reset_device_cmd->cmd_list);
3383                 spin_unlock_irqrestore(&xhci->lock, flags);
3384                 goto command_cleanup;
3385         }
3386         xhci_ring_cmd_db(xhci);
3387         spin_unlock_irqrestore(&xhci->lock, flags);
3388
3389         /* Wait for the Reset Device command to finish */
3390         timeleft = wait_for_completion_interruptible_timeout(
3391                         reset_device_cmd->completion,
3392                         USB_CTRL_SET_TIMEOUT);
3393         if (timeleft <= 0) {
3394                 xhci_warn(xhci, "%s while waiting for reset device command\n",
3395                                 timeleft == 0 ? "Timeout" : "Signal");
3396                 spin_lock_irqsave(&xhci->lock, flags);
3397                 /* The timeout might have raced with the event ring handler, so
3398                  * only delete from the list if the item isn't poisoned.
3399                  */
3400                 if (reset_device_cmd->cmd_list.next != LIST_POISON1)
3401                         list_del(&reset_device_cmd->cmd_list);
3402                 spin_unlock_irqrestore(&xhci->lock, flags);
3403                 ret = -ETIME;
3404                 goto command_cleanup;
3405         }
3406
3407         /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3408          * unless we tried to reset a slot ID that wasn't enabled,
3409          * or the device wasn't in the addressed or configured state.
3410          */
3411         ret = reset_device_cmd->status;
3412         switch (ret) {
3413         case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
3414         case COMP_CTX_STATE: /* 0.96 completion code for same thing */
3415                 xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
3416                                 slot_id,
3417                                 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3418                 xhci_info(xhci, "Not freeing device rings.\n");
3419                 /* Don't treat this as an error.  May change my mind later. */
3420                 ret = 0;
3421                 goto command_cleanup;
3422         case COMP_SUCCESS:
3423                 xhci_dbg(xhci, "Successful reset device command.\n");
3424                 break;
3425         default:
3426                 if (xhci_is_vendor_info_code(xhci, ret))
3427                         break;
3428                 xhci_warn(xhci, "Unknown completion code %u for "
3429                                 "reset device command.\n", ret);
3430                 ret = -EINVAL;
3431                 goto command_cleanup;
3432         }
3433
3434         /* Free up host controller endpoint resources */
3435         if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3436                 spin_lock_irqsave(&xhci->lock, flags);
3437                 /* Don't delete the default control endpoint resources */
3438                 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3439                 spin_unlock_irqrestore(&xhci->lock, flags);
3440         }
3441
3442         /* Everything but endpoint 0 is disabled, so free or cache the rings. */
3443         last_freed_endpoint = 1;
3444         for (i = 1; i < 31; ++i) {
3445                 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3446
3447                 if (ep->ep_state & EP_HAS_STREAMS) {
3448                         xhci_free_stream_info(xhci, ep->stream_info);
3449                         ep->stream_info = NULL;
3450                         ep->ep_state &= ~EP_HAS_STREAMS;
3451                 }
3452
3453                 if (ep->ring) {
3454                         xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
3455                         last_freed_endpoint = i;
3456                 }
3457                 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3458                         xhci_drop_ep_from_interval_table(xhci,
3459                                         &virt_dev->eps[i].bw_info,
3460                                         virt_dev->bw_table,
3461                                         udev,
3462                                         &virt_dev->eps[i],
3463                                         virt_dev->tt_info);
3464                 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3465         }
3466         /* If necessary, update the number of active TTs on this root port */
3467         xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3468
3469         xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
3470         xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
3471         ret = 0;
3472
3473 command_cleanup:
3474         xhci_free_command(xhci, reset_device_cmd);
3475         return ret;
3476 }
3477
3478 /*
3479  * At this point, the struct usb_device is about to go away, the device has
3480  * disconnected, and all traffic has been stopped and the endpoints have been
3481  * disabled.  Free any HC data structures associated with that device.
3482  */
3483 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3484 {
3485         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3486         struct xhci_virt_device *virt_dev;
3487         unsigned long flags;
3488         u32 state;
3489         int i, ret;
3490
3491         ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3492         /* If the host is halted due to driver unload, we still need to free the
3493          * device.
3494          */
3495         if (ret <= 0 && ret != -ENODEV)
3496                 return;
3497
3498         virt_dev = xhci->devs[udev->slot_id];
3499
3500         /* Stop any wayward timer functions (which may grab the lock) */
3501         for (i = 0; i < 31; ++i) {
3502                 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
3503                 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3504         }
3505
3506         if (udev->usb2_hw_lpm_enabled) {
3507                 xhci_set_usb2_hardware_lpm(hcd, udev, 0);
3508                 udev->usb2_hw_lpm_enabled = 0;
3509         }
3510
3511         spin_lock_irqsave(&xhci->lock, flags);
3512         /* Don't disable the slot if the host controller is dead. */
3513         state = xhci_readl(xhci, &xhci->op_regs->status);
3514         if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3515                         (xhci->xhc_state & XHCI_STATE_HALTED)) {
3516                 xhci_free_virt_device(xhci, udev->slot_id);
3517                 spin_unlock_irqrestore(&xhci->lock, flags);
3518                 return;
3519         }
3520
3521         if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
3522                 spin_unlock_irqrestore(&xhci->lock, flags);
3523                 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3524                 return;
3525         }
3526         xhci_ring_cmd_db(xhci);
3527         spin_unlock_irqrestore(&xhci->lock, flags);
3528         /*
3529          * The command completion event handler will free any data structures
3530          * associated with the slot.  XXX Can the free sleep?
3531          */
3532 }
3533
3534 /*
3535  * Checks if we have enough host controller resources for the default control
3536  * endpoint.
3537  *
3538  * Must be called with xhci->lock held.
3539  */
3540 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3541 {
3542         if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3543                 xhci_dbg(xhci, "Not enough ep ctxs: "
3544                                 "%u active, need to add 1, limit is %u.\n",
3545                                 xhci->num_active_eps, xhci->limit_active_eps);
3546                 return -ENOMEM;
3547         }
3548         xhci->num_active_eps += 1;
3549         xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
3550                         xhci->num_active_eps);
3551         return 0;
3552 }
3553
3554
3555 /*
3556  * Returns 0 if the xHC ran out of device slots, the Enable Slot command
3557  * timed out, or allocating memory failed.  Returns 1 on success.
3558  */
3559 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3560 {
3561         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3562         unsigned long flags;
3563         int timeleft;
3564         int ret;
3565         union xhci_trb *cmd_trb;
3566
3567         spin_lock_irqsave(&xhci->lock, flags);
3568         cmd_trb = xhci->cmd_ring->dequeue;
3569         ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
3570         if (ret) {
3571                 spin_unlock_irqrestore(&xhci->lock, flags);
3572                 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3573                 return 0;
3574         }
3575         xhci_ring_cmd_db(xhci);
3576         spin_unlock_irqrestore(&xhci->lock, flags);
3577
3578         /* XXX: how much time for xHC slot assignment? */
3579         timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
3580                         XHCI_CMD_DEFAULT_TIMEOUT);
3581         if (timeleft <= 0) {
3582                 xhci_warn(xhci, "%s while waiting for a slot\n",
3583                                 timeleft == 0 ? "Timeout" : "Signal");
3584                 /* cancel the enable slot request */
3585                 return xhci_cancel_cmd(xhci, NULL, cmd_trb);
3586         }
3587
3588         if (!xhci->slot_id) {
3589                 xhci_err(xhci, "Error while assigning device slot ID\n");
3590                 return 0;
3591         }
3592
3593         if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3594                 spin_lock_irqsave(&xhci->lock, flags);
3595                 ret = xhci_reserve_host_control_ep_resources(xhci);
3596                 if (ret) {
3597                         spin_unlock_irqrestore(&xhci->lock, flags);
3598                         xhci_warn(xhci, "Not enough host resources, "
3599                                         "active endpoint contexts = %u\n",
3600                                         xhci->num_active_eps);
3601                         goto disable_slot;
3602                 }
3603                 spin_unlock_irqrestore(&xhci->lock, flags);
3604         }
3605         /* Use GFP_NOIO, since this function can be called from
3606          * xhci_discover_or_reset_device(), which may be called as part of
3607          * mass storage driver error handling.
3608          */
3609         if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
3610                 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3611                 goto disable_slot;
3612         }
3613         udev->slot_id = xhci->slot_id;
3614         /* Is this an LS or FS device under a HS hub? */
3615         /* Hub or peripheral? */
3616         return 1;
3617
3618 disable_slot:
3619         /* Disable slot, if we can do it without mem alloc */
3620         spin_lock_irqsave(&xhci->lock, flags);
3621         if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
3622                 xhci_ring_cmd_db(xhci);
3623         spin_unlock_irqrestore(&xhci->lock, flags);
3624         return 0;
3625 }
3626
3627 /*
3628  * Issue an Address Device command (which will issue a SetAddress request to
3629  * the device).
3630  * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
3631  * we should only issue and wait on one address command at a time.
3632  *
3633  * We add one to the device address issued by the hardware because the USB core
3634  * uses address 1 for the root hubs (even though they're not really devices).
3635  */
3636 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3637 {
3638         unsigned long flags;
3639         int timeleft;
3640         struct xhci_virt_device *virt_dev;
3641         int ret = 0;
3642         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3643         struct xhci_slot_ctx *slot_ctx;
3644         struct xhci_input_control_ctx *ctrl_ctx;
3645         u64 temp_64;
3646         union xhci_trb *cmd_trb;
3647
3648         if (!udev->slot_id) {
3649                 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
3650                 return -EINVAL;
3651         }
3652
3653         virt_dev = xhci->devs[udev->slot_id];
3654
3655         if (WARN_ON(!virt_dev)) {
3656                 /*
3657                  * In a plug/unplug torture test with an NEC controller,
3658                  * a NULL-pointer dereference was observed once because virt_dev was NULL.
3659                  * Print a useful warning rather than crash if it is observed again!
3660                  */
3661                 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
3662                         udev->slot_id);
3663                 return -EINVAL;
3664         }
3665
3666         slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3667         /*
3668          * If this is the first Set Address since device plug-in or
3669          * virt_device realloaction after a resume with an xHCI power loss,
3670          * virt_device reallocation after a resume with an xHCI power loss,
3671          */
3672         if (!slot_ctx->dev_info)
3673                 xhci_setup_addressable_virt_dev(xhci, udev);
3674         /* Otherwise, update the control endpoint ring enqueue pointer. */
3675         else
3676                 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
3677         ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
3678         ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3679         ctrl_ctx->drop_flags = 0;
3680
3681         xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3682         xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3683
3684         spin_lock_irqsave(&xhci->lock, flags);
3685         cmd_trb = xhci->cmd_ring->dequeue;
3686         ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
3687                                         udev->slot_id);
3688         if (ret) {
3689                 spin_unlock_irqrestore(&xhci->lock, flags);
3690                 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3691                 return ret;
3692         }
3693         xhci_ring_cmd_db(xhci);
3694         spin_unlock_irqrestore(&xhci->lock, flags);
3695
3696         /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
3697         timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
3698                         XHCI_CMD_DEFAULT_TIMEOUT);
3699         /* FIXME: From section 4.3.4: "Software shall be responsible for timing
3700          * the SetAddress() "recovery interval" required by USB and aborting the
3701          * command on a timeout."
3702          */
3703         if (timeleft <= 0) {
3704                 xhci_warn(xhci, "%s while waiting for address device command\n",
3705                                 timeleft == 0 ? "Timeout" : "Signal");
3706                 /* cancel the address device command */
3707                 ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
3708                 if (ret < 0)
3709                         return ret;
3710                 return -ETIME;
3711         }
3712
3713         switch (virt_dev->cmd_status) {
3714         case COMP_CTX_STATE:
3715         case COMP_EBADSLT:
3716                 xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
3717                                 udev->slot_id);
3718                 ret = -EINVAL;
3719                 break;
3720         case COMP_TX_ERR:
3721                 dev_warn(&udev->dev, "Device not responding to set address.\n");
3722                 ret = -EPROTO;
3723                 break;
3724         case COMP_DEV_ERR:
3725                 dev_warn(&udev->dev, "ERROR: Incompatible device for address "
3726                                 "device command.\n");
3727                 ret = -ENODEV;
3728                 break;
3729         case COMP_SUCCESS:
3730                 xhci_dbg(xhci, "Successful Address Device command\n");
3731                 break;
3732         default:
3733                 xhci_err(xhci, "ERROR: unexpected command completion "
3734                                 "code 0x%x.\n", virt_dev->cmd_status);
3735                 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3736                 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3737                 ret = -EINVAL;
3738                 break;
3739         }
3740         if (ret)
3741                 return ret;
3742
3743         temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3744         xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
3745         xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
3746                  udev->slot_id,
3747                  &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
3748                  (unsigned long long)
3749                  le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
3750         xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
3751                         (unsigned long long)virt_dev->out_ctx->dma);
3752         xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3753         xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3754         xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3755         xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3756         /*
3757          * USB core uses address 1 for the roothubs, so we add one to the
3758          * address given back to us by the HC.
3759          */
3760         slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3761         /* Use kernel assigned address for devices; store xHC assigned
3762          * address locally. */
3763         virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
3764                 + 1;
3765         /* Zero the input context control for later use */
3766         ctrl_ctx->add_flags = 0;
3767         ctrl_ctx->drop_flags = 0;
3768
3769         xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);
3770
3771         return 0;
3772 }
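/*
 * Worked example (illustrative only): if the xHC assigned USB address 1 to
 * this device, the DEV_ADDR_MASK field of the output slot context reads 1 and
 * virt_dev->address is stored as 2, matching the USB core's numbering where
 * each root hub already owns address 1.
 */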
3773
3774 #ifdef CONFIG_USB_SUSPEND
3775
3776 /* BESL to HIRD Encoding array for USB2 LPM */
3777 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
3778         3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
3779
3780 /* Calculate the HIRD/BESL value for the USB2 PORTPMSC register */
3781 static int xhci_calculate_hird_besl(int u2del, bool use_besl)
3782 {
3783         int hird;
3784
3785         if (use_besl) {
3786                 for (hird = 0; hird < 16; hird++) {
3787                         if (xhci_besl_encoding[hird] >= u2del)
3788                                 break;
3789                 }
3790         } else {
3791                 if (u2del <= 50)
3792                         hird = 0;
3793                 else
3794                         hird = (u2del - 51) / 75 + 1;
3795
3796                 if (hird > 15)
3797                         hird = 15;
3798         }
3799
3800         return hird;
3801 }
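/*
 * Worked examples (illustrative only): with use_besl set, u2del = 300 selects
 * the first table entry >= 300, i.e. hird = 3.  Without BESL, u2del = 400
 * gives hird = (400 - 51) / 75 + 1 = 5, any u2del <= 50 gives 0, and the
 * result of that branch is capped at 15.
 */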
3802
3803 static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
3804                                         struct usb_device *udev)
3805 {
3806         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3807         struct dev_info *dev_info;
3808         __le32 __iomem  **port_array;
3809         __le32 __iomem  *addr, *pm_addr;
3810         u32             temp, dev_id;
3811         unsigned int    port_num;
3812         unsigned long   flags;
3813         int             u2del, hird;
3814         int             ret;
3815
3816         if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
3817                         !udev->lpm_capable)
3818                 return -EINVAL;
3819
3820         /* So far we only support LPM for non-hub devices connected directly to a root hub */
3821         if (!udev->parent || udev->parent->parent ||
3822                         udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3823                 return -EINVAL;
3824
3825         spin_lock_irqsave(&xhci->lock, flags);
3826
3827         /* Look for devices in lpm_failed_devs list */
3828         dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
3829                         le16_to_cpu(udev->descriptor.idProduct);
3830         list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
3831                 if (dev_info->dev_id == dev_id) {
3832                         ret = -EINVAL;
3833                         goto finish;
3834                 }
3835         }
3836
3837         port_array = xhci->usb2_ports;
3838         port_num = udev->portnum - 1;
3839
3840         if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
3841                 xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
3842                 ret = -EINVAL;
3843                 goto finish;
3844         }
3845
3846         /*
3847          * Test USB 2.0 software LPM.
3848          * FIXME: some xHCI 1.0 hosts may implement a new register to set up
3849          * hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1
3850          * in the June 2011 errata release.
3851          */
3852         xhci_dbg(xhci, "test port %d software LPM\n", port_num);
3853         /*
3854          * Set L1 Device Slot and HIRD/BESL.
3855          * Check device's USB 2.0 extension descriptor to determine whether
3856          * HIRD or BESL should be used. See the USB 2.0 LPM errata.
3857          */
3858         pm_addr = port_array[port_num] + 1;
3859         u2del = HCS_U2_LATENCY(xhci->hcs_params3);
3860         if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2))
3861                 hird = xhci_calculate_hird_besl(u2del, true);
3862         else
3863                 hird = xhci_calculate_hird_besl(u2del, false);
3864
3865         temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
3866         xhci_writel(xhci, temp, pm_addr);
3867
3868         /* Set port link state to U2(L1) */
3869         addr = port_array[port_num];
3870         xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);
3871
3872         /* wait for ACK */
3873         spin_unlock_irqrestore(&xhci->lock, flags);
3874         msleep(10);
3875         spin_lock_irqsave(&xhci->lock, flags);
3876
3877         /* Check L1 Status */
3878         ret = handshake(xhci, pm_addr, PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
3879         if (ret != -ETIMEDOUT) {
3880                 /* enter L1 successfully */
3881                 temp = xhci_readl(xhci, addr);
3882                 xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
3883                                 port_num, temp);
3884                 ret = 0;
3885         } else {
3886                 temp = xhci_readl(xhci, pm_addr);
3887                 xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
3888                                 port_num, temp & PORT_L1S_MASK);
3889                 ret = -EINVAL;
3890         }
3891
3892         /* Resume the port */
3893         xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);
3894
3895         spin_unlock_irqrestore(&xhci->lock, flags);
3896         msleep(10);
3897         spin_lock_irqsave(&xhci->lock, flags);
3898
3899         /* Clear PLC */
3900         xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);
3901
3902         /* Check PORTSC to make sure the device is in the right state */
3903         if (!ret) {
3904                 temp = xhci_readl(xhci, addr);
3905                 xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
3906                 if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
3907                                 (temp & PORT_PLS_MASK) != XDEV_U0) {
3908                         xhci_dbg(xhci, "port L1 resume failed\n");
3909                         ret = -EINVAL;
3910                 }
3911         }
3912
3913         if (ret) {
3914                 /* Insert dev to lpm_failed_devs list */
3915                 xhci_warn(xhci, "device LPM test failed, may disconnect and "
3916                                 "re-enumerate\n");
3917                 dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
3918                 if (!dev_info) {
3919                         ret = -ENOMEM;
3920                         goto finish;
3921                 }
3922                 dev_info->dev_id = dev_id;
3923                 INIT_LIST_HEAD(&dev_info->list);
3924                 list_add(&dev_info->list, &xhci->lpm_failed_devs);
3925         } else {
3926                 xhci_ring_device(xhci, udev->slot_id);
3927         }
3928
3929 finish:
3930         spin_unlock_irqrestore(&xhci->lock, flags);
3931         return ret;
3932 }
3933
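     /*
      * Enable or disable hardware-controlled USB 2.0 LPM for the root port a
      * device is attached to, by programming HIRD, remote wake enable (RWE)
      * and the hardware LPM enable (HLE) bit in that port's PORTPMSC
      * register.
      */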
3934 int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
3935                         struct usb_device *udev, int enable)
3936 {
3937         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3938         __le32 __iomem  **port_array;
3939         __le32 __iomem  *pm_addr;
3940         u32             temp;
3941         unsigned int    port_num;
3942         unsigned long   flags;
3943         int             u2del, hird;
3944
3945         if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
3946                         !udev->lpm_capable)
3947                 return -EPERM;
3948
3949         if (!udev->parent || udev->parent->parent ||
3950                         udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3951                 return -EPERM;
3952
3953         if (udev->usb2_hw_lpm_capable != 1)
3954                 return -EPERM;
3955
3956         spin_lock_irqsave(&xhci->lock, flags);
3957
3958         port_array = xhci->usb2_ports;
3959         port_num = udev->portnum - 1;
3960         pm_addr = port_array[port_num] + 1;
3961         temp = xhci_readl(xhci, pm_addr);
3962
3963         xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
3964                         enable ? "enable" : "disable", port_num);
3965
3966         u2del = HCS_U2_LATENCY(xhci->hcs_params3);
3967         if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2))
3968                 hird = xhci_calculate_hird_besl(u2del, 1);
3969         else
3970                 hird = xhci_calculate_hird_besl(u2del, 0);
3971
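             /*
              * Program HIRD and remote wake enable first, then set HLE in a
              * separate write, on the assumption that the port should see a
              * valid HIRD/RWE configuration before hardware LPM is enabled.
              */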
3972         if (enable) {
3973                 temp &= ~PORT_HIRD_MASK;
3974                 temp |= PORT_HIRD(hird) | PORT_RWE;
3975                 xhci_writel(xhci, temp, pm_addr);
3976                 temp = xhci_readl(xhci, pm_addr);
3977                 temp |= PORT_HLE;
3978                 xhci_writel(xhci, temp, pm_addr);
3979         } else {
3980                 temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
3981                 xhci_writel(xhci, temp, pm_addr);
3982         }
3983
3984         spin_unlock_irqrestore(&xhci->lock, flags);
3985         return 0;
3986 }
3987
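     /*
      * Run the software LPM test for the device; if it passes and the host
      * also supports hardware-controlled LPM, mark the device capable and
      * enable hardware LPM right away.  Failures are only logged, so this
      * always returns 0 and enumeration is not affected.
      */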
3988 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
3989 {
3990         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3991         int             ret;
3992
3993         ret = xhci_usb2_software_lpm_test(hcd, udev);
3994         if (!ret) {
3995                 xhci_dbg(xhci, "software LPM test succeeded\n");
3996                 if (xhci->hw_lpm_support == 1) {
3997                         udev->usb2_hw_lpm_capable = 1;
3998                         ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
3999                         if (!ret)
4000                                 udev->usb2_hw_lpm_enabled = 1;
4001                 }
4002         }
4003
4004         return 0;
4005 }
4006
4007 #else
4008
4009 int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4010                                 struct usb_device *udev, int enable)
4011 {
4012         return 0;
4013 }
4014
4015 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4016 {
4017         return 0;
4018 }
4019
4020 #endif /* CONFIG_USB_SUSPEND */
4021
4022 /* Once a hub descriptor is fetched for a device, we need to update the xHC's
4023  * internal data structures for the device.
4024  */
4025 int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
4026                         struct usb_tt *tt, gfp_t mem_flags)
4027 {
4028         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4029         struct xhci_virt_device *vdev;
4030         struct xhci_command *config_cmd;
4031         struct xhci_input_control_ctx *ctrl_ctx;
4032         struct xhci_slot_ctx *slot_ctx;
4033         unsigned long flags;
4034         unsigned think_time;
4035         int ret;
4036
4037         /* Ignore root hubs */
4038         if (!hdev->parent)
4039                 return 0;
4040
4041         vdev = xhci->devs[hdev->slot_id];
4042         if (!vdev) {
4043                 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
4044                 return -EINVAL;
4045         }
4046         config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
4047         if (!config_cmd) {
4048                 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
4049                 return -ENOMEM;
4050         }
4051
4052         spin_lock_irqsave(&xhci->lock, flags);
4053         if (hdev->speed == USB_SPEED_HIGH &&
4054                         xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
4055                 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
4056                 xhci_free_command(xhci, config_cmd);
4057                 spin_unlock_irqrestore(&xhci->lock, flags);
4058                 return -ENOMEM;
4059         }
4060
4061         xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
4062         ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
4063         ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4064         slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
4065         slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
4066         if (tt->multi)
4067                 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
4068         if (xhci->hci_version > 0x95) {
4069                 xhci_dbg(xhci, "xHCI version %x needs hub "
4070                                 "TT think time and number of ports\n",
4071                                 (unsigned int) xhci->hci_version);
4072                 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
4073                 /* Set TT think time - convert from ns to FS bit times.
4074                  * 0 = 8 FS bit times, 1 = 16 FS bit times,
4075                  * 2 = 24 FS bit times, 3 = 32 FS bit times.
4076                  *
4077                  * xHCI 1.0: this field shall be 0 if the device is not a
4078                  * High-speed hub.
4079                  */
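                     /* For example, a tt->think_time of 666 ns (8 FS bit
                      * times at ~83 ns per bit) is converted below to 0, and
                      * 1332 ns to 1.
                      */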
4080                 think_time = tt->think_time;
4081                 if (think_time != 0)
4082                         think_time = (think_time / 666) - 1;
4083                 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
4084                         slot_ctx->tt_info |=
4085                                 cpu_to_le32(TT_THINK_TIME(think_time));
4086         } else {
4087                 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
4088                                 "TT think time or number of ports\n",
4089                                 (unsigned int) xhci->hci_version);
4090         }
4091         slot_ctx->dev_state = 0;
4092         spin_unlock_irqrestore(&xhci->lock, flags);
4093
4094         xhci_dbg(xhci, "Set up %s for hub device.\n",
4095                         (xhci->hci_version > 0x95) ?
4096                         "configure endpoint" : "evaluate context");
4097         xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
4098         xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
4099
4100         /* Issue and wait for the configure endpoint or
4101          * evaluate context command.
4102          */
4103         if (xhci->hci_version > 0x95)
4104                 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4105                                 false, false);
4106         else
4107                 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4108                                 true, false);
4109
4110         xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
4111         xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
4112
4113         xhci_free_command(xhci, config_cmd);
4114         return ret;
4115 }
4116
4117 int xhci_get_frame(struct usb_hcd *hcd)
4118 {
4119         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4120         /* EHCI mods by the periodic size.  Why? */
4121         return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
4122 }
4123
4124 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4125 {
4126         struct xhci_hcd         *xhci;
4127         struct device           *dev = hcd->self.controller;
4128         int                     retval;
4129         u32                     temp;
4130
4131         hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
4132
4133         if (usb_hcd_is_primary_hcd(hcd)) {
4134                 xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
4135                 if (!xhci)
4136                         return -ENOMEM;
4137                 *((struct xhci_hcd **) hcd->hcd_priv) = xhci;
4138                 xhci->main_hcd = hcd;
4139                 /* Mark the first roothub as being USB 2.0.
4140                  * The xHCI driver will register the USB 3.0 roothub.
4141                  */
4142                 hcd->speed = HCD_USB2;
4143                 hcd->self.root_hub->speed = USB_SPEED_HIGH;
4144                 /*
4145                  * USB 2.0 roothub under xHCI has an integrated TT
4146                  * (rate matching hub), as opposed to having an OHCI/UHCI
4147                  * companion controller.
4148                  */
4149                 hcd->has_tt = 1;
4150         } else {
4151                 /* xHCI private pointer was set in xhci_pci_probe for the second
4152                  * registered roothub.
4153                  */
4154                 xhci = hcd_to_xhci(hcd);
4155                 temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
4156                 if (HCC_64BIT_ADDR(temp)) {
4157                         xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
4158                         dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
4159                 } else {
4160                         dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
4161                 }
4162                 return 0;
4163         }
4164
4165         xhci->cap_regs = hcd->regs;
4166         xhci->op_regs = hcd->regs +
4167                 HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
4168         xhci->run_regs = hcd->regs +
4169                 (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
4170         /* Cache read-only capability registers */
4171         xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
4172         xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
4173         xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
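             /*
              * hc_capbase packs HCIVERSION into its upper 16 bits; read it
              * first to latch the interface version, then overwrite
              * hcc_params with the real HCCPARAMS register below.
              */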
4174         xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
4175         xhci->hci_version = HC_VERSION(xhci->hcc_params);
4176         xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
4177         xhci_print_registers(xhci);
4178
4179         get_quirks(dev, xhci);
4180
4181         /* xHCI controllers that follow the xHCI 1.0 spec give a spurious
4182          * success event after a short transfer.  This quirk makes the
4183          * driver ignore such spurious events.
4184          */
4185         if (xhci->hci_version > 0x96)
4186                 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
4187
4188         /* Make sure the HC is halted. */
4189         retval = xhci_halt(xhci);
4190         if (retval)
4191                 goto error;
4192
4193         xhci_dbg(xhci, "Resetting HCD\n");
4194         /* Reset the internal HC memory state and registers. */
4195         retval = xhci_reset(xhci);
4196         if (retval)
4197                 goto error;
4198         xhci_dbg(xhci, "Reset complete\n");
4199
4200         temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
4201         if (HCC_64BIT_ADDR(temp)) {
4202                 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
4203                 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
4204         } else {
4205                 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
4206         }
4207
4208         xhci_dbg(xhci, "Calling HCD init\n");
4209         /* Initialize HCD and host controller data structures. */
4210         retval = xhci_init(hcd);
4211         if (retval)
4212                 goto error;
4213         xhci_dbg(xhci, "Called HCD init\n");
4214         return 0;
4215 error:
4216         kfree(xhci);
4217         return retval;
4218 }
4219
4220 MODULE_DESCRIPTION(DRIVER_DESC);
4221 MODULE_AUTHOR(DRIVER_AUTHOR);
4222 MODULE_LICENSE("GPL");
4223
4224 static int __init xhci_hcd_init(void)
4225 {
4226         int retval;
4227
4228         retval = xhci_register_pci();
4229         if (retval < 0) {
4230                 printk(KERN_DEBUG "Problem registering PCI driver.\n");
4231                 return retval;
4232         }
4233         /*
4234          * Check the compiler generated sizes of structures that must be laid
4235          * out in specific ways for hardware access.
4236          */
4237         BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
4238         BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
4239         BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
4240         /* xhci_device_control has eight fields, and also
4241          * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
4242          */
4243         BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
4244         BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
4245         BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
4246         BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
4247         BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
4248         /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
4249         BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
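             /* That is 32 bytes of run registers plus 128 interrupter
              * register sets of 32 bytes each, 4128 bytes in total.
              */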
4250         BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
4251         return 0;
4252 }
4253 module_init(xhci_hcd_init);
4254
4255 static void __exit xhci_hcd_cleanup(void)
4256 {
4257         xhci_unregister_pci();
4258 }
4259 module_exit(xhci_hcd_cleanup);