xhci: give command abortion one more chance before killing xhci
drivers/usb/host/xhci-ring.c
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
 *    least one free TRB in the ring.  This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the command
 *    and endpoint rings; it generates events on the event ring for these.
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"

static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event);

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return TRB_TYPE_LINK_LE32(trb->link.control);
}

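/* Return nonzero if the ring's enqueue pointer currently points at a link TRB. */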
static int enqueue_is_link_trb(struct xhci_ring *ring)
{
	struct xhci_link_trb *link = &ring->enqueue->link;
	return TRB_TYPE_LINK_LE32(link->control);
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (last_trb(xhci, ring, *seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	unsigned long long addr;

	ring->deq_updates++;

	do {
		/*
		 * Update the dequeue pointer further if that was a link TRB or
		 * we're at the end of an event ring segment (which doesn't have
		 * link TRBS)
		 */
		if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
			if (consumer && last_trb_on_last_seg(xhci, ring,
						ring->deq_seg, ring->dequeue)) {
				if (!in_interrupt())
					xhci_dbg(xhci, "Toggle cycle state "
							"for ring %p = %i\n",
							ring,
							(unsigned int)
							ring->cycle_state);
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
			}
			ring->deq_seg = ring->deq_seg->next;
			ring->dequeue = ring->deq_seg->trbs;
		} else {
			ring->dequeue++;
		}
	} while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));

	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:   Will you enqueue more TRBs before calling
 *                      prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer, bool more_trbs_coming, bool isoc)
{
	u32 chain;
	union xhci_trb *next;
	unsigned long long addr;

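	/* Remember the chain bit of the TRB we just queued; any link TRBs we
	 * step over below inherit it (subject to the 0.95/AMD quirks).
	 */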
	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBS)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (!consumer) {
			if (ring != xhci->event_ring) {
				/*
				 * If the caller doesn't plan on enqueueing more
				 * TDs before ringing the doorbell, then we
				 * don't want to give the link TRB to the
				 * hardware just yet.  We'll give the link TRB
				 * back in prepare_ring() just before we enqueue
				 * the TD at the top of the ring.
				 */
				if (!chain && !more_trbs_coming)
					break;

				/* If we're not dealing with 0.95 hardware or
				 * isoc rings on AMD 0.96 host,
				 * carry over the chain bit of the previous TRB
				 * (which may mean the chain bit is cleared).
				 */
				if (!(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST))
						&& !xhci_link_trb_quirk(xhci)) {
					next->link.control &=
						cpu_to_le32(~TRB_CHAIN);
					next->link.control |=
						cpu_to_le32(chain);
				}
				/* Give this link TRB to the hardware */
				wmb();
				next->link.control ^= cpu_to_le32(TRB_CYCLE);
			}
			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt())
					xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
							ring,
							(unsigned int) ring->cycle_state);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring.  See rules
 * above.
 * FIXME: this would be simpler and faster if we just kept track of the number
 * of free TRBs in a ring.
 */
static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int i;
	union xhci_trb *enq = ring->enqueue;
	struct xhci_segment *enq_seg = ring->enq_seg;
	struct xhci_segment *cur_seg;
	unsigned int left_on_ring;

	/* If we are currently pointing to a link TRB, advance the
	 * enqueue pointer before checking for space */
	while (last_trb(xhci, ring, enq_seg, enq)) {
		enq_seg = enq_seg->next;
		enq = enq_seg->trbs;
	}

	/* Check if ring is empty */
	if (enq == ring->dequeue) {
		/* Can't use link trbs */
		left_on_ring = TRBS_PER_SEGMENT - 1;
		for (cur_seg = enq_seg->next; cur_seg != enq_seg;
				cur_seg = cur_seg->next)
			left_on_ring += TRBS_PER_SEGMENT - 1;

		/* Always need one TRB free in the ring. */
		left_on_ring -= 1;
		if (num_trbs > left_on_ring) {
			xhci_warn(xhci, "Not enough room on ring; "
					"need %u TRBs, %u TRBs left\n",
					num_trbs, left_on_ring);
			return 0;
		}
		return 1;
	}
	/* Make sure there's an extra empty TRB available */
	for (i = 0; i <= num_trbs; ++i) {
		if (enq == ring->dequeue)
			return 0;
		enq++;
		while (last_trb(xhci, ring, enq_seg, enq)) {
			enq_seg = enq_seg->next;
			enq = enq_seg->trbs;
		}
	}
	return 1;
}

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");
	xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}

static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
{
	u64 temp_64;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
		xhci_dbg(xhci, "The command ring isn't running, "
				"has it already been stopped?\n");
		return 0;
	}

	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (!(temp_64 & CMD_RING_RUNNING)) {
		xhci_dbg(xhci, "Command ring has already been stopped\n");
		return 0;
	}
	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			&xhci->op_regs->cmd_ring);

	/* Section 4.6.1.2 of xHCI 1.0 spec says software should
	 * time the completion of all xHCI commands, including
	 * the Command Abort operation. If software doesn't see
	 * CRR negated in a timely manner (e.g. longer than 5
	 * seconds), then it should assume that there are
	 * larger problems with the xHC and assert HCRST.
	 */
	ret = handshake(xhci, &xhci->op_regs->cmd_ring,
			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		/* We are about to kill the xhci; re-issue the abort and give
		 * the controller one more chance to stop the command ring
		 * before we halt the host for good.
		 */
		xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			      &xhci->op_regs->cmd_ring);
		udelay(1000);
		ret = handshake(xhci, &xhci->op_regs->cmd_ring,
				CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
		if (ret == 0)
			return 0;

		xhci_err(xhci, "Stopping the command ring failed, "
				"maybe the host is dead\n");
		xhci->xhc_state |= XHCI_STATE_DYING;
		xhci_quiesce(xhci);
		xhci_halt(xhci);
		return -ESHUTDOWN;
	}

	return 0;
}

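/* Allocate a command descriptor for the command to be cancelled and queue it
 * on xhci->cancel_cmd_list, so the command can be turned into a no-op once
 * the ring has been stopped.  Called with xhci->lock held.
 */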
static int xhci_queue_cd(struct xhci_hcd *xhci,
		struct xhci_command *command,
		union xhci_trb *cmd_trb)
{
	struct xhci_cd *cd;
	cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
	if (!cd)
		return -ENOMEM;
	INIT_LIST_HEAD(&cd->cancel_cmd_list);

	cd->command = command;
	cd->cmd_trb = cmd_trb;
	list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);

	return 0;
}

/*
 * Cancel a command that has been issued.
 *
 * Some commands may hang while waiting for an acknowledgement from the
 * usb device.  That is outside of the xHC's ability to control, and it
 * leaves the command ring blocked.  When this occurs, software should
 * intervene to recover the command ring.
 * See Section 4.6.1.1 and 4.6.1.2
 */
int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
		union xhci_trb *cmd_trb)
{
	int retval = 0;
	unsigned long flags;

	spin_lock_irqsave(&xhci->lock, flags);

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_warn(xhci, "Abort the command ring,"
				" but the xHCI is dead.\n");
		retval = -ESHUTDOWN;
		goto fail;
	}

	/* queue the cmd descriptor to cancel_cmd_list */
	retval = xhci_queue_cd(xhci, command, cmd_trb);
	if (retval) {
		xhci_warn(xhci, "Queuing command descriptor failed.\n");
		goto fail;
	}

	/* abort command ring */
	retval = xhci_abort_cmd_ring(xhci);
	if (retval) {
		xhci_err(xhci, "Abort command ring failed\n");
		if (unlikely(retval == -ESHUTDOWN)) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
			xhci_dbg(xhci, "xHCI host controller is dead.\n");
			return retval;
		}
	}

fail:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return retval;
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 * FIXME - check all the stream rings for pending cancellations.
	 */
	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED))
		return;
	xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush.  It'll get there soon enough.
	 */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
						stream_id);
	}
}

/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
		struct xhci_segment *start_seg,
		union xhci_trb	*trb, int *cycle_state)
{
	struct xhci_segment *cur_seg = start_seg;
	struct xhci_generic_trb *generic_trb;

	while (cur_seg->trbs > trb ||
			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
		if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
			*cycle_state ^= 0x1;
		cur_seg = cur_seg->next;
		if (cur_seg == start_seg)
			/* Looped over the entire list.  Oops! */
			return NULL;
	}
	return cur_seg;
}


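/* Map a (slot, endpoint, stream) triple to the transfer ring it refers to,
 * validating the stream ID against the endpoint's stream configuration.
 */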
static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
		xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_segment *new_seg;
	union xhci_trb *new_deq;
	dma_addr_t addr;
	u64 hw_dequeue;
	bool cycle_found = false;
	bool td_last_trb_found = false;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg(xhci, "Finding endpoint context\n");
	/* 4.6.9 the css flag is written to the stream context for streams */
	if (ep->ep_state & EP_HAS_STREAMS) {
		struct xhci_stream_ctx *ctx =
			&ep->stream_info->stream_ctx_array[stream_id];
		hw_dequeue = le64_to_cpu(ctx->stream_ring);
	} else {
		struct xhci_ep_ctx *ep_ctx
			= xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
		hw_dequeue = le64_to_cpu(ep_ctx->deq);
	}

	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	state->new_cycle_state = hw_dequeue & 0x1;

	/*
	 * We want to find the pointer, segment and cycle state of the new trb
	 * (the one after current TD's last_trb). We know the cycle state at
	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
	 * found.
	 */
	do {
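		/* The low bits of hw_dequeue carry the cycle state and other
		 * flags rather than address bits, so mask them off before
		 * comparing against the TRB's DMA address.
		 */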
		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;
			if (td_last_trb_found)
				break;
		}
		if (new_deq == cur_td->last_trb)
			td_last_trb_found = true;

		if (cycle_found &&
		    TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
		    new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
			state->new_cycle_state ^= 0x1;

		next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out */
		if (new_deq == ep->ring->dequeue) {
			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
			state->new_deq_seg = NULL;
			state->new_deq_ptr = NULL;
			return;
		}

	} while (!cycle_found || !td_last_trb_found);

	state->new_deq_seg = new_seg;
	state->new_deq_ptr = new_deq;

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);

	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
			(unsigned long long) addr);
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td, bool flip_cycle)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
			/* Flip the cycle bit (link TRBs can't be the first
			 * or last TRB).
			 */
			if (flip_cycle)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
			/* Flip the cycle bit except on the first or last TRB */
			if (flip_cycle && cur_trb != cur_td->first_trb &&
					cur_trb != cur_td->last_trb)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			cur_trb->generic.field[3] |= cpu_to_le32(
				TRB_TYPE(TRB_TR_NOOP));
			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}

static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state);

void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
			deq_state->new_deq_seg,
			(unsigned long long)deq_state->new_deq_seg->dma,
			deq_state->new_deq_ptr,
			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
			deq_state->new_cycle_state);
	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
			deq_state->new_deq_seg,
			deq_state->new_deq_ptr,
			(u32) deq_state->new_cycle_state);
	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status, char *adjective)
{
	struct usb_hcd *hcd;
	struct urb	*urb;
	struct urb_priv	*urb_priv;

	urb = cur_td->urb;
	urb_priv = urb->hcpriv;
	urb_priv->td_cnt++;
	hcd = bus_to_hcd(urb->dev->bus);

	/* Only giveback urb when this is the last td in urb */
	if (urb_priv->td_cnt == urb_priv->length) {
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
				if (xhci->quirks & XHCI_AMD_PLL_FIX)
					usb_amd_quirk_pll_enable();
			}
		}
		usb_hcd_unlink_urb_from_ep(hcd, urb);

		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(hcd, urb, status);
		xhci_urb_free_priv(xhci, urb_priv);
		spin_lock(&xhci->lock);
	}
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_virt_device *virt_dev;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

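	/* A Stop Endpoint command issued with the suspend bit set (used when
	 * suspending a port) only needs to complete the command on the
	 * device's command wait list; there are no TDs to cancel.
	 */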
	if (unlikely(TRB_TO_SUSPEND_PORT(
			     le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
		slot_id = TRB_TO_SLOT_ID(
			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev,
				event);
		else
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ep->stopped_td = NULL;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
				cur_td->first_trb,
				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci,
				slot_id, ep_index,
				ep->stopped_td->urb->stream_id,
				&deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}

	/* Clear stopped_td if endpoint is not halted */
	if (!(ep->ep_state & EP_HALTED))
		ep->stopped_td = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	struct xhci_virt_ep *temp_ep;
	struct xhci_ring *ring;
	struct xhci_td *cur_td;
	int ret, i, j;
	unsigned long flags;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock_irqsave(&xhci->lock, flags);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
				"exiting.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ret = xhci_halt(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled. If we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
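	/* Walk every endpoint of every active slot and give back all queued
	 * and cancelled TDs with -ESHUTDOWN, since the host will never
	 * complete them now.
	 */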
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++) {
			temp_ep = &xhci->devs[i]->eps[j];
			ring = temp_ep->ring;
			if (!ring)
				continue;
			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
					"ep index %u\n", i, j);
			while (!list_empty(&ring->td_list)) {
				cur_td = list_first_entry(&ring->td_list,
						struct xhci_td,
						td_list);
				list_del_init(&cur_td->td_list);
				if (!list_empty(&cur_td->cancelled_td_list))
					list_del_init(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
			while (!list_empty(&temp_ep->cancelled_td_list)) {
				cur_td = list_first_entry(
						&temp_ep->cancelled_td_list,
						struct xhci_td,
						cancelled_td_list);
				list_del_init(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg(xhci, "Calling usb_hc_died()\n");
	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
	xhci_dbg(xhci, "xHCI host controller is dead.\n");
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for "
				"freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
		return;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
					"of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
					"to incorrect slot or ep state.\n");
			ep_state = le32_to_cpu(ep_ctx->ep_info);
			ep_state &= EP_STATE_MASK;
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
					"slot %u was not enabled.\n", slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
					"completion code of %u.\n",
				  GET_COMP_CODE(le32_to_cpu(event->status)));
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state were correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
			 le64_to_cpu(ep_ctx->deq));
		if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
					 dev->eps[ep_index].queued_deq_ptr) ==
		    (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg;
			ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr;
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq "
					"Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
					dev->eps[ep_index].queued_deq_seg,
					dev->eps[ep_index].queued_deq_ptr);
		}
	}

	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

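/* Handle the completion of a Reset Endpoint command, which clears the halted
 * state of an endpoint.
 */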
static void handle_reset_ep_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	int slot_id;
	unsigned int ep_index;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
		 GET_COMP_CODE(le32_to_cpu(event->status)));

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		xhci_dbg(xhci, "Queueing configure endpoint command\n");
		xhci_queue_configure_endpoint(xhci,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
	}
}

/* Complete the command and delete it from the device's command queue.
 */
1183 static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
1184                 struct xhci_command *command, u32 status)
1185 {
1186         command->status = status;
1187         list_del(&command->cmd_list);
1188         if (command->completion)
1189                 complete(command->completion);
1190         else
1191                 xhci_free_command(xhci, command);
1192 }
1193
1194
1195 /* Check to see if a command in the device's command queue matches this one.
1196  * Signal the completion or free the command, and return 1.  Return 0 if the
1197  * completed command isn't at the head of the command list.
1198  */
1199 static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
1200                 struct xhci_virt_device *virt_dev,
1201                 struct xhci_event_cmd *event)
1202 {
1203         struct xhci_command *command;
1204
1205         if (list_empty(&virt_dev->cmd_list))
1206                 return 0;
1207
1208         command = list_entry(virt_dev->cmd_list.next,
1209                         struct xhci_command, cmd_list);
1210         if (xhci->cmd_ring->dequeue != command->command_trb)
1211                 return 0;
1212
1213         xhci_complete_cmd_in_cmd_wait_list(xhci, command,
1214                         GET_COMP_CODE(le32_to_cpu(event->status)));
1215         return 1;
1216 }
1217
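/*
 * Note (editorial): the cancel machinery below keeps per-command bookkeeping
 * on xhci->cancel_cmd_list.  A minimal sketch of the descriptor, with field
 * names taken from their uses in the functions that follow (the real
 * definition is assumed to live in xhci.h):
 *
 *	struct xhci_cd {
 *		struct xhci_command	*command;
 *		union xhci_trb		*cmd_trb;
 *		struct list_head	cancel_cmd_list;
 *	};
 */
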
1218 /*
1219  * Find the command trb that needs to be cancelled and turn it into a
1220  * NO OP command.  If the command is in the device's command wait
1221  * list, finish and free it.
1222  *
1223  * If we can't find the command trb, we assume it has already been
1224  * executed.
1225  */
1226 static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
1227 {
1228         struct xhci_segment *cur_seg;
1229         union xhci_trb *cmd_trb;
1230         u32 cycle_state;
1231
1232         if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
1233                 return;
1234
1235         /* find the current segment of command ring */
1236         cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
1237                         xhci->cmd_ring->dequeue, &cycle_state);
1238
1239         if (!cur_seg) {
1240                 xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n",
1241                                 xhci->cmd_ring->dequeue,
1242                                 (unsigned long long)
1243                                 xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1244                                         xhci->cmd_ring->dequeue));
1245                 xhci_debug_ring(xhci, xhci->cmd_ring);
1246                 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
1247                 return;
1248         }
1249
1250         /* find the command trb described by cur_cd on the command ring */
1251         for (cmd_trb = xhci->cmd_ring->dequeue;
1252                         cmd_trb != xhci->cmd_ring->enqueue;
1253                         next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
1254                 /* If the trb is link trb, continue */
1255                 if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
1256                         continue;
1257
1258                 if (cur_cd->cmd_trb == cmd_trb) {
1259
1260                         /* If the command is in the device's command wait list, we should
1261                          * finish it and free the command structure.
1262                          */
1263                         if (cur_cd->command)
1264                                 xhci_complete_cmd_in_cmd_wait_list(xhci,
1265                                         cur_cd->command, COMP_CMD_STOP);
1266
1267                         /* get cycle state from the original command trb */
1268                         cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
1269                                 & TRB_CYCLE;
1270
1271                         /* modify the command trb to NO OP command */
1272                         cmd_trb->generic.field[0] = 0;
1273                         cmd_trb->generic.field[1] = 0;
1274                         cmd_trb->generic.field[2] = 0;
1275                         cmd_trb->generic.field[3] = cpu_to_le32(
1276                                         TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
1277                         break;
1278                 }
1279         }
1280 }
1281
1282 static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
1283 {
1284         struct xhci_cd *cur_cd, *next_cd;
1285
1286         if (list_empty(&xhci->cancel_cmd_list))
1287                 return;
1288
1289         list_for_each_entry_safe(cur_cd, next_cd,
1290                         &xhci->cancel_cmd_list, cancel_cmd_list) {
1291                 xhci_cmd_to_noop(xhci, cur_cd);
1292                 list_del(&cur_cd->cancel_cmd_list);
1293                 kfree(cur_cd);
1294         }
1295 }
1296
1297 /*
1298  * Traverse the cancel_cmd_list.  If the command descriptor matching
1299  * cmd_trb is found, free it and return 1; otherwise
1300  * return 0.
1301  */
1302 static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
1303                 union xhci_trb *cmd_trb)
1304 {
1305         struct xhci_cd *cur_cd, *next_cd;
1306
1307         if (list_empty(&xhci->cancel_cmd_list))
1308                 return 0;
1309
1310         list_for_each_entry_safe(cur_cd, next_cd,
1311                         &xhci->cancel_cmd_list, cancel_cmd_list) {
1312                 if (cur_cd->cmd_trb == cmd_trb) {
1313                         if (cur_cd->command)
1314                                 xhci_complete_cmd_in_cmd_wait_list(xhci,
1315                                         cur_cd->command, COMP_CMD_STOP);
1316                         list_del(&cur_cd->cancel_cmd_list);
1317                         kfree(cur_cd);
1318                         return 1;
1319                 }
1320         }
1321
1322         return 0;
1323 }
1324
1325 /*
1326  * If cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the
1327  * trb pointed to by the command ring dequeue pointer is the trb we want
1328  * to cancel.  If cmd_trb_comp_code is COMP_CMD_STOP, we traverse the
1329  * cancel_cmd_list and turn all of the commands described by the command
1330  * descriptors into NO-OP trbs.
1331  */
1332 static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
1333                 int cmd_trb_comp_code)
1334 {
1335         int cur_trb_is_good = 0;
1336
1337         /* Search for the cmd trb pointed to by the command ring dequeue
1338          * pointer in the command descriptor list.  If it is found, free it.
1339          */
1340         cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
1341                         xhci->cmd_ring->dequeue);
1342
1343         if (cmd_trb_comp_code == COMP_CMD_ABORT)
1344                 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1345         else if (cmd_trb_comp_code == COMP_CMD_STOP) {
1346                 /* traverse the cancel_cmd_list and cancel
1347                  * the commands described by the command descriptors
1348                  */
1349                 xhci_cancel_cmd_in_cd_list(xhci);
1350
1351                 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
1352                 /*
1353                  * ring command ring doorbell again to restart the
1354                  * command ring
1355                  */
1356                 if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
1357                         xhci_ring_cmd_db(xhci);
1358         }
1359         return cur_trb_is_good;
1360 }
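
/*
 * Editorial sketch (not part of this file): the COMP_CMD_ABORT/COMP_CMD_STOP
 * completions handled above are triggered when the driver aborts the command
 * ring by setting the CMD_RING_ABORT bit in the CRCR operational register.
 * Roughly, with names as used elsewhere in this driver (an illustration of
 * the register dance, not the exact implementation):
 */
#if 0	/* illustration only */
static void xhci_abort_cmd_ring_sketch(struct xhci_hcd *xhci)
{
	u64 crcr = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);

	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
	xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
	/* then poll CRCR until the Command Ring Running (CRR) bit clears */
}
#endif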
1361
1362 static void handle_cmd_completion(struct xhci_hcd *xhci,
1363                 struct xhci_event_cmd *event)
1364 {
1365         int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1366         u64 cmd_dma;
1367         dma_addr_t cmd_dequeue_dma;
1368         struct xhci_input_control_ctx *ctrl_ctx;
1369         struct xhci_virt_device *virt_dev;
1370         unsigned int ep_index;
1371         struct xhci_ring *ep_ring;
1372         unsigned int ep_state;
1373
1374         cmd_dma = le64_to_cpu(event->cmd_trb);
1375         cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1376                         xhci->cmd_ring->dequeue);
1377         /* Is the command ring deq ptr out of sync with the deq seg ptr? */
1378         if (cmd_dequeue_dma == 0) {
1379                 xhci->error_bitmask |= 1 << 4;
1380                 return;
1381         }
1382         /* Does the DMA address match our internal dequeue pointer address? */
1383         if (cmd_dma != (u64) cmd_dequeue_dma) {
1384                 xhci->error_bitmask |= 1 << 5;
1385                 return;
1386         }
1387
1388         if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
1389                 (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
1390                 /* If the return value is 0, the trb pointed to by the
1391                  * command ring dequeue pointer is a good trb: one we
1392                  * did not want to cancel, but which has been stopped
1393                  * by the host anyway, so handle it normally.
1394                  * Otherwise, invoke inc_deq() and return.
1395                  */
1396                 if (handle_stopped_cmd_ring(xhci,
1397                                 GET_COMP_CODE(le32_to_cpu(event->status)))) {
1398                         inc_deq(xhci, xhci->cmd_ring, false);
1399                         return;
1400                 }
1401         }
1402
1403         switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
1404                 & TRB_TYPE_BITMASK) {
1405         case TRB_TYPE(TRB_ENABLE_SLOT):
1406                 if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
1407                         xhci->slot_id = slot_id;
1408                 else
1409                         xhci->slot_id = 0;
1410                 complete(&xhci->addr_dev);
1411                 break;
1412         case TRB_TYPE(TRB_DISABLE_SLOT):
1413                 if (xhci->devs[slot_id]) {
1414                         if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
1415                                 /* Delete default control endpoint resources */
1416                                 xhci_free_device_endpoint_resources(xhci,
1417                                                 xhci->devs[slot_id], true);
1418                         xhci_free_virt_device(xhci, slot_id);
1419                 }
1420                 break;
1421         case TRB_TYPE(TRB_CONFIG_EP):
1422                 virt_dev = xhci->devs[slot_id];
1423                 if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
1424                         break;
1425                 /*
1426                  * Configure endpoint commands can come from the USB core
1427                  * configuration or alt setting changes, or because the HW
1428                  * needed an extra configure endpoint command after a reset
1429                  * endpoint command or streams were being configured.
1430                  * If the command was for a halted endpoint, the xHCI driver
1431                  * is not waiting on the configure endpoint command.
1432                  */
1433                 ctrl_ctx = xhci_get_input_control_ctx(xhci,
1434                                 virt_dev->in_ctx);
1435                 /* Input ctx add_flags are the endpoint index plus one */
1436                 ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
1437                 /* A usb_set_interface() call directly after clearing a halted
1438                  * condition may race on this quirky hardware.  Not worth
1439                  * worrying about, since this is prototype hardware.  Not sure
1440                  * if this will work for streams, but streams support was
1441                  * untested on this prototype.
1442                  */
1443                 if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
1444                                 ep_index != (unsigned int) -1 &&
1445                     le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
1446                     le32_to_cpu(ctrl_ctx->drop_flags)) {
1447                         ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
1448                         ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1449                         if (!(ep_state & EP_HALTED))
1450                                 goto bandwidth_change;
1451                         xhci_dbg(xhci, "Completed config ep cmd - "
1452                                         "last ep index = %d, state = %d\n",
1453                                         ep_index, ep_state);
1454                         /* Clear internal halted state and restart ring(s) */
1455                         xhci->devs[slot_id]->eps[ep_index].ep_state &=
1456                                 ~EP_HALTED;
1457                         ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1458                         break;
1459                 }
1460 bandwidth_change:
1461                 xhci_dbg(xhci, "Completed config ep cmd\n");
1462                 xhci->devs[slot_id]->cmd_status =
1463                         GET_COMP_CODE(le32_to_cpu(event->status));
1464                 complete(&xhci->devs[slot_id]->cmd_completion);
1465                 break;
1466         case TRB_TYPE(TRB_EVAL_CONTEXT):
1467                 virt_dev = xhci->devs[slot_id];
1468                 if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
1469                         break;
1470                 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
1471                 complete(&xhci->devs[slot_id]->cmd_completion);
1472                 break;
1473         case TRB_TYPE(TRB_ADDR_DEV):
1474                 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
1475                 complete(&xhci->addr_dev);
1476                 break;
1477         case TRB_TYPE(TRB_STOP_RING):
1478                 handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
1479                 break;
1480         case TRB_TYPE(TRB_SET_DEQ):
1481                 handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
1482                 break;
1483         case TRB_TYPE(TRB_CMD_NOOP):
1484                 break;
1485         case TRB_TYPE(TRB_RESET_EP):
1486                 handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
1487                 break;
1488         case TRB_TYPE(TRB_RESET_DEV):
1489                 xhci_dbg(xhci, "Completed reset device command.\n");
1490                 slot_id = TRB_TO_SLOT_ID(
1491                         le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
1492                 virt_dev = xhci->devs[slot_id];
1493                 if (virt_dev)
1494                         handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
1495                 else
1496                         xhci_warn(xhci, "Reset device command completion "
1497                                         "for disabled slot %u\n", slot_id);
1498                 break;
1499         case TRB_TYPE(TRB_NEC_GET_FW):
1500                 if (!(xhci->quirks & XHCI_NEC_HOST)) {
1501                         xhci->error_bitmask |= 1 << 6;
1502                         break;
1503                 }
1504                 xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
1505                          NEC_FW_MAJOR(le32_to_cpu(event->status)),
1506                          NEC_FW_MINOR(le32_to_cpu(event->status)));
1507                 break;
1508         default:
1509                 /* Skip over unknown commands on the event ring */
1510                 xhci->error_bitmask |= 1 << 6;
1511                 break;
1512         }
1513         inc_deq(xhci, xhci->cmd_ring, false);
1514 }
1515
1516 static void handle_vendor_event(struct xhci_hcd *xhci,
1517                 union xhci_trb *event)
1518 {
1519         u32 trb_type;
1520
1521         trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
1522         xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1523         if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1524                 handle_cmd_completion(xhci, &event->event_cmd);
1525 }
1526
1527 /* @port_id: the one-based port ID from the hardware (indexed from array of all
1528  * port registers -- USB 3.0 and USB 2.0).
1529  *
1530  * Returns a zero-based port number, which is suitable for indexing into each of
1531  * the split roothubs' port arrays and bus state arrays.
1532  * Add one to it in order to call xhci_find_slot_id_by_port.
1533  */
1534 static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
1535                 struct xhci_hcd *xhci, u32 port_id)
1536 {
1537         unsigned int i;
1538         unsigned int num_similar_speed_ports = 0;
1539
1540         /* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
1541          * and usb2_ports are 0-based indexes.  Count the number of similar
1542          * speed ports, up to 1 port before this port.
1543          */
1544         for (i = 0; i < (port_id - 1); i++) {
1545                 u8 port_speed = xhci->port_array[i];
1546
1547                 /*
1548                  * Skip ports that don't have known speeds, or have duplicate
1549                  * Extended Capabilities port speed entries.
1550                  */
1551                 if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
1552                         continue;
1553
1554                 /*
1555                  * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
1556                  * 1.1 ports are under the USB 2.0 hub.  If the port speed
1557                  * matches the device speed, it's a similar speed port.
1558                  */
1559                 if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
1560                         num_similar_speed_ports++;
1561         }
1562         return num_similar_speed_ports;
1563 }
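
/*
 * Worked example (editorial): with xhci->port_array = { 0x02, 0x03, 0x02,
 * 0x03 }, a Port Status Change Event for hardware port_id 4 belongs to the
 * USB 3.0 roothub; of hardware ports 1-3 only port 2 is a similar-speed
 * port, so the function above returns 1 -- the second USB 3.0 port,
 * zero-based.
 */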
1564
1565 static void handle_port_status(struct xhci_hcd *xhci,
1566                 union xhci_trb *event)
1567 {
1568         struct usb_hcd *hcd;
1569         u32 port_id;
1570         u32 temp, temp1;
1571         int max_ports;
1572         int slot_id;
1573         unsigned int faked_port_index;
1574         u8 major_revision;
1575         struct xhci_bus_state *bus_state;
1576         __le32 __iomem **port_array;
1577         bool bogus_port_status = false;
1578
1579         /* Port status change events always have a successful completion code */
1580         if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
1581                 xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
1582                 xhci->error_bitmask |= 1 << 8;
1583         }
1584         port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
1585         xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
1586
1587         max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1588         if ((port_id <= 0) || (port_id > max_ports)) {
1589                 xhci_warn(xhci, "Invalid port id %d\n", port_id);
1590                 bogus_port_status = true;
1591                 goto cleanup;
1592         }
1593
1594         /* Figure out which usb_hcd this port is attached to:
1595          * is it a USB 3.0 port or a USB 2.0/1.1 port?
1596          */
1597         major_revision = xhci->port_array[port_id - 1];
1598         if (major_revision == 0) {
1599                 xhci_warn(xhci, "Event for port %u not in "
1600                                 "Extended Capabilities, ignoring.\n",
1601                                 port_id);
1602                 bogus_port_status = true;
1603                 goto cleanup;
1604         }
1605         if (major_revision == DUPLICATE_ENTRY) {
1606                 xhci_warn(xhci, "Event for port %u duplicated in "
1607                                 "Extended Capabilities, ignoring.\n",
1608                                 port_id);
1609                 bogus_port_status = true;
1610                 goto cleanup;
1611         }
1612
1613         /*
1614          * Hardware port IDs reported by a Port Status Change Event include USB
1615          * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
1616          * resume event, but we first need to translate the hardware port ID
1617          * into the index into the ports on the correct split roothub, and the
1618          * correct bus_state structure.
1619          */
1620         /* Find the right roothub. */
1621         hcd = xhci_to_hcd(xhci);
1622         if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
1623                 hcd = xhci->shared_hcd;
1624         bus_state = &xhci->bus_state[hcd_index(hcd)];
1625         if (hcd->speed == HCD_USB3)
1626                 port_array = xhci->usb3_ports;
1627         else
1628                 port_array = xhci->usb2_ports;
1629         /* Find the faked port hub number */
1630         faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
1631                         port_id);
1632
1633         temp = xhci_readl(xhci, port_array[faked_port_index]);
1634         if (hcd->state == HC_STATE_SUSPENDED) {
1635                 xhci_dbg(xhci, "resume root hub\n");
1636                 usb_hcd_resume_root_hub(hcd);
1637         }
1638
1639         if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1640                 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1641
1642                 temp1 = xhci_readl(xhci, &xhci->op_regs->command);
1643                 if (!(temp1 & CMD_RUN)) {
1644                         xhci_warn(xhci, "xHC is not running.\n");
1645                         goto cleanup;
1646                 }
1647
1648                 if (DEV_SUPERSPEED(temp)) {
1649                         xhci_dbg(xhci, "resume SS port %d\n", port_id);
1650                         xhci_set_link_state(xhci, port_array, faked_port_index,
1651                                                 XDEV_U0);
1652                         slot_id = xhci_find_slot_id_by_port(hcd, xhci,
1653                                         faked_port_index + 1);
1654                         if (!slot_id) {
1655                                 xhci_dbg(xhci, "slot_id is zero\n");
1656                                 goto cleanup;
1657                         }
1658                         xhci_ring_device(xhci, slot_id);
1659                         xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1660                         /* Clear PORT_PLC */
1661                         xhci_test_and_clear_bit(xhci, port_array,
1662                                                 faked_port_index, PORT_PLC);
1663                 } else {
1664                         xhci_dbg(xhci, "resume HS port %d\n", port_id);
1665                         bus_state->resume_done[faked_port_index] = jiffies +
1666                                 msecs_to_jiffies(20);
1667                         mod_timer(&hcd->rh_timer,
1668                                   bus_state->resume_done[faked_port_index]);
1669                         /* Do the rest in GetPortStatus */
1670                 }
1671         }
1672
1673         if (hcd->speed != HCD_USB3)
1674                 xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
1675                                         PORT_PLC);
1676
1677 cleanup:
1678         /* Update event ring dequeue pointer before dropping the lock */
1679         inc_deq(xhci, xhci->event_ring, true);
1680
1681         /* Don't make the USB core poll the roothub if we got a bad port status
1682          * change event.  Besides, at that point we can't tell which roothub
1683          * (USB 2.0 or USB 3.0) to kick.
1684          */
1685         if (bogus_port_status)
1686                 return;
1687
1688         /*
1689          * xHCI port-status-change events occur when the "or" of all the
1690          * status-change bits in the portsc register changes from 0 to 1.
1691          * New status changes won't cause an event if any other change
1692          * bits are still set.  When an event occurs, switch over to
1693          * polling to avoid losing status changes.
1694          */
1695         xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1696         set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1697         spin_unlock(&xhci->lock);
1698         /* Pass this up to the core */
1699         usb_hcd_poll_rh_status(hcd);
1700         spin_lock(&xhci->lock);
1701 }
1702
1703 /*
1704  * This TD is defined by the TRBs starting at start_trb in start_seg and ending
1705  * at end_trb, which may be in another segment.  If the suspect DMA address is a
1706  * TRB in this TD, this function returns that TRB's segment.  Otherwise it
1707  * returns 0.
1708  * returns NULL.
1709 struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
1710                 union xhci_trb  *start_trb,
1711                 union xhci_trb  *end_trb,
1712                 dma_addr_t      suspect_dma)
1713 {
1714         dma_addr_t start_dma;
1715         dma_addr_t end_seg_dma;
1716         dma_addr_t end_trb_dma;
1717         struct xhci_segment *cur_seg;
1718
1719         start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
1720         cur_seg = start_seg;
1721
1722         do {
1723                 if (start_dma == 0)
1724                         return NULL;
1725                 /* We may get an event for a Link TRB in the middle of a TD */
1726                 end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
1727                                 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
1728                 /* If the end TRB isn't in this segment, this is set to 0 */
1729                 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
1730
1731                 if (end_trb_dma > 0) {
1732                         /* The end TRB is in this segment, so suspect should be here */
1733                         if (start_dma <= end_trb_dma) {
1734                                 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
1735                                         return cur_seg;
1736                         } else {
1737                                 /* Case for one segment with
1738                                  * a TD wrapped around to the top
1739                                  */
1740                                 if ((suspect_dma >= start_dma &&
1741                                                         suspect_dma <= end_seg_dma) ||
1742                                                 (suspect_dma >= cur_seg->dma &&
1743                                                  suspect_dma <= end_trb_dma))
1744                                         return cur_seg;
1745                         }
1746                         return NULL;
1747                 } else {
1748                         /* Might still be somewhere in this segment */
1749                         if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
1750                                 return cur_seg;
1751                 }
1752                 cur_seg = cur_seg->next;
1753                 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
1754         } while (cur_seg != start_seg);
1755
1756         return NULL;
1757 }
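
/*
 * Editorial note: the wrap case above handles a TD on a single-segment ring
 * that starts near the end of the segment and continues from the top; the
 * suspect DMA address matches if it falls in [start_dma, end of segment] or
 * in [segment base, end_trb_dma].
 */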
1758
1759 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1760                 unsigned int slot_id, unsigned int ep_index,
1761                 unsigned int stream_id,
1762                 struct xhci_td *td, union xhci_trb *event_trb)
1763 {
1764         struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
1765         ep->ep_state |= EP_HALTED;
1766         ep->stopped_td = td;
1767         ep->stopped_stream = stream_id;
1768
1769         xhci_queue_reset_ep(xhci, slot_id, ep_index);
1770         xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
1771
1772         ep->stopped_td = NULL;
1773         ep->stopped_stream = 0;
1774
1775         xhci_ring_cmd_db(xhci);
1776 }
1777
1778 /* Check if an error has halted the endpoint ring.  The class driver will
1779  * cleanup the halt for a non-default control endpoint if we indicate a stall.
1780  * However, a babble and other errors also halt the endpoint ring, and the class
1781  * driver won't clear the halt in that case, so we need to issue a Set Transfer
1782  * Ring Dequeue Pointer command manually.
1783  */
1784 static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
1785                 struct xhci_ep_ctx *ep_ctx,
1786                 unsigned int trb_comp_code)
1787 {
1788         /* TRB completion codes that may require a manual halt cleanup */
1789         if (trb_comp_code == COMP_TX_ERR ||
1790                         trb_comp_code == COMP_BABBLE ||
1791                         trb_comp_code == COMP_SPLIT_ERR)
1792                 /* The 0.95 spec says a babbling control endpoint
1793                  * is not halted. The 0.96 spec says it is.  Some HW
1794                  * claims to be 0.95 compliant, but it halts the control
1795                  * endpoint anyway.  Check if a babble halted the
1796                  * endpoint.
1797                  */
1798                 if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1799                     cpu_to_le32(EP_STATE_HALTED))
1800                         return 1;
1801
1802         return 0;
1803 }
1804
1805 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
1806 {
1807         if (trb_comp_code >= 224 && trb_comp_code <= 255) {
1808                 /* Vendor defined "informational" completion code,
1809                  * treat as not-an-error.
1810                  */
1811                 xhci_dbg(xhci, "Vendor defined info completion code %u\n",
1812                                 trb_comp_code);
1813                 xhci_dbg(xhci, "Treating code as success.\n");
1814                 return 1;
1815         }
1816         return 0;
1817 }
1818
1819 /*
1820  * Finish the td processing and remove the td from its td list;
1821  * return 1 if the urb can be given back.
1822  */
1823 static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
1824         union xhci_trb *event_trb, struct xhci_transfer_event *event,
1825         struct xhci_virt_ep *ep, int *status, bool skip)
1826 {
1827         struct xhci_virt_device *xdev;
1828         struct xhci_ring *ep_ring;
1829         unsigned int slot_id;
1830         int ep_index;
1831         struct urb *urb = NULL;
1832         struct xhci_ep_ctx *ep_ctx;
1833         int ret = 0;
1834         struct urb_priv *urb_priv;
1835         u32 trb_comp_code;
1836
1837         slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1838         xdev = xhci->devs[slot_id];
1839         ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1840         ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1841         ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1842         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1843
1844         if (skip)
1845                 goto td_cleanup;
1846
1847         if (trb_comp_code == COMP_STOP_INVAL ||
1848                         trb_comp_code == COMP_STOP) {
1849                 /* The Endpoint Stop Command completion will take care of any
1850                  * stopped TDs.  A stopped TD may be restarted, so don't update
1851                  * the ring dequeue pointer or take this TD off any lists yet.
1852                  */
1853                 ep->stopped_td = td;
1854                 return 0;
1855         } else {
1856                 if (trb_comp_code == COMP_STALL ||
1857                     xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
1858                                                       trb_comp_code)) {
1859                         /* Issue a reset endpoint command to clear the host side
1860                          * halt, followed by a set dequeue command to move the
1861                          * dequeue pointer past the TD.
1862                          * The class driver clears the device side halt later.
1863                          */
1864                         xhci_cleanup_halted_endpoint(xhci,
1865                                         slot_id, ep_index, ep_ring->stream_id,
1866                                         td, event_trb);
1867                 } else {
1868                         /* Update ring dequeue pointer */
1869                         while (ep_ring->dequeue != td->last_trb)
1870                                 inc_deq(xhci, ep_ring, false);
1871                         inc_deq(xhci, ep_ring, false);
1872                 }
1873
1874 td_cleanup:
1875                 /* Clean up the endpoint's TD list */
1876                 urb = td->urb;
1877                 urb_priv = urb->hcpriv;
1878
1879                 /* Do one last check of the actual transfer length.
1880                  * If the host controller said we transferred more data than
1881                  * the buffer length, urb->actual_length will be a very big
1882                  * number (since it's unsigned).  Play it safe and say we didn't
1883                  * transfer anything.
1884                  */
1885                 if (urb->actual_length > urb->transfer_buffer_length) {
1886                         xhci_warn(xhci, "URB transfer length is wrong, "
1887                                         "xHC issue? req. len = %u, "
1888                                         "act. len = %u\n",
1889                                         urb->transfer_buffer_length,
1890                                         urb->actual_length);
1891                         urb->actual_length = 0;
1892                         if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1893                                 *status = -EREMOTEIO;
1894                         else
1895                                 *status = 0;
1896                 }
1897                 list_del_init(&td->td_list);
1898                 /* Was this TD slated to be cancelled but completed anyway? */
1899                 if (!list_empty(&td->cancelled_td_list))
1900                         list_del_init(&td->cancelled_td_list);
1901
1902                 urb_priv->td_cnt++;
1903                 /* Giveback the urb when all the tds are completed */
1904                 if (urb_priv->td_cnt == urb_priv->length) {
1905                         ret = 1;
1906                         if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1907                                 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
1908                                 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
1909                                         == 0) {
1910                                         if (xhci->quirks & XHCI_AMD_PLL_FIX)
1911                                                 usb_amd_quirk_pll_enable();
1912                                 }
1913                         }
1914                 }
1915         }
1916
1917         return ret;
1918 }
1919
1920 /*
1921  * Process control tds, update urb status and actual_length.
1922  */
1923 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1924         union xhci_trb *event_trb, struct xhci_transfer_event *event,
1925         struct xhci_virt_ep *ep, int *status)
1926 {
1927         struct xhci_virt_device *xdev;
1928         struct xhci_ring *ep_ring;
1929         unsigned int slot_id;
1930         int ep_index;
1931         struct xhci_ep_ctx *ep_ctx;
1932         u32 trb_comp_code;
1933
1934         slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1935         xdev = xhci->devs[slot_id];
1936         ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1937         ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1938         ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1939         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1940
1941         xhci_debug_trb(xhci, xhci->event_ring->dequeue);
1942         switch (trb_comp_code) {
1943         case COMP_SUCCESS:
1944                 if (event_trb == ep_ring->dequeue) {
1945                         xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
1946                                         "without IOC set??\n");
1947                         *status = -ESHUTDOWN;
1948                 } else if (event_trb != td->last_trb) {
1949                         xhci_warn(xhci, "WARN: Success on ctrl data TRB "
1950                                         "without IOC set??\n");
1951                         *status = -ESHUTDOWN;
1952                 } else {
1953                         *status = 0;
1954                 }
1955                 break;
1956         case COMP_SHORT_TX:
1957                 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1958                         *status = -EREMOTEIO;
1959                 else
1960                         *status = 0;
1961                 break;
1962         case COMP_STOP_INVAL:
1963         case COMP_STOP:
1964                 return finish_td(xhci, td, event_trb, event, ep, status, false);
1965         default:
1966                 if (!xhci_requires_manual_halt_cleanup(xhci,
1967                                         ep_ctx, trb_comp_code))
1968                         break;
1969                 xhci_dbg(xhci, "TRB error code %u, "
1970                                 "halted endpoint index = %u\n",
1971                                 trb_comp_code, ep_index);
1972                 /* else fall through */
1973         case COMP_STALL:
1974                 /* Did we transfer part of the data (middle) phase? */
1975                 if (event_trb != ep_ring->dequeue &&
1976                                 event_trb != td->last_trb)
1977                         td->urb->actual_length =
1978                                 td->urb->transfer_buffer_length -
1979                                 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1980                 else
1981                         td->urb->actual_length = 0;
1982
1983                 return finish_td(xhci, td, event_trb, event, ep, status, false);
1984         }
1985         /*
1986          * Did we transfer any data, despite the errors that might have
1987          * happened?  I.e. did we get past the setup stage?
1988          */
1989         if (event_trb != ep_ring->dequeue) {
1990                 /* The event was for the status stage */
1991                 if (event_trb == td->last_trb) {
1992                         if (td->urb_length_set) {
1993                                 /* Don't overwrite a previously set error code
1994                                  */
1995                                 if ((*status == -EINPROGRESS || *status == 0) &&
1996                                                 (td->urb->transfer_flags
1997                                                  & URB_SHORT_NOT_OK))
1998                                         /* Did we already see a short data
1999                                          * stage? */
2000                                         *status = -EREMOTEIO;
2001                         } else {
2002                                 td->urb->actual_length =
2003                                         td->urb->transfer_buffer_length;
2004                         }
2005                 } else {
2006                         /*
2007                          * Maybe the event was for the data stage? If so, update
2008                          * already the actual_length of the URB and flag it as
2009                          * set, so that it is not overwritten in the event for
2010                          * the last TRB.
2011                          */
2012                         td->urb_length_set = true;
2013                         td->urb->actual_length =
2014                                 td->urb->transfer_buffer_length -
2015                                 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2016                         xhci_dbg(xhci, "Waiting for status "
2017                                         "stage event\n");
2018                         return 0;
2019                 }
2020         }
2021
2022         return finish_td(xhci, td, event_trb, event, ep, status, false);
2023 }
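
/*
 * Editorial recap: a control TD spans up to three TRBs.  At event time the
 * ring's dequeue pointer still sits on the setup TRB, so in the code above
 * event_trb == ep_ring->dequeue identifies the setup stage, event_trb ==
 * td->last_trb the status stage, and anything in between the data stage.
 */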
2024
2025 /*
2026  * Process isochronous tds, update urb packet status and actual_length.
2027  */
2028 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2029         union xhci_trb *event_trb, struct xhci_transfer_event *event,
2030         struct xhci_virt_ep *ep, int *status)
2031 {
2032         struct xhci_ring *ep_ring;
2033         struct urb_priv *urb_priv;
2034         int idx;
2035         int len = 0;
2036         union xhci_trb *cur_trb;
2037         struct xhci_segment *cur_seg;
2038         struct usb_iso_packet_descriptor *frame;
2039         u32 trb_comp_code;
2040         bool skip_td = false;
2041
2042         ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2043         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2044         urb_priv = td->urb->hcpriv;
2045         idx = urb_priv->td_cnt;
2046         frame = &td->urb->iso_frame_desc[idx];
2047
2048         /* handle completion code */
2049         switch (trb_comp_code) {
2050         case COMP_SUCCESS:
2051                 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
2052                         frame->status = 0;
2053                         break;
2054                 }
2055                 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2056                         trb_comp_code = COMP_SHORT_TX;
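                /* fall through */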
2057         case COMP_SHORT_TX:
2058                 frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2059                                 -EREMOTEIO : 0;
2060                 break;
2061         case COMP_BW_OVER:
2062                 frame->status = -ECOMM;
2063                 skip_td = true;
2064                 break;
2065         case COMP_BUFF_OVER:
2066         case COMP_BABBLE:
2067                 frame->status = -EOVERFLOW;
2068                 skip_td = true;
2069                 break;
2070         case COMP_DEV_ERR:
2071         case COMP_STALL:
2072                 frame->status = -EPROTO;
2073                 skip_td = true;
2074                 break;
2075         case COMP_TX_ERR:
2076                 frame->status = -EPROTO;
2077                 if (event_trb != td->last_trb)
2078                         return 0;
2079                 skip_td = true;
2080                 break;
2081         case COMP_STOP:
2082         case COMP_STOP_INVAL:
2083                 break;
2084         default:
2085                 frame->status = -1;
2086                 break;
2087         }
2088
2089         if (trb_comp_code == COMP_SUCCESS || skip_td) {
2090                 frame->actual_length = frame->length;
2091                 td->urb->actual_length += frame->length;
2092         } else {
2093                 for (cur_trb = ep_ring->dequeue,
2094                      cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
2095                      next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
2096                         if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
2097                             !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
2098                                 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
2099                 }
2100                 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2101                         EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2102
2103                 if (trb_comp_code != COMP_STOP_INVAL) {
2104                         frame->actual_length = len;
2105                         td->urb->actual_length += len;
2106                 }
2107         }
2108
2109         return finish_td(xhci, td, event_trb, event, ep, status, false);
2110 }
2111
2112 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2113                         struct xhci_transfer_event *event,
2114                         struct xhci_virt_ep *ep, int *status)
2115 {
2116         struct xhci_ring *ep_ring;
2117         struct urb_priv *urb_priv;
2118         struct usb_iso_packet_descriptor *frame;
2119         int idx;
2120
2121         ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2122         urb_priv = td->urb->hcpriv;
2123         idx = urb_priv->td_cnt;
2124         frame = &td->urb->iso_frame_desc[idx];
2125
2126         /* The transfer is partly done. */
2127         frame->status = -EXDEV;
2128
2129         /* calc actual length */
2130         frame->actual_length = 0;
2131
2132         /* Update ring dequeue pointer */
2133         while (ep_ring->dequeue != td->last_trb)
2134                 inc_deq(xhci, ep_ring, false);
2135         inc_deq(xhci, ep_ring, false);
2136
2137         return finish_td(xhci, td, NULL, event, ep, status, true);
2138 }
2139
2140 /*
2141  * Process bulk and interrupt tds, update urb status and actual_length.
2142  */
2143 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2144         union xhci_trb *event_trb, struct xhci_transfer_event *event,
2145         struct xhci_virt_ep *ep, int *status)
2146 {
2147         struct xhci_ring *ep_ring;
2148         union xhci_trb *cur_trb;
2149         struct xhci_segment *cur_seg;
2150         u32 trb_comp_code;
2151
2152         ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2153         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2154
2155         switch (trb_comp_code) {
2156         case COMP_SUCCESS:
2157                 /* Double check that the HW transferred everything. */
2158                 if (event_trb != td->last_trb ||
2159                     EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2160                         xhci_warn(xhci, "WARN Successful completion "
2161                                         "on short TX\n");
2162                         if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2163                                 *status = -EREMOTEIO;
2164                         else
2165                                 *status = 0;
2166                         if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2167                                 trb_comp_code = COMP_SHORT_TX;
2168                 } else {
2169                         *status = 0;
2170                 }
2171                 break;
2172         case COMP_SHORT_TX:
2173                 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2174                         *status = -EREMOTEIO;
2175                 else
2176                         *status = 0;
2177                 break;
2178         default:
2179                 /* Others already handled above */
2180                 break;
2181         }
2182         if (trb_comp_code == COMP_SHORT_TX)
2183                 xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
2184                                 "%d bytes untransferred\n",
2185                                 td->urb->ep->desc.bEndpointAddress,
2186                                 td->urb->transfer_buffer_length,
2187                                 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2188         /* Fast path - was this the last TRB in the TD for this URB? */
2189         if (event_trb == td->last_trb) {
2190                 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2191                         td->urb->actual_length =
2192                                 td->urb->transfer_buffer_length -
2193                                 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2194                         if (td->urb->transfer_buffer_length <
2195                                         td->urb->actual_length) {
2196                                 xhci_warn(xhci, "HC gave bad length "
2197                                                 "of %d bytes left\n",
2198                                           EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2199                                 td->urb->actual_length = 0;
2200                                 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2201                                         *status = -EREMOTEIO;
2202                                 else
2203                                         *status = 0;
2204                         }
2205                         /* Don't overwrite a previously set error code */
2206                         if (*status == -EINPROGRESS) {
2207                                 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2208                                         *status = -EREMOTEIO;
2209                                 else
2210                                         *status = 0;
2211                         }
2212                 } else {
2213                         td->urb->actual_length =
2214                                 td->urb->transfer_buffer_length;
2215                         /* Ignore a short packet completion if the
2216                          * untransferred length was zero.
2217                          */
2218                         if (*status == -EREMOTEIO)
2219                                 *status = 0;
2220                 }
2221         } else {
2222                 /* Slow path - walk the list, starting from the dequeue
2223                  * pointer, to get the actual length transferred.
2224                  */
2225                 td->urb->actual_length = 0;
2226                 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
2227                                 cur_trb != event_trb;
2228                                 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
2229                         if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
2230                             !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
2231                                 td->urb->actual_length +=
2232                                         TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
2233                 }
2234                 /* If the ring didn't stop on a Link or No-op TRB, add
2235                  * in the actual bytes transferred from the Normal TRB
2236                  */
2237                 if (trb_comp_code != COMP_STOP_INVAL)
2238                         td->urb->actual_length +=
2239                                 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2240                                 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2241         }
2242
2243         return finish_td(xhci, td, event_trb, event, ep, status, false);
2244 }
2245
2246 /*
2247  * If this function returns an error condition, it means it got a Transfer
2248  * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
2249  * At this point, the host controller is probably hosed and should be reset.
2250  */
2251 static int handle_tx_event(struct xhci_hcd *xhci,
2252                 struct xhci_transfer_event *event)
2253 {
2254         struct xhci_virt_device *xdev;
2255         struct xhci_virt_ep *ep;
2256         struct xhci_ring *ep_ring;
2257         unsigned int slot_id;
2258         int ep_index;
2259         struct xhci_td *td = NULL;
2260         dma_addr_t event_dma;
2261         struct xhci_segment *event_seg;
2262         union xhci_trb *event_trb;
2263         struct urb *urb = NULL;
2264         int status = -EINPROGRESS;
2265         struct urb_priv *urb_priv;
2266         struct xhci_ep_ctx *ep_ctx;
2267         struct list_head *tmp;
2268         u32 trb_comp_code;
2269         int ret = 0;
2270         int td_num = 0;
2271
2272         slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2273         xdev = xhci->devs[slot_id];
2274         if (!xdev) {
2275                 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
2276                 return -ENODEV;
2277         }
2278
2279         /* Endpoint ID is 1 based, our index is zero based */
2280         ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2281         ep = &xdev->eps[ep_index];
2282         ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2283         ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2284         if (!ep_ring ||
2285             (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
2286             EP_STATE_DISABLED) {
2287                 xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
2288                                 "or incorrect stream ring\n");
2289                 return -ENODEV;
2290         }
2291
2292         /* Count current td numbers if ep->skip is set */
2293         if (ep->skip) {
2294                 list_for_each(tmp, &ep_ring->td_list)
2295                         td_num++;
2296         }
2297
2298         event_dma = le64_to_cpu(event->buffer);
2299         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2300         /* Look for common error cases */
2301         switch (trb_comp_code) {
2302         /* Skip codes that require special handling depending on
2303          * transfer type
2304          */
2305         case COMP_SUCCESS:
2306                 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
2307                         break;
2308                 if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2309                         trb_comp_code = COMP_SHORT_TX;
2310                 else
2311                         xhci_warn(xhci, "WARN Successful completion on short TX: "
2312                                         "needs XHCI_TRUST_TX_LENGTH quirk?\n");
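                /* fall through */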
2313         case COMP_SHORT_TX:
2314                 break;
2315         case COMP_STOP:
2316                 xhci_dbg(xhci, "Stopped on Transfer TRB\n");
2317                 break;
2318         case COMP_STOP_INVAL:
2319                 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
2320                 break;
2321         case COMP_STALL:
2322                 xhci_dbg(xhci, "Stalled endpoint\n");
2323                 ep->ep_state |= EP_HALTED;
2324                 status = -EPIPE;
2325                 break;
2326         case COMP_TRB_ERR:
2327                 xhci_warn(xhci, "WARN: TRB error on endpoint\n");
2328                 status = -EILSEQ;
2329                 break;
2330         case COMP_SPLIT_ERR:
2331         case COMP_TX_ERR:
2332                 xhci_dbg(xhci, "Transfer error on endpoint\n");
2333                 status = -EPROTO;
2334                 break;
2335         case COMP_BABBLE:
2336                 xhci_dbg(xhci, "Babble error on endpoint\n");
2337                 status = -EOVERFLOW;
2338                 break;
2339         case COMP_DB_ERR:
2340                 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
2341                 status = -ENOSR;
2342                 break;
2343         case COMP_BW_OVER:
2344                 xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
2345                 break;
2346         case COMP_BUFF_OVER:
2347                 xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
2348                 break;
2349         case COMP_UNDERRUN:
2350                 /*
2351                  * When the Isoch ring is empty, the xHC will generate
2352                  * a Ring Overrun Event for IN Isoch endpoint or Ring
2353                  * Underrun Event for OUT Isoch endpoint.
2354                  */
2355                 xhci_dbg(xhci, "underrun event on endpoint\n");
2356                 if (!list_empty(&ep_ring->td_list))
2357                         xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2358                                         "still with TDs queued?\n",
2359                                  TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2360                                  ep_index);
2361                 goto cleanup;
2362         case COMP_OVERRUN:
2363                 xhci_dbg(xhci, "overrun event on endpoint\n");
2364                 if (!list_empty(&ep_ring->td_list))
2365                         xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2366                                         "still with TDs queued?\n",
2367                                  TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2368                                  ep_index);
2369                 goto cleanup;
2370         case COMP_DEV_ERR:
2371                 xhci_warn(xhci, "WARN: detected an incompatible device\n");
2372                 status = -EPROTO;
2373                 break;
2374         case COMP_MISSED_INT:
2375                 /*
2376                  * When a missed service error is encountered, one or more
2377                  * isoc tds may have been missed by the xHC.
2378                  * Set the skip flag of the ep_ring; complete the missed tds
2379                  * as short transfers when processing the ep_ring next time.
2380                  */
2381                 ep->skip = true;
2382                 xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
2383                 goto cleanup;
2384         default:
2385                 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2386                         status = 0;
2387                         break;
2388                 }
2389                 xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
2390                                 "busted\n");
2391                 goto cleanup;
2392         }
2393
2394         do {
2395                 /* This TRB should be in the TD at the head of this ring's
2396                  * TD list.
2397                  */
2398                 if (list_empty(&ep_ring->td_list)) {
2399                         /*
2400                          * A stopped endpoint may generate an extra completion
2401                          * event if the device was suspended.  Don't print
2402                          * warnings.
2403                          */
2404                         if (!(trb_comp_code == COMP_STOP ||
2405                                                 trb_comp_code == COMP_STOP_INVAL)) {
2406                                 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2407                                                 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2408                                                 ep_index);
2409                                 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2410                                                 (le32_to_cpu(event->flags) &
2411                                                  TRB_TYPE_BITMASK) >> 10);
2412                                 xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2413                         }
2414                         if (ep->skip) {
2415                                 ep->skip = false;
2416                                 xhci_dbg(xhci, "td_list is empty while skip "
2417                                                 "flag set. Clear skip flag.\n");
2418                         }
2419                         ret = 0;
2420                         goto cleanup;
2421                 }
2422
2423                 /* We've skipped all the TDs on the ep ring when ep->skip is set */
2424                 if (ep->skip && td_num == 0) {
2425                         ep->skip = false;
2426                         xhci_dbg(xhci, "All tds on the ep_ring skipped. "
2427                                                 "Clear skip flag.\n");
2428                         ret = 0;
2429                         goto cleanup;
2430                 }
2431
2432                 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
2433                 if (ep->skip)
2434                         td_num--;
2435
2436                 /* Is this a TRB in the currently executing TD? */
2437                 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
2438                                 td->last_trb, event_dma);
2439
2440                 /*
2441                  * Skip the Force Stopped Event. The event_trb (event_dma) of
2442                  * the FSE is not in the current TD pointed to by
2443                  * ep_ring->dequeue, because the hardware dequeue pointer is
2444                  * still at the previous TRB of the current TD. That TRB may
2445                  * be a Link TRB or the last TRB of the previous TD. The
2446                  * command completion handler will take care of the rest.
2447                  */
2448                 if (!event_seg && (trb_comp_code == COMP_STOP ||
2449                                    trb_comp_code == COMP_STOP_INVAL)) {
2450                         ret = 0;
2451                         goto cleanup;
2452                 }
2453
2454                 if (!event_seg) {
2455                         if (!ep->skip ||
2456                             !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2457                                 /* Some host controllers give a spurious
2458                                  * successful event after a short transfer.
2459                                  * Ignore it.
2460                                  */
2461                                 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2462                                                 ep_ring->last_td_was_short) {
2463                                         ep_ring->last_td_was_short = false;
2464                                         ret = 0;
2465                                         goto cleanup;
2466                                 }
2467                                 /* HC is busted, give up! */
2468                                 xhci_err(xhci,
2469                                         "ERROR Transfer event TRB DMA ptr not "
2470                                         "part of current TD\n");
2471                                 return -ESHUTDOWN;
2472                         }
2473
2474                         ret = skip_isoc_td(xhci, td, event, ep, &status);
2475                         goto cleanup;
2476                 }
2477                 if (trb_comp_code == COMP_SHORT_TX)
2478                         ep_ring->last_td_was_short = true;
2479                 else
2480                         ep_ring->last_td_was_short = false;
2481
2482                 if (ep->skip) {
2483                         xhci_dbg(xhci, "Found td. Clear skip flag.\n");
2484                         ep->skip = false;
2485                 }
2486
2487                 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
2488                                                 sizeof(*event_trb)];
2489                 /*
2490                  * No-op TRB should not trigger interrupts.
2491                  * If event_trb is a no-op TRB, it means the
2492                  * corresponding TD has been cancelled. Just ignore
2493                  * the TD.
2494                  */
2495                 if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
2496                         xhci_dbg(xhci,
2497                                  "event_trb is a no-op TRB. Skip it\n");
2498                         goto cleanup;
2499                 }
2500
2501                 /* Now update the urb's actual_length and give back to
2502                  * the core
2503                  */
2504                 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2505                         ret = process_ctrl_td(xhci, td, event_trb, event, ep,
2506                                                  &status);
2507                 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2508                         ret = process_isoc_td(xhci, td, event_trb, event, ep,
2509                                                  &status);
2510                 else
2511                         ret = process_bulk_intr_td(xhci, td, event_trb, event,
2512                                                  ep, &status);
2513
2514 cleanup:
2515                 /*
2516                  * Do not update event ring dequeue pointer if ep->skip is set.
2517                  * We will roll back to continue processing the missed TDs.
2518                  */
2519                 if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
2520                         inc_deq(xhci, xhci->event_ring, true);
2521                 }
2522
2523                 if (ret) {
2524                         urb = td->urb;
2525                         urb_priv = urb->hcpriv;
2526
2527                         xhci_urb_free_priv(xhci, urb_priv);
2528
2529                         usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
2530                         if ((urb->actual_length != urb->transfer_buffer_length &&
2531                                                 (urb->transfer_flags &
2532                                                  URB_SHORT_NOT_OK)) ||
2533                                         (status != 0 &&
2534                                          !usb_endpoint_xfer_isoc(&urb->ep->desc)))
2535                                 xhci_dbg(xhci, "Giveback URB %p, len = %d, "
2536                                                 "expected = %x, status = %d\n",
2537                                                 urb, urb->actual_length,
2538                                                 urb->transfer_buffer_length,
2539                                                 status);
2540                         spin_unlock(&xhci->lock);
2541                         /* EHCI, UHCI, and OHCI unconditionally set the
2542                          * urb->status of an isochronous endpoint to 0.
2543                          */
2544                         if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
2545                                 status = 0;
2546                         usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
2547                         spin_lock(&xhci->lock);
2548                 }
2549
2550         /*
2551          * If ep->skip is set, there are missed TDs on the endpoint
2552          * ring that need to be taken care of.
2553          * Process them as short transfers until we reach the TD pointed
2554          * to by the event.
2555          */
2556         } while (ep->skip && trb_comp_code != COMP_MISSED_INT);
2557
2558         return 0;
2559 }
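
/*
 * Worked example of the skip handling above (an illustrative sketch, not
 * from a real trace): suppose an isoc endpoint has TDs A, B and C queued
 * and the xHC reports a Missed Service Error, so ep->skip is set.  When
 * the next transfer event arrives with a DMA pointer inside TD C, the
 * do-while loop above gives back A and B as short transfers via
 * skip_isoc_td() without advancing the event ring dequeue pointer; only
 * when trb_in_td() matches TD C is ep->skip cleared and the event
 * consumed normally.
 */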
2560
2561 /*
2562  * This function handles all OS-owned events on the event ring.  It may drop
2563  * xhci->lock between event processing (e.g. to pass up port status changes).
2564  * Returns >0 for "possibly more events to process" (caller should call again),
2565  * otherwise 0 if done.  In future, <0 returns should indicate error code.
2566  */
2567 static int xhci_handle_event(struct xhci_hcd *xhci)
2568 {
2569         union xhci_trb *event;
2570         int update_ptrs = 1;
2571         int ret;
2572
2573         if (!xhci->event_ring || !xhci->event_ring->dequeue) {
2574                 xhci->error_bitmask |= 1 << 1;
2575                 return 0;
2576         }
2577
2578         event = xhci->event_ring->dequeue;
2579         /* Does the HC or OS own the TRB? */
2580         if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
2581             xhci->event_ring->cycle_state) {
2582                 xhci->error_bitmask |= 1 << 2;
2583                 return 0;
2584         }
2585
2586         /*
2587          * Barrier between reading the TRB_CYCLE (valid) flag above and any
2588          * speculative reads of the event's flags/data below.
2589          */
2590         rmb();
2591         /* FIXME: Handle more event types. */
2592         switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
2593         case TRB_TYPE(TRB_COMPLETION):
2594                 handle_cmd_completion(xhci, &event->event_cmd);
2595                 break;
2596         case TRB_TYPE(TRB_PORT_STATUS):
2597                 handle_port_status(xhci, event);
2598                 update_ptrs = 0;
2599                 break;
2600         case TRB_TYPE(TRB_TRANSFER):
2601                 ret = handle_tx_event(xhci, &event->trans_event);
2602                 if (ret < 0)
2603                         xhci->error_bitmask |= 1 << 9;
2604                 else
2605                         update_ptrs = 0;
2606                 break;
2607         default:
2608                 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
2609                     TRB_TYPE(48))
2610                         handle_vendor_event(xhci, event);
2611                 else
2612                         xhci->error_bitmask |= 1 << 3;
2613         }
2614         /* Any of the above functions may drop and re-acquire the lock, so check
2615          * to make sure a watchdog timer didn't mark the host as non-responsive.
2616          */
2617         if (xhci->xhc_state & XHCI_STATE_DYING) {
2618                 xhci_dbg(xhci, "xHCI host dying, returning from "
2619                                 "event handler.\n");
2620                 return 0;
2621         }
2622
2623         if (update_ptrs)
2624                 /* Update SW event ring dequeue pointer */
2625                 inc_deq(xhci, xhci->event_ring, true);
2626
2627         /* Are there more items on the event ring?  Caller will call us again to
2628          * check.
2629          */
2630         return 1;
2631 }
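
/*
 * Usage sketch: since a return value > 0 only means "possibly more events
 * to process", callers drain the ring in a loop, as xhci_irq() does below:
 *
 *        while (xhci_handle_event(xhci) > 0) {}
 */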
2632
2633 /*
2634  * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2635  * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
2636  * indicators of an event TRB error, but we check the status *first* to be safe.
2637  */
2638 irqreturn_t xhci_irq(struct usb_hcd *hcd)
2639 {
2640         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2641         u32 status;
2642         union xhci_trb *trb;
2643         u64 temp_64;
2644         union xhci_trb *event_ring_deq;
2645         dma_addr_t deq;
2646
2647         spin_lock(&xhci->lock);
2648         trb = xhci->event_ring->dequeue;
2649         /* Check if the xHC generated the interrupt, or the irq is shared */
2650         status = xhci_readl(xhci, &xhci->op_regs->status);
2651         if (status == 0xffffffff)
2652                 goto hw_died;
2653
2654         if (!(status & STS_EINT)) {
2655                 spin_unlock(&xhci->lock);
2656                 return IRQ_NONE;
2657         }
2658         if (status & STS_FATAL) {
2659                 xhci_warn(xhci, "WARNING: Host System Error\n");
2660                 xhci_halt(xhci);
2661 hw_died:
2662                 spin_unlock(&xhci->lock);
2663                 return IRQ_HANDLED;
2664         }
2665
2666         /*
2667          * Clear the op reg interrupt status first,
2668          * so we can receive interrupts from other MSI-X interrupters.
2669          * Write 1 to clear the interrupt status.
2670          */
2671         status |= STS_EINT;
2672         xhci_writel(xhci, status, &xhci->op_regs->status);
2673         /* FIXME when MSI-X is supported and there are multiple vectors */
2674         /* Clear the MSI-X event interrupt status */
2675
2676         if (hcd->irq != -1) {
2677                 u32 irq_pending;
2678                 /* Acknowledge the PCI interrupt */
2679                 irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
2680                 irq_pending |= IMAN_IP;
2681                 xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
2682         }
2683
2684         if (xhci->xhc_state & XHCI_STATE_DYING) {
2685                 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2686                                 "Shouldn't IRQs be disabled?\n");
2687                 /* Clear the event handler busy flag (RW1C);
2688                  * the event ring should be empty.
2689                  */
2690                 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2691                 xhci_write_64(xhci, temp_64 | ERST_EHB,
2692                                 &xhci->ir_set->erst_dequeue);
2693                 spin_unlock(&xhci->lock);
2694
2695                 return IRQ_HANDLED;
2696         }
2697
2698         event_ring_deq = xhci->event_ring->dequeue;
2699         /* FIXME this should be a delayed service routine
2700          * that clears the EHB.
2701          */
2702         while (xhci_handle_event(xhci) > 0) {}
2703
2704         temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2705         /* If necessary, update the HW's version of the event ring deq ptr. */
2706         if (event_ring_deq != xhci->event_ring->dequeue) {
2707                 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2708                                 xhci->event_ring->dequeue);
2709                 if (deq == 0)
2710                         xhci_warn(xhci, "WARN something wrong with SW event "
2711                                         "ring dequeue ptr.\n");
2712                 /* Update HC event ring dequeue pointer */
2713                 temp_64 &= ERST_PTR_MASK;
2714                 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2715         }
2716
2717         /* Clear the event handler busy flag (RW1C); event ring is empty. */
2718         temp_64 |= ERST_EHB;
2719         xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
2720
2721         spin_unlock(&xhci->lock);
2722
2723         return IRQ_HANDLED;
2724 }
2725
2726 irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
2727 {
2728         irqreturn_t ret;
2729         struct xhci_hcd *xhci;
2730
2731         xhci = hcd_to_xhci(hcd);
2732         set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
2733         if (xhci->shared_hcd)
2734                 set_bit(HCD_FLAG_SAW_IRQ, &xhci->shared_hcd->flags);
2735
2736         ret = xhci_irq(hcd);
2737
2738         return ret;
2739 }
2740
2741 /****           Endpoint Ring Operations        ****/
2742
2743 /*
2744  * Generic function for queueing a TRB on a ring.
2745  * The caller must have checked to make sure there's room on the ring.
2746  *
2747  * @more_trbs_coming:   Will you enqueue more TRBs before calling
2748  *                      prepare_transfer()?
2749  */
2750 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
2751                 bool consumer, bool more_trbs_coming, bool isoc,
2752                 u32 field1, u32 field2, u32 field3, u32 field4)
2753 {
2754         struct xhci_generic_trb *trb;
2755
2756         trb = &ring->enqueue->generic;
2757         trb->field[0] = cpu_to_le32(field1);
2758         trb->field[1] = cpu_to_le32(field2);
2759         trb->field[2] = cpu_to_le32(field3);
2760         trb->field[3] = cpu_to_le32(field4);
2761         inc_enq(xhci, ring, consumer, more_trbs_coming, isoc);
2762 }
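
/*
 * Note on the calling convention (a sketch mirroring the status-stage call
 * in xhci_queue_ctrl_tx() below): field1..field4 are passed in CPU byte
 * order and byte-swapped here, and the producer's cycle bit travels in
 * field4, e.g.:
 *
 *        queue_trb(xhci, ep_ring, false, false, false,
 *                  0, 0, TRB_INTR_TARGET(0),
 *                  TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
 */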
2763
2764 /*
2765  * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
2766  * FIXME allocate segments if the ring is full.
2767  */
2768 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2769                 u32 ep_state, unsigned int num_trbs, bool isoc, gfp_t mem_flags)
2770 {
2771         /* Make sure the endpoint has been added to xHC schedule */
2772         switch (ep_state) {
2773         case EP_STATE_DISABLED:
2774                 /*
2775                  * USB core changed config/interfaces without notifying us,
2776                  * or hardware is reporting the wrong state.
2777                  */
2778                 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2779                 return -ENOENT;
2780         case EP_STATE_ERROR:
2781                 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
2782                 /* FIXME event handling code for error needs to clear it */
2783                 /* XXX not sure if this should be -ENOENT or not */
2784                 return -EINVAL;
2785         case EP_STATE_HALTED:
2786                 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
2787         case EP_STATE_STOPPED:
2788         case EP_STATE_RUNNING:
2789                 break;
2790         default:
2791                 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2792                 /*
2793                  * FIXME issue Configure Endpoint command to try to get the HC
2794                  * back into a known state.
2795                  */
2796                 return -EINVAL;
2797         }
2798         if (!room_on_ring(xhci, ep_ring, num_trbs)) {
2799                 /* FIXME allocate more room */
2800                 xhci_err(xhci, "ERROR no room on ep ring\n");
2801                 return -ENOMEM;
2802         }
2803
2804         if (enqueue_is_link_trb(ep_ring)) {
2805                 struct xhci_ring *ring = ep_ring;
2806                 union xhci_trb *next;
2807
2808                 next = ring->enqueue;
2809
2810                 while (last_trb(xhci, ring, ring->enq_seg, next)) {
2811                         /* If we're not dealing with 0.95 hardware or isoc rings
2812                          * on AMD 0.96 host, clear the chain bit.
2813                          */
2814                         if (!xhci_link_trb_quirk(xhci) && !(isoc &&
2815                                         (xhci->quirks & XHCI_AMD_0x96_HOST)))
2816                                 next->link.control &= cpu_to_le32(~TRB_CHAIN);
2817                         else
2818                                 next->link.control |= cpu_to_le32(TRB_CHAIN);
2819
2820                         wmb();
2821                         next->link.control ^= cpu_to_le32(TRB_CYCLE);
2822
2823                         /* Toggle the cycle bit after the last ring segment. */
2824                         if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
2825                                 ring->cycle_state = (ring->cycle_state ? 0 : 1);
2826                                 if (!in_interrupt()) {
2827                                         xhci_dbg(xhci, "queue_trb: Toggle cycle "
2828                                                 "state for ring %p = %i\n",
2829                                                 ring, (unsigned int)ring->cycle_state);
2830                                 }
2831                         }
2832                         ring->enq_seg = ring->enq_seg->next;
2833                         ring->enqueue = ring->enq_seg->trbs;
2834                         next = ring->enqueue;
2835                 }
2836         }
2837
2838         return 0;
2839 }
2840
2841 static int prepare_transfer(struct xhci_hcd *xhci,
2842                 struct xhci_virt_device *xdev,
2843                 unsigned int ep_index,
2844                 unsigned int stream_id,
2845                 unsigned int num_trbs,
2846                 struct urb *urb,
2847                 unsigned int td_index,
2848                 bool isoc,
2849                 gfp_t mem_flags)
2850 {
2851         int ret;
2852         struct urb_priv *urb_priv;
2853         struct xhci_td  *td;
2854         struct xhci_ring *ep_ring;
2855         struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2856
2857         ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
2858         if (!ep_ring) {
2859                 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
2860                                 stream_id);
2861                 return -EINVAL;
2862         }
2863
2864         ret = prepare_ring(xhci, ep_ring,
2865                            le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
2866                            num_trbs, isoc, mem_flags);
2867         if (ret)
2868                 return ret;
2869
2870         urb_priv = urb->hcpriv;
2871         td = urb_priv->td[td_index];
2872
2873         INIT_LIST_HEAD(&td->td_list);
2874         INIT_LIST_HEAD(&td->cancelled_td_list);
2875
2876         if (td_index == 0) {
2877                 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
2878                 if (unlikely(ret))
2879                         return ret;
2880         }
2881
2882         td->urb = urb;
2883         /* Add this TD to the tail of the endpoint ring's TD list */
2884         list_add_tail(&td->td_list, &ep_ring->td_list);
2885         td->start_seg = ep_ring->enq_seg;
2886         td->first_trb = ep_ring->enqueue;
2887
2888         urb_priv->td[td_index] = td;
2889
2890         return 0;
2891 }
2892
2893 static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
2894 {
2895         int num_sgs, num_trbs, running_total, temp, i;
2896         struct scatterlist *sg;
2897
2898         sg = NULL;
2899         num_sgs = urb->num_mapped_sgs;
2900         temp = urb->transfer_buffer_length;
2901
2902         xhci_dbg(xhci, "count sg list trbs:\n");
2903         num_trbs = 0;
2904         for_each_sg(urb->sg, sg, num_sgs, i) {
2905                 unsigned int previous_total_trbs = num_trbs;
2906                 unsigned int len = sg_dma_len(sg);
2907
2908                 /* Scatter gather list entries may cross 64KB boundaries */
2909                 running_total = TRB_MAX_BUFF_SIZE -
2910                         (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
2911                 running_total &= TRB_MAX_BUFF_SIZE - 1;
2912                 if (running_total != 0)
2913                         num_trbs++;
2914
2915                 /* How many more 64KB chunks to transfer, how many more TRBs? */
2916                 while (running_total < sg_dma_len(sg) && running_total < temp) {
2917                         num_trbs++;
2918                         running_total += TRB_MAX_BUFF_SIZE;
2919                 }
2920                 xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
2921                                 i, (unsigned long long)sg_dma_address(sg),
2922                                 len, len, num_trbs - previous_total_trbs);
2923
2924                 len = min_t(int, len, temp);
2925                 temp -= len;
2926                 if (temp == 0)
2927                         break;
2928         }
2929         xhci_dbg(xhci, "\n");
2930         if (!in_interrupt())
2931                 xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
2932                                 "num_trbs = %d\n",
2933                                 urb->ep->desc.bEndpointAddress,
2934                                 urb->transfer_buffer_length,
2935                                 num_trbs);
2936         return num_trbs;
2937 }
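
/*
 * Worked example for the counting above (illustrative numbers): an sg
 * entry with dma = 0xff00 and len = 0x10000 straddles one 64KB boundary.
 * running_total starts at 0x100 (the bytes up to the boundary), so one
 * TRB is counted for them, and the while loop adds one more TRB for the
 * remaining 0xff00 bytes: num_trbs = 2 for this entry.
 */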
2938
2939 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
2940 {
2941         if (num_trbs != 0)
2942                 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
2943                                 "TRBs, %d left\n", __func__,
2944                                 urb->ep->desc.bEndpointAddress, num_trbs);
2945         if (running_total != urb->transfer_buffer_length)
2946                 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
2947                                 "queued %#x (%d), asked for %#x (%d)\n",
2948                                 __func__,
2949                                 urb->ep->desc.bEndpointAddress,
2950                                 running_total, running_total,
2951                                 urb->transfer_buffer_length,
2952                                 urb->transfer_buffer_length);
2953 }
2954
2955 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
2956                 unsigned int ep_index, unsigned int stream_id, int start_cycle,
2957                 struct xhci_generic_trb *start_trb)
2958 {
2959         /*
2960          * Pass all the TRBs to the hardware at once and make sure this write
2961          * isn't reordered.
2962          */
2963         wmb();
2964         if (start_cycle)
2965                 start_trb->field[3] |= cpu_to_le32(start_cycle);
2966         else
2967                 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
2968         xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
2969 }
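
/*
 * Note: this pairing works because the queueing functions below write the
 * first TRB with an *inverted* cycle bit (see the first_trb handling in
 * queue_bulk_sg_tx(), for example), so the hardware ignores the whole TD
 * until the single write above flips the bit and transfers ownership.
 */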
2970
2971 /*
2972  * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
2973  * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
2974  * (comprised of sg list entries) can take several service intervals to
2975  * transmit.
2976  */
2977 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2978                 struct urb *urb, int slot_id, unsigned int ep_index)
2979 {
2980         struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
2981                         xhci->devs[slot_id]->out_ctx, ep_index);
2982         int xhci_interval;
2983         int ep_interval;
2984
2985         xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
2986         ep_interval = urb->interval;
2987         /* Convert to microframes */
2988         if (urb->dev->speed == USB_SPEED_LOW ||
2989                         urb->dev->speed == USB_SPEED_FULL)
2990                 ep_interval *= 8;
2991         /* FIXME change this to a warning and a suggestion to use the new API
2992          * to set the polling interval (once the API is added).
2993          */
2994         if (xhci_interval != ep_interval) {
2995                 if (printk_ratelimit())
2996                         dev_dbg(&urb->dev->dev, "Driver uses different interval"
2997                                         " (%d microframe%s) than xHCI "
2998                                         "(%d microframe%s)\n",
2999                                         ep_interval,
3000                                         ep_interval == 1 ? "" : "s",
3001                                         xhci_interval,
3002                                         xhci_interval == 1 ? "" : "s");
3003                 urb->interval = xhci_interval;
3004                 /* Convert back to frames for LS/FS devices */
3005                 if (urb->dev->speed == USB_SPEED_LOW ||
3006                                 urb->dev->speed == USB_SPEED_FULL)
3007                         urb->interval /= 8;
3008         }
3009         return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
3010 }
3011
3012 /*
3013  * The TD size is the number of bytes remaining in the TD (including this TRB),
3014  * right shifted by 10.
3015  * It must fit in bits 21:17, so it can't be bigger than 31.
3016  */
3017 static u32 xhci_td_remainder(unsigned int remainder)
3018 {
3019         u32 max = (1 << (21 - 17 + 1)) - 1;
3020
3021         if ((remainder >> 10) >= max)
3022                 return max << 17;
3023         else
3024                 return (remainder >> 10) << 17;
3025 }
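
/*
 * Worked example (illustrative): with 20000 bytes remaining,
 * 20000 >> 10 = 19 fits below the maximum of 31, so the field is
 * 19 << 17.  With 70000 bytes remaining, 70000 >> 10 = 68 >= 31, so the
 * field saturates at 31 << 17.
 */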
3026
3027 /*
3028  * For xHCI 1.0 host controllers, TD size is the number of max packet sized
3029  * packets remaining in the TD (*not* including this TRB).
3030  *
3031  * Total TD packet count = total_packet_count =
3032  *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
3033  *
3034  * Packets transferred up to and including this TRB = packets_transferred =
3035  *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3036  *
3037  * TD size = total_packet_count - packets_transferred
3038  *
3039  * It must fit in bits 21:17, so it can't be bigger than 31.
3040  * The last TRB in a TD must have the TD size set to zero.
3041  */
3042 static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
3043                 unsigned int total_packet_count, struct urb *urb,
3044                 unsigned int num_trbs_left)
3045 {
3046         int packets_transferred;
3047
3048         /* The last TRB in a TD, or a zero-length TRB, gets TD size 0. */
3049         if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
3050                 return 0;
3051
3052         /* None of the TRB queueing functions count the current TRB in
3053          * running_total.
3054          */
3055         packets_transferred = (running_total + trb_buff_len) /
3056                 GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3057
3058         if ((total_packet_count - packets_transferred) > 31)
3059                 return 31 << 17;
3060         return (total_packet_count - packets_transferred) << 17;
3061 }
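
/*
 * Worked example (illustrative): for a bulk TD of 3072 bytes on an
 * endpoint with wMaxPacketSize = 512, total_packet_count = 6.  On the TRB
 * that brings running_total + trb_buff_len to 1024, packets_transferred =
 * 1024 / 512 = 2, so the TD size field is (6 - 2) << 17.  The last TRB
 * (num_trbs_left == 0) always reports 0.
 */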
3062
3063 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3064                 struct urb *urb, int slot_id, unsigned int ep_index)
3065 {
3066         struct xhci_ring *ep_ring;
3067         unsigned int num_trbs;
3068         struct urb_priv *urb_priv;
3069         struct xhci_td *td;
3070         struct scatterlist *sg;
3071         int num_sgs;
3072         int trb_buff_len, this_sg_len, running_total;
3073         unsigned int total_packet_count;
3074         bool first_trb;
3075         u64 addr;
3076         bool more_trbs_coming;
3077
3078         struct xhci_generic_trb *start_trb;
3079         int start_cycle;
3080
3081         ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3082         if (!ep_ring)
3083                 return -EINVAL;
3084
3085         num_trbs = count_sg_trbs_needed(xhci, urb);
3086         num_sgs = urb->num_mapped_sgs;
3087         total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
3088                         usb_endpoint_maxp(&urb->ep->desc));
3089
3090         trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
3091                         ep_index, urb->stream_id,
3092                         num_trbs, urb, 0, false, mem_flags);
3093         if (trb_buff_len < 0)
3094                 return trb_buff_len;
3095
3096         urb_priv = urb->hcpriv;
3097         td = urb_priv->td[0];
3098
3099         /*
3100          * Don't give the first TRB to the hardware (by toggling the cycle bit)
3101          * until we've finished creating all the other TRBs.  The ring's cycle
3102          * state may change as we enqueue the other TRBs, so save it too.
3103          */
3104         start_trb = &ep_ring->enqueue->generic;
3105         start_cycle = ep_ring->cycle_state;
3106
3107         running_total = 0;
3108         /*
3109          * How much data is in the first TRB?
3110          *
3111          * There are three forces at work for TRB buffer pointers and lengths:
3112          * 1. We don't want to walk off the end of this sg-list entry buffer.
3113          * 2. The transfer length that the driver requested may be smaller than
3114          *    the amount of memory allocated for this scatter-gather list.
3115          * 3. TRB buffers can't cross 64KB boundaries.
3116          */
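        /*
         * Worked example (illustrative): if sg dma = 0x1f800, only 0x800
         * bytes remain before the 64KB boundary; with sg len = 0x2000 but
         * a requested transfer length of 0x400, the first TRB length is
         * min(0x800, 0x2000) = 0x800, then clamped to 0x400 below.
         */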
3117         sg = urb->sg;
3118         addr = (u64) sg_dma_address(sg);
3119         this_sg_len = sg_dma_len(sg);
3120         trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
3121         trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
3122         if (trb_buff_len > urb->transfer_buffer_length)
3123                 trb_buff_len = urb->transfer_buffer_length;
3124         xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
3125                         trb_buff_len);
3126
3127         first_trb = true;
3128         /* Queue the first TRB, even if it's zero-length */
3129         do {
3130                 u32 field = 0;
3131                 u32 length_field = 0;
3132                 u32 remainder = 0;
3133
3134                 /* Don't change the cycle bit of the first TRB until later */
3135                 if (first_trb) {
3136                         first_trb = false;
3137                         if (start_cycle == 0)
3138                                 field |= 0x1;
3139                 } else
3140                         field |= ep_ring->cycle_state;
3141
3142                 /* Chain all the TRBs together; clear the chain bit in the last
3143                  * TRB to indicate it's the last TRB in the chain.
3144                  */
3145                 if (num_trbs > 1) {
3146                         field |= TRB_CHAIN;
3147                 } else {
3148                         /* FIXME - add check for ZERO_PACKET flag before this */
3149                         td->last_trb = ep_ring->enqueue;
3150                         field |= TRB_IOC;
3151                 }
3152
3153                 /* Only set interrupt on short packet for IN endpoints */
3154                 if (usb_urb_dir_in(urb))
3155                         field |= TRB_ISP;
3156
3157                 xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
3158                                 "64KB boundary at %#x, end dma = %#x\n",
3159                                 (unsigned int) addr, trb_buff_len, trb_buff_len,
3160                                 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
3161                                 (unsigned int) addr + trb_buff_len);
3162                 if (TRB_MAX_BUFF_SIZE -
3163                                 (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
3164                         xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
3165                         xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
3166                                         (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
3167                                         (unsigned int) addr + trb_buff_len);
3168                 }
3169
3170                 /* Set the TRB length, TD size, and interrupter fields. */
3171                 if (xhci->hci_version < 0x100) {
3172                         remainder = xhci_td_remainder(
3173                                         urb->transfer_buffer_length -
3174                                         running_total);
3175                 } else {
3176                         remainder = xhci_v1_0_td_remainder(running_total,
3177                                         trb_buff_len, total_packet_count, urb,
3178                                         num_trbs - 1);
3179                 }
3180                 length_field = TRB_LEN(trb_buff_len) |
3181                         remainder |
3182                         TRB_INTR_TARGET(0);
3183
3184                 if (num_trbs > 1)
3185                         more_trbs_coming = true;
3186                 else
3187                         more_trbs_coming = false;
3188                 queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
3189                                 lower_32_bits(addr),
3190                                 upper_32_bits(addr),
3191                                 length_field,
3192                                 field | TRB_TYPE(TRB_NORMAL));
3193                 --num_trbs;
3194                 running_total += trb_buff_len;
3195
3196                 /* Calculate length for next transfer --
3197                  * Are we done queueing all the TRBs for this sg entry?
3198                  */
3199                 this_sg_len -= trb_buff_len;
3200                 if (this_sg_len == 0) {
3201                         --num_sgs;
3202                         if (num_sgs == 0)
3203                                 break;
3204                         sg = sg_next(sg);
3205                         addr = (u64) sg_dma_address(sg);
3206                         this_sg_len = sg_dma_len(sg);
3207                 } else {
3208                         addr += trb_buff_len;
3209                 }
3210
3211                 trb_buff_len = TRB_MAX_BUFF_SIZE -
3212                         (addr & (TRB_MAX_BUFF_SIZE - 1));
3213                 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
3214                 if (running_total + trb_buff_len > urb->transfer_buffer_length)
3215                         trb_buff_len =
3216                                 urb->transfer_buffer_length - running_total;
3217         } while (running_total < urb->transfer_buffer_length);
3218
3219         check_trb_math(urb, num_trbs, running_total);
3220         giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3221                         start_cycle, start_trb);
3222         return 0;
3223 }
3224
3225 /* This is very similar to what ehci-q.c qtd_fill() does */
3226 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3227                 struct urb *urb, int slot_id, unsigned int ep_index)
3228 {
3229         struct xhci_ring *ep_ring;
3230         struct urb_priv *urb_priv;
3231         struct xhci_td *td;
3232         int num_trbs;
3233         struct xhci_generic_trb *start_trb;
3234         bool first_trb;
3235         bool more_trbs_coming;
3236         int start_cycle;
3237         u32 field, length_field;
3238
3239         int running_total, trb_buff_len, ret;
3240         unsigned int total_packet_count;
3241         u64 addr;
3242
3243         if (urb->num_sgs)
3244                 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
3245
3246         ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3247         if (!ep_ring)
3248                 return -EINVAL;
3249
3250         num_trbs = 0;
3251         /* How much data is (potentially) left before the 64KB boundary? */
3252         running_total = TRB_MAX_BUFF_SIZE -
3253                 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
3254         running_total &= TRB_MAX_BUFF_SIZE - 1;
3255
3256         /* If there's some data on this 64KB chunk, or we have to send a
3257          * zero-length transfer, we need at least one TRB
3258          */
3259         if (running_total != 0 || urb->transfer_buffer_length == 0)
3260                 num_trbs++;
3261         /* How many more 64KB chunks to transfer, how many more TRBs? */
3262         while (running_total < urb->transfer_buffer_length) {
3263                 num_trbs++;
3264                 running_total += TRB_MAX_BUFF_SIZE;
3265         }
3266         /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
3267
3268         if (!in_interrupt())
3269                 xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
3270                                 "addr = %#llx, num_trbs = %d\n",
3271                                 urb->ep->desc.bEndpointAddress,
3272                                 urb->transfer_buffer_length,
3273                                 urb->transfer_buffer_length,
3274                                 (unsigned long long)urb->transfer_dma,
3275                                 num_trbs);
3276
3277         ret = prepare_transfer(xhci, xhci->devs[slot_id],
3278                         ep_index, urb->stream_id,
3279                         num_trbs, urb, 0, false, mem_flags);
3280         if (ret < 0)
3281                 return ret;
3282
3283         urb_priv = urb->hcpriv;
3284         td = urb_priv->td[0];
3285
3286         /*
3287          * Don't give the first TRB to the hardware (by toggling the cycle bit)
3288          * until we've finished creating all the other TRBs.  The ring's cycle
3289          * state may change as we enqueue the other TRBs, so save it too.
3290          */
3291         start_trb = &ep_ring->enqueue->generic;
3292         start_cycle = ep_ring->cycle_state;
3293
3294         running_total = 0;
3295         total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
3296                         usb_endpoint_maxp(&urb->ep->desc));
3297         /* How much data is in the first TRB? */
3298         addr = (u64) urb->transfer_dma;
3299         trb_buff_len = TRB_MAX_BUFF_SIZE -
3300                 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
3301         if (trb_buff_len > urb->transfer_buffer_length)
3302                 trb_buff_len = urb->transfer_buffer_length;
3303
3304         first_trb = true;
3305
3306         /* Queue the first TRB, even if it's zero-length */
3307         do {
3308                 u32 remainder = 0;
3309                 field = 0;
3310
3311                 /* Don't change the cycle bit of the first TRB until later */
3312                 if (first_trb) {
3313                         first_trb = false;
3314                         if (start_cycle == 0)
3315                                 field |= 0x1;
3316                 } else
3317                         field |= ep_ring->cycle_state;
3318
3319                 /* Chain all the TRBs together; clear the chain bit in the last
3320                  * TRB to indicate it's the last TRB in the chain.
3321                  */
3322                 if (num_trbs > 1) {
3323                         field |= TRB_CHAIN;
3324                 } else {
3325                         /* FIXME - add check for ZERO_PACKET flag before this */
3326                         td->last_trb = ep_ring->enqueue;
3327                         field |= TRB_IOC;
3328                 }
3329
3330                 /* Only set interrupt on short packet for IN endpoints */
3331                 if (usb_urb_dir_in(urb))
3332                         field |= TRB_ISP;
3333
3334                 /* Set the TRB length, TD size, and interrupter fields. */
3335                 if (xhci->hci_version < 0x100) {
3336                         remainder = xhci_td_remainder(
3337                                         urb->transfer_buffer_length -
3338                                         running_total);
3339                 } else {
3340                         remainder = xhci_v1_0_td_remainder(running_total,
3341                                         trb_buff_len, total_packet_count, urb,
3342                                         num_trbs - 1);
3343                 }
3344                 length_field = TRB_LEN(trb_buff_len) |
3345                         remainder |
3346                         TRB_INTR_TARGET(0);
3347
3348                 if (num_trbs > 1)
3349                         more_trbs_coming = true;
3350                 else
3351                         more_trbs_coming = false;
3352                 queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
3353                                 lower_32_bits(addr),
3354                                 upper_32_bits(addr),
3355                                 length_field,
3356                                 field | TRB_TYPE(TRB_NORMAL));
3357                 --num_trbs;
3358                 running_total += trb_buff_len;
3359
3360                 /* Calculate length for next transfer */
3361                 addr += trb_buff_len;
3362                 trb_buff_len = urb->transfer_buffer_length - running_total;
3363                 if (trb_buff_len > TRB_MAX_BUFF_SIZE)
3364                         trb_buff_len = TRB_MAX_BUFF_SIZE;
3365         } while (running_total < urb->transfer_buffer_length);
3366
3367         check_trb_math(urb, num_trbs, running_total);
3368         giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3369                         start_cycle, start_trb);
3370         return 0;
3371 }
3372
3373 /* Caller must have locked xhci->lock */
3374 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3375                 struct urb *urb, int slot_id, unsigned int ep_index)
3376 {
3377         struct xhci_ring *ep_ring;
3378         int num_trbs;
3379         int ret;
3380         struct usb_ctrlrequest *setup;
3381         struct xhci_generic_trb *start_trb;
3382         int start_cycle;
3383         u32 field, length_field;
3384         struct urb_priv *urb_priv;
3385         struct xhci_td *td;
3386
3387         ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3388         if (!ep_ring)
3389                 return -EINVAL;
3390
3391         /*
3392          * Need to copy setup packet into setup TRB, so we can't use the setup
3393          * DMA address.
3394          */
3395         if (!urb->setup_packet)
3396                 return -EINVAL;
3397
3398         if (!in_interrupt())
3399                 xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
3400                                 slot_id, ep_index);
3401         /* 1 TRB for setup, 1 for status */
3402         num_trbs = 2;
3403         /*
3404          * Don't need to check if we need additional event data and normal TRBs,
3405          * since data in control transfers will never get bigger than 16MB
3406          * XXX: can we get a buffer that crosses 64KB boundaries?
3407          */
3408         if (urb->transfer_buffer_length > 0)
3409                 num_trbs++;
3410         ret = prepare_transfer(xhci, xhci->devs[slot_id],
3411                         ep_index, urb->stream_id,
3412                         num_trbs, urb, 0, false, mem_flags);
3413         if (ret < 0)
3414                 return ret;
3415
3416         urb_priv = urb->hcpriv;
3417         td = urb_priv->td[0];
3418
3419         /*
3420          * Don't give the first TRB to the hardware (by toggling the cycle bit)
3421          * until we've finished creating all the other TRBs.  The ring's cycle
3422          * state may change as we enqueue the other TRBs, so save it too.
3423          */
3424         start_trb = &ep_ring->enqueue->generic;
3425         start_cycle = ep_ring->cycle_state;
3426
3427         /* Queue setup TRB - see section 6.4.1.2.1 */
3428         /* FIXME better way to translate setup_packet into two u32 fields? */
3429         setup = (struct usb_ctrlrequest *) urb->setup_packet;
3430         field = 0;
3431         field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3432         if (start_cycle == 0)
3433                 field |= 0x1;
3434
3435         /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
3436         if (xhci->hci_version == 0x100) {
3437                 if (urb->transfer_buffer_length > 0) {
3438                         if (setup->bRequestType & USB_DIR_IN)
3439                                 field |= TRB_TX_TYPE(TRB_DATA_IN);
3440                         else
3441                                 field |= TRB_TX_TYPE(TRB_DATA_OUT);
3442                 }
3443         }
3444
3445         queue_trb(xhci, ep_ring, false, true, false,
3446                   setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3447                   le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3448                   TRB_LEN(8) | TRB_INTR_TARGET(0),
3449                   /* Immediate data in pointer */
3450                   field);
3451
3452         /* If there's data, queue data TRBs */
3453         /* Only set interrupt on short packet for IN endpoints */
3454         if (usb_urb_dir_in(urb))
3455                 field = TRB_ISP | TRB_TYPE(TRB_DATA);
3456         else
3457                 field = TRB_TYPE(TRB_DATA);
3458
3459         length_field = TRB_LEN(urb->transfer_buffer_length) |
3460                 xhci_td_remainder(urb->transfer_buffer_length) |
3461                 TRB_INTR_TARGET(0);
3462         if (urb->transfer_buffer_length > 0) {
3463                 if (setup->bRequestType & USB_DIR_IN)
3464                         field |= TRB_DIR_IN;
3465                 queue_trb(xhci, ep_ring, false, true, false,
3466                                 lower_32_bits(urb->transfer_dma),
3467                                 upper_32_bits(urb->transfer_dma),
3468                                 length_field,
3469                                 field | ep_ring->cycle_state);
3470         }
3471
3472         /* Save the pointer to the last TRB in the TD */
3473         td->last_trb = ep_ring->enqueue;
3474
3475         /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3476         /* If the device sent data, the status stage is an OUT transfer */
3477         if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3478                 field = 0;
3479         else
3480                 field = TRB_DIR_IN;
3481         queue_trb(xhci, ep_ring, false, false, false,
3482                         0,
3483                         0,
3484                         TRB_INTR_TARGET(0),
3485                         /* Event on completion */
3486                         field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3487
3488         giveback_first_trb(xhci, slot_id, ep_index, 0,
3489                         start_cycle, start_trb);
3490         return 0;
3491 }
3492
3493 static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
3494                 struct urb *urb, int i)
3495 {
3496         int num_trbs = 0;
3497         u64 addr, td_len;
3498
3499         addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3500         td_len = urb->iso_frame_desc[i].length;
3501
3502         num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3503                         TRB_MAX_BUFF_SIZE);
3504         if (num_trbs == 0)
3505                 num_trbs++;
3506
3507         return num_trbs;
3508 }
3509
3510 /*
3511  * The transfer burst count field of the isochronous TRB defines the number of
3512  * bursts that are required to move all packets in this TD.  Only SuperSpeed
3513  * devices can burst up to bMaxBurst number of packets per service interval.
3514  * This field is zero based, meaning a value of zero in the field means one
3515  * burst.  Basically, for everything but SuperSpeed devices, this field will be
3516  * zero.  Only xHCI 1.0 host controllers support this field.
3517  */
3518 static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3519                 struct usb_device *udev,
3520                 struct urb *urb, unsigned int total_packet_count)
3521 {
3522         unsigned int max_burst;
3523
3524         if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
3525                 return 0;
3526
3527         max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3528         return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3529 }
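
/*
 * Worked example (illustrative): a SuperSpeed endpoint with bMaxBurst = 3
 * moves up to 4 packets per burst, so a 10-packet TD needs
 * DIV_ROUND_UP(10, 4) = 3 bursts, encoded zero-based as 2.
 */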
3530
3531 /*
3532  * Returns the number of packets in the last "burst" of packets.  This field is
3533  * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
3534  * the last burst packet count is equal to the total number of packets in the
3535  * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
3536  * must contain (bMaxBurst + 1) number of packets, but the last burst can
3537  * contain 1 to (bMaxBurst + 1) packets.
3538  */
3539 static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3540                 struct usb_device *udev,
3541                 struct urb *urb, unsigned int total_packet_count)
3542 {
3543         unsigned int max_burst;
3544         unsigned int residue;
3545
3546         if (xhci->hci_version < 0x100)
3547                 return 0;
3548
3549         switch (udev->speed) {
3550         case USB_SPEED_SUPER:
3551                 /* bMaxBurst is zero based: 0 means 1 packet per burst */
3552                 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3553                 residue = total_packet_count % (max_burst + 1);
3554                 /* If residue is zero, the last burst contains (max_burst + 1)
3555                  * number of packets, but the TLBPC field is zero-based.
3556                  */
3557                 if (residue == 0)
3558                         return max_burst;
3559                 return residue - 1;
3560         default:
3561                 if (total_packet_count == 0)
3562                         return 0;
3563                 return total_packet_count - 1;
3564         }
3565 }
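
/*
 * Worked example (illustrative): continuing the bMaxBurst = 3, 10-packet
 * TD above, residue = 10 % 4 = 2, so the last burst carries 2 packets and
 * the zero-based field is 1.  With 8 packets, residue = 0 and the field
 * is max_burst = 3, i.e. a full (bMaxBurst + 1)-packet last burst.
 */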
3566
3567 /* This is for isoc transfer */
3568 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3569                 struct urb *urb, int slot_id, unsigned int ep_index)
3570 {
3571         struct xhci_ring *ep_ring;
3572         struct urb_priv *urb_priv;
3573         struct xhci_td *td;
3574         int num_tds, trbs_per_td;
3575         struct xhci_generic_trb *start_trb;
3576         bool first_trb;
3577         int start_cycle;
3578         u32 field, length_field;
3579         int running_total, trb_buff_len, td_len, td_remain_len, ret;
3580         u64 start_addr, addr;
3581         int i, j;
3582         bool more_trbs_coming;
3583
3584         ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
3585
3586         num_tds = urb->number_of_packets;
3587         if (num_tds < 1) {
3588                 xhci_dbg(xhci, "Isoc URB with zero packets?\n");
3589                 return -EINVAL;
3590         }
3591
3592         if (!in_interrupt())
3593                 xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
3594                                 " addr = %#llx, num_tds = %d\n",
3595                                 urb->ep->desc.bEndpointAddress,
3596                                 urb->transfer_buffer_length,
3597                                 urb->transfer_buffer_length,
3598                                 (unsigned long long)urb->transfer_dma,
3599                                 num_tds);
3600
3601         start_addr = (u64) urb->transfer_dma;
3602         start_trb = &ep_ring->enqueue->generic;
3603         start_cycle = ep_ring->cycle_state;
3604
3605         urb_priv = urb->hcpriv;
3606         /* Queue the first TRB, even if it's zero-length */
3607         for (i = 0; i < num_tds; i++) {
3608                 unsigned int total_packet_count;
3609                 unsigned int burst_count;
3610                 unsigned int residue;
3611
3612                 first_trb = true;
3613                 running_total = 0;
3614                 addr = start_addr + urb->iso_frame_desc[i].offset;
3615                 td_len = urb->iso_frame_desc[i].length;
3616                 td_remain_len = td_len;
3617                 total_packet_count = DIV_ROUND_UP(td_len,
3618                                 GET_MAX_PACKET(
3619                                         usb_endpoint_maxp(&urb->ep->desc)));
3620                 /* A zero-length transfer still involves at least one packet. */
3621                 if (total_packet_count == 0)
3622                         total_packet_count++;
3623                 burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
3624                                 total_packet_count);
3625                 residue = xhci_get_last_burst_packet_count(xhci,
3626                                 urb->dev, urb, total_packet_count);
3627
3628                 trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
3629
3630                 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3631                                 urb->stream_id, trbs_per_td, urb, i, true,
3632                                 mem_flags);
3633                 if (ret < 0) {
3634                         if (i == 0)
3635                                 return ret;
3636                         goto cleanup;
3637                 }
3638
3639                 td = urb_priv->td[i];
3640                 for (j = 0; j < trbs_per_td; j++) {
3641                         u32 remainder = 0;
3642                         field = 0;
3643
3644                         if (first_trb) {
3645                                 field = TRB_TBC(burst_count) |
3646                                         TRB_TLBPC(residue);
3647                                 /* Queue the isoc TRB */
3648                                 field |= TRB_TYPE(TRB_ISOC);
3649                                 /* Assume URB_ISO_ASAP is set */
3650                                 field |= TRB_SIA;
3651                                 if (i == 0) {
3652                                         if (start_cycle == 0)
3653                                                 field |= 0x1;
3654                                 } else
3655                                         field |= ep_ring->cycle_state;
3656                                 first_trb = false;
3657                         } else {
3658                                 /* Queue other normal TRBs */
3659                                 field |= TRB_TYPE(TRB_NORMAL);
3660                                 field |= ep_ring->cycle_state;
3661                         }
3662
3663                         /* Only set interrupt on short packet for IN EPs */
3664                         if (usb_urb_dir_in(urb))
3665                                 field |= TRB_ISP;
3666
3667                         /* Chain all the TRBs together; clear the chain bit in
3668                          * the last TRB to indicate it's the last TRB in the
3669                          * chain.
3670                          */
3671                         if (j < trbs_per_td - 1) {
3672                                 field |= TRB_CHAIN;
3673                                 more_trbs_coming = true;
3674                         } else {
3675                                 td->last_trb = ep_ring->enqueue;
3676                                 field |= TRB_IOC;
3677                                 if (xhci->hci_version == 0x100 &&
3678                                                 !(xhci->quirks &
3679                                                         XHCI_AVOID_BEI)) {
3680                                         /* Set BEI bit except for the last TD */
3681                                         if (i < num_tds - 1)
3682                                                 field |= TRB_BEI;
3683                                 }
3684                                 more_trbs_coming = false;
3685                         }
3686
3687                         /* Calculate TRB length */
3688                         trb_buff_len = TRB_MAX_BUFF_SIZE -
3689                                 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
3690                         if (trb_buff_len > td_remain_len)
3691                                 trb_buff_len = td_remain_len;
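                        /*
                         * Illustrative example: TRB_MAX_BUFF_SIZE is 64KB and
                         * a TRB buffer must not cross a 64KB boundary.  For
                         * addr = 0x1f800, addr & 0xffff = 0xf800, so this TRB
                         * can carry at most 0x800 (2KB) bytes; the next TRB
                         * then starts 64KB-aligned and can carry a full 64KB.
                         */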
3692
3693                         /* Set the TRB length, TD size, & interrupter fields. */
3694                         if (xhci->hci_version < 0x100) {
3695                                 remainder = xhci_td_remainder(
3696                                                 td_len - running_total);
3697                         } else {
3698                                 remainder = xhci_v1_0_td_remainder(
3699                                                 running_total, trb_buff_len,
3700                                                 total_packet_count, urb,
3701                                                 (trbs_per_td - j - 1));
3702                         }
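                        /*
                         * Note: controllers before xHCI 1.0 expect the TD
                         * Size field to encode how many bytes are left in
                         * the TD, while the 1.0 spec redefined it as the
                         * number of packets remaining, hence the two
                         * helpers above.
                         */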
3703                         length_field = TRB_LEN(trb_buff_len) |
3704                                 remainder |
3705                                 TRB_INTR_TARGET(0);
3706
3707                         queue_trb(xhci, ep_ring, false, more_trbs_coming, true,
3708                                 lower_32_bits(addr),
3709                                 upper_32_bits(addr),
3710                                 length_field,
3711                                 field);
3712                         running_total += trb_buff_len;
3713
3714                         addr += trb_buff_len;
3715                         td_remain_len -= trb_buff_len;
3716                 }
3717
3718                 /* Check TD length */
3719                 if (running_total != td_len) {
3720                         xhci_err(xhci, "ISOC TD length mismatch\n");
3721                         ret = -EINVAL;
3722                         goto cleanup;
3723                 }
3724         }
3725
3726         if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
3727                 if (xhci->quirks & XHCI_AMD_PLL_FIX)
3728                         usb_amd_quirk_pll_disable();
3729         }
3730         xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
3731
3732         giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3733                         start_cycle, start_trb);
3734         return 0;
3735 cleanup:
3736         /* Clean up a partially enqueued isoc transfer. */
3737
3738         for (i--; i >= 0; i--)
3739                 list_del_init(&urb_priv->td[i]->td_list);
3740
3741         /* Use the first TD as a temporary variable to turn the TDs we've queued
3742          * into No-ops with a software-owned cycle bit. That way the hardware
3743          * won't accidentally start executing bogus TDs when we partially
3744          * overwrite them.  td->first_trb and td->start_seg are already set.
3745          */
3746         urb_priv->td[0]->last_trb = ep_ring->enqueue;
3747         /* Every TRB except the first & last will have its cycle bit flipped. */
3748         td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
3749
3750         /* Reset the ring enqueue back to the first TRB and its cycle bit. */
3751         ep_ring->enqueue = urb_priv->td[0]->first_trb;
3752         ep_ring->enq_seg = urb_priv->td[0]->start_seg;
3753         ep_ring->cycle_state = start_cycle;
3754         usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3755         return ret;
3756 }
3757
3758 /*
3759  * Check the transfer ring to guarantee there is enough room for the URB,
3760  * then update the ISO URB's start_frame and interval.
3761  * The interval is adjusted the same way xhci_queue_intr_tx does it; for
3762  * now, just use the xHCI microframe index to set urb->start_frame.
3763  * Always assume URB_ISO_ASAP is set, and NEVER use urb->start_frame as input.
3764  */
3765 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3766                 struct urb *urb, int slot_id, unsigned int ep_index)
3767 {
3768         struct xhci_virt_device *xdev;
3769         struct xhci_ring *ep_ring;
3770         struct xhci_ep_ctx *ep_ctx;
3771         int start_frame;
3772         int xhci_interval;
3773         int ep_interval;
3774         int num_tds, num_trbs, i;
3775         int ret;
3776
3777         xdev = xhci->devs[slot_id];
3778         ep_ring = xdev->eps[ep_index].ring;
3779         ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3780
3781         num_trbs = 0;
3782         num_tds = urb->number_of_packets;
3783         for (i = 0; i < num_tds; i++)
3784                 num_trbs += count_isoc_trbs_needed(xhci, urb, i);
3785
3786         /* Check the ring to guarantee there is enough room for the whole URB.
3787          * Do not insert any TD of the URB into the ring if the check fails.
3788          */
3789         ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3790                            num_trbs, true, mem_flags);
3791         if (ret)
3792                 return ret;
3793
3794         start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
3795         start_frame &= 0x3fff;
3796
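        /*
         * MFINDEX counts 125 us microframes; low- and full-speed devices
         * schedule in 1 ms frames, so the right-shift by 3 below converts
         * the microframe index into a frame number for those speeds.
         */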
3797         urb->start_frame = start_frame;
3798         if (urb->dev->speed == USB_SPEED_LOW ||
3799                         urb->dev->speed == USB_SPEED_FULL)
3800                 urb->start_frame >>= 3;
3801
3802         xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3803         ep_interval = urb->interval;
3804         /* Convert to microframes */
3805         if (urb->dev->speed == USB_SPEED_LOW ||
3806                         urb->dev->speed == USB_SPEED_FULL)
3807                 ep_interval *= 8;
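        /* For example, a full-speed URB with urb->interval = 4 frames is
         * compared as 32 microframes against the endpoint context value.
         */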
3808         /* FIXME change this to a warning and a suggestion to use the new API
3809          * to set the polling interval (once the API is added).
3810          */
3811         if (xhci_interval != ep_interval) {
3812                 if (printk_ratelimit())
3813                         dev_dbg(&urb->dev->dev, "Driver uses different interval"
3814                                         " (%d microframe%s) than xHCI "
3815                                         "(%d microframe%s)\n",
3816                                         ep_interval,
3817                                         ep_interval == 1 ? "" : "s",
3818                                         xhci_interval,
3819                                         xhci_interval == 1 ? "" : "s");
3820                 urb->interval = xhci_interval;
3821                 /* Convert back to frames for LS/FS devices */
3822                 if (urb->dev->speed == USB_SPEED_LOW ||
3823                                 urb->dev->speed == USB_SPEED_FULL)
3824                         urb->interval /= 8;
3825         }
3826         return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
3827 }
3828
3829 /****           Command Ring Operations         ****/
3830
3831 /* Generic function for queueing a command TRB on the command ring.
3832  * Check to make sure there's room on the command ring for one command TRB.
3833  * Also check that there's room reserved for commands that must not fail.
3834  * If this is a command that must not fail, meaning command_must_succeed = TRUE,
3835  * then only check for the number of reserved spots.
3836  * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
3837  * because the command event handler may want to resubmit a failed command.
3838  */
3839 static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
3840                 u32 field3, u32 field4, bool command_must_succeed)
3841 {
3842         int reserved_trbs = xhci->cmd_ring_reserved_trbs;
3843         int ret;
3844
3845         if (!command_must_succeed)
3846                 reserved_trbs++;
3847
3848         ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
3849                         reserved_trbs, false, GFP_ATOMIC);
3850         if (ret < 0) {
3851                 xhci_err(xhci, "ERR: No room for command on command ring\n");
3852                 if (command_must_succeed)
3853                         xhci_err(xhci, "ERR: Reserved TRB counting for "
3854                                         "must-succeed commands failed.\n");
3855                 return ret;
3856         }
3857         queue_trb(xhci, xhci->cmd_ring, false, false, false, field1, field2,
3858                         field3, field4 | xhci->cmd_ring->cycle_state);
3859         return 0;
3860 }
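
/*
 * Usage sketch (illustrative, not part of the original source): a No Op
 * command could be queued to exercise the command ring like so, with
 * TRB_CMD_NOOP being the No Op command type from xhci.h:
 *
 *      queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
 *
 * queue_command() OR's the ring's cycle state into field4 itself, so
 * callers never pass the cycle bit.
 */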
3861
3862 /* Queue a slot enable or disable request on the command ring */
3863 int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
3864 {
3865         return queue_command(xhci, 0, 0, 0,
3866                         TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
3867 }
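
/*
 * For example, device enumeration enables a slot with
 * xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0) (the controller
 * assigns the slot ID in the completion event), and later frees it with
 * xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, slot_id).
 */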
3868
3869 /* Queue an address device command TRB */
3870 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3871                 u32 slot_id)
3872 {
3873         return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3874                         upper_32_bits(in_ctx_ptr), 0,
3875                         TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
3876                         false);
3877 }
3878
3879 int xhci_queue_vendor_command(struct xhci_hcd *xhci,
3880                 u32 field1, u32 field2, u32 field3, u32 field4)
3881 {
3882         return queue_command(xhci, field1, field2, field3, field4, false);
3883 }
3884
3885 /* Queue a reset device command TRB */
3886 int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
3887 {
3888         return queue_command(xhci, 0, 0, 0,
3889                         TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
3890                         false);
3891 }
3892
3893 /* Queue a configure endpoint command TRB */
3894 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3895                 u32 slot_id, bool command_must_succeed)
3896 {
3897         return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3898                         upper_32_bits(in_ctx_ptr), 0,
3899                         TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
3900                         command_must_succeed);
3901 }
3902
3903 /* Queue an evaluate context command TRB */
3904 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3905                 u32 slot_id)
3906 {
3907         return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3908                         upper_32_bits(in_ctx_ptr), 0,
3909                         TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
3910                         false);
3911 }
3912
3913 /*
3914  * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
3915  * activity on an endpoint that is about to be suspended.
3916  */
3917 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
3918                 unsigned int ep_index, int suspend)
3919 {
3920         u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3921         u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3922         u32 type = TRB_TYPE(TRB_STOP_RING);
3923         u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
3924
3925         return queue_command(xhci, 0, 0, 0,
3926                         trb_slot_id | trb_ep_index | type | trb_suspend, false);
3927 }
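
/*
 * For instance, the port-suspend path stops every endpoint of a device
 * with suspend = 1 to tell the controller the device is about to be
 * suspended, while URB cancellation stops a single ring with suspend = 0.
 */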
3928
3929 /* Set Transfer Ring Dequeue Pointer command.
3930  * This should not be used for endpoints that have streams enabled.
3931  */
3932 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
3933                 unsigned int ep_index, unsigned int stream_id,
3934                 struct xhci_segment *deq_seg,
3935                 union xhci_trb *deq_ptr, u32 cycle_state)
3936 {
3937         dma_addr_t addr;
3938         u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3939         u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3940         u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
3941         u32 type = TRB_TYPE(TRB_SET_DEQ);
3942         struct xhci_virt_ep *ep;
3943
3944         addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
3945         if (addr == 0) {
3946                 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
3947                 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
3948                                 deq_seg, deq_ptr);
3949                 return 0;
3950         }
3951         ep = &xhci->devs[slot_id]->eps[ep_index];
3952         if ((ep->ep_state & SET_DEQ_PENDING)) {
3953                 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
3954                 xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
3955                 return 0;
3956         }
3957         ep->queued_deq_seg = deq_seg;
3958         ep->queued_deq_ptr = deq_ptr;
3959         return queue_command(xhci, lower_32_bits(addr) | cycle_state,
3960                         upper_32_bits(addr), trb_stream_id,
3961                         trb_slot_id | trb_ep_index | type, false);
3962 }
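
/*
 * Note that TRBs are 16 bytes long and 16-byte aligned, so the low four
 * bits of the dequeue address are always zero; that is why the consumer
 * cycle state can simply be OR'd into lower_32_bits(addr) above.
 */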
3963
3964 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
3965                 unsigned int ep_index)
3966 {
3967         u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3968         u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3969         u32 type = TRB_TYPE(TRB_RESET_EP);
3970
3971         return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
3972                         false);
3973 }